index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
5,489
|
damian-villarreal/CoinsApi
|
refs/heads/main
|
/app/__init__.py
|
# Wiring for the Coins API: one global Flask app plus its extensions.
from flask import Flask
from config import Config
from flask_mongoengine import MongoEngine
from flask_login import LoginManager
from flask_restful import Api
from flask_cors import CORS

app = Flask(__name__)
app.config.from_object(Config)  # SECRET_KEY, MongoDB settings, JSON options
api = Api(app)                  # flask-restful entry point; resources are attached in routes.py
login = LoginManager(app)       # session auth; the user_loader callback lives in models.py
db = MongoEngine(app)           # MongoDB ODM bound to the app
CORS(app)                       # allow cross-origin requests

# Imported last on purpose: routes.py needs the `api` object created above,
# so importing it at the top of the file would be a circular import.
from app import routes
|
{"/app/__init__.py": ["/config.py"], "/app/routes.py": ["/app/__init__.py", "/app/resources.py"], "/app/models.py": ["/app/__init__.py"], "/app/resources.py": ["/app/models.py"]}
|
5,490
|
damian-villarreal/CoinsApi
|
refs/heads/main
|
/app/routes.py
|
# URL map for the Coins API: binds each flask-restful Resource (defined in
# resources.py) to its endpoint on the shared `api` object from app/__init__.
from werkzeug.utils import redirect
from app import api
from .resources import *

api.add_resource(Home, '/')
api.add_resource(Redirect, '/redirect')
api.add_resource(CoinsApi, '/api/coins')
# CoinApi answers both with an id (GET one coin) and without (POST to create).
api.add_resource(CoinApi, '/api/coin/<id>','/api/coin')
api.add_resource(UsersApi, '/api/users')
api.add_resource(UserApi, '/api/user/<id>')
api.add_resource(TransactionsApi, '/api/transactions')
# NOTE(review): the singular resource reuses the plural prefix
# ('/api/transactions/<id>'), unlike coin/user — confirm this is intended.
api.add_resource(TransactionApi, '/api/transactions/<id>')
api.add_resource(AccountApi, '/api/account/<id>')
api.add_resource(AccountsApi, '/api/accounts')
api.add_resource(SignupApi, '/api/signup')
api.add_resource(LoginApi, '/api/login')
api.add_resource(LogoutApi, '/api/logout')
# api.add_resource(MyTransactions, '/api/mytransactions')
|
{"/app/__init__.py": ["/config.py"], "/app/routes.py": ["/app/__init__.py", "/app/resources.py"], "/app/models.py": ["/app/__init__.py"], "/app/resources.py": ["/app/models.py"]}
|
5,491
|
damian-villarreal/CoinsApi
|
refs/heads/main
|
/app/models.py
|
from app import db
import datetime
from flask_bcrypt import generate_password_hash, check_password_hash
from flask_login import UserMixin
from app import login
class User(UserMixin, db.Document):
    """Application user; UserMixin supplies flask-login's is_authenticated etc."""
    email = db.EmailField(required=True, unique=True)
    password = db.StringField(required=True, min_length=6)  # holds the bcrypt hash after hash_password()
    role = db.StringField(required=True, default='user')
    accounts = db.ListField(db.ReferenceField('Account'))

    def hash_password(self):
        # Replace the plaintext password with its bcrypt hash (call before save).
        self.password = generate_password_hash(self.password).decode('utf8')

    def check_password(self, password):
        # Compare a plaintext candidate against the stored bcrypt hash.
        return check_password_hash(self.password, password)


@login.user_loader
def load_user(id):
    # flask-login callback: reload the user object from the session's user id.
    return User.objects.get(id = id)
class Account(db.Document):
    """A user's balance held in one specific coin."""
    user = db.ReferenceField('User', required = True)
    coin = db.ReferenceField('Coin', required = True)
    balance = db.FloatField(required = True, min=0)  # non-negative by schema
class Transaction(db.Document):
    """A completed transfer of `amount` between two accounts (same coin)."""
    fromAccount = db.ReferenceField('Account', required = True)
    toAccount = db.ReferenceField('Account', required = True)
    amount = db.FloatField(required = True, min=0)
    # Timestamp defaults to creation time (UTC, naive datetime).
    creation = db.DateTimeField(default = datetime.datetime.utcnow, required=True)
class Coin(db.Document):
    """A currency; both the display name and the ticker must be unique."""
    name = db.StringField(required=True, unique=True )
    currency = db.StringField(required=True, unique=True)
|
{"/app/__init__.py": ["/config.py"], "/app/routes.py": ["/app/__init__.py", "/app/resources.py"], "/app/models.py": ["/app/__init__.py"], "/app/resources.py": ["/app/models.py"]}
|
5,492
|
damian-villarreal/CoinsApi
|
refs/heads/main
|
/app/resources.py
|
from flask_login.utils import login_required
from flask_restful import Resource
from flask import Response, request, jsonify, redirect, url_for
from flask_login import current_user, login_user, logout_user
from werkzeug.utils import redirect
from .models import Coin, Account, User, Transaction
class Home(Resource):
    """Landing resource: greets an authenticated user, otherwise reports no login."""

    def get(self):
        # Guard clause for the anonymous visitor; greeting for everyone else.
        if not current_user.is_authenticated:
            return 'no logueado'
        return 'bienvenido ' + current_user.email
class Redirect(Resource):
    """GET /redirect — bounce the client back to the home resource."""
    def get(self):
        return redirect('/')
#--Auth resources--
#--Auth resources--
class SignupApi(Resource):
    """POST /api/signup — create a user and seed an ARS account for them."""

    def post(self):
        body = request.get_json()
        user = User(**body)
        if User.objects(email=user.email):
            return 'el email ya se encuentra en uso'
        # For testing purposes, creating a user also creates a "peso argentino"
        # account with an initial balance.
        # Look the seed coin up BEFORE persisting anything: in the original
        # order a missing coin raised after user.save(), leaving an orphan
        # user with no account.
        coin = Coin.objects.get(name="peso argentino")
        user.hash_password()
        user.save()
        account = Account(user=user, coin=coin, balance=1000)
        account.save()
        user.accounts.append(account)
        user.save()
        return {'userId': str(user.id)}, 200
class LoginApi(Resource):
    """POST /api/login — verify credentials and open a flask-login session."""

    def post(self):
        body = request.get_json()
        user = User(**body)
        try:
            result = User.objects.get(email=user.email)
        except User.DoesNotExist:
            # Same message for unknown email and for a bad password, so the
            # endpoint does not leak which emails are registered.
            return 'usuario y/o contraseña incorrectos'
        if not result.check_password(user.password):
            return 'usuario y/o contraseña incorrectos'
        login_user(result)
        # The original returned current_user.get(id), which raised NameError
        # (`id` was the builtin) on every successful login; answer with the
        # user's id like SignupApi does.
        return {'userId': str(result.id)}, 200
class LogoutApi(Resource):
    """GET /api/logout — end the flask-login session."""
    def get(self):
        logout_user()
        return 'logged out'
#---coin resources---
#---coin resources---
class CoinApi(Resource):
    """Single-coin resource: public read, authenticated create."""

    def get(self, id):
        # Fetch one coin by id (an empty queryset serializes to []).
        return jsonify(Coin.objects(id=id))

    @login_required
    def post(self):
        # Create a coin, refusing duplicate names or duplicate currency codes.
        payload = request.get_json()
        new_coin = Coin(**payload)
        if Coin.objects(name=new_coin.name):
            return 'el nombre de la moneda ya se encuentra en uso'
        if Coin.objects(currency=new_coin.currency):
            return 'la sigla ya se encuentra en uso'
        new_coin.save()
        return {'id': str(new_coin.id)}, 200
class CoinsApi(Resource):
    """GET /api/coins — list every coin."""
    def get(self):
        coins = Coin.objects()
        return jsonify(coins)
#--User Resources--
#--User Resources--
class UsersApi(Resource):
    """GET /api/users — list every user."""

    def get(self):
        # The original called User.objects.account(), which is not a queryset
        # method and raised AttributeError on every request; list all users.
        users = User.objects()
        return jsonify(users)
class UserApi(Resource):
    """GET /api/user/<id> — fetch one user by id."""
    def get(self, id):
        user = User.objects(id=id)
        return jsonify(user)
#--Account Resources--
#--Account Resources--
class AccountApi(Resource):
    """GET /api/account/<id> — fetch one account by id."""
    def get(self, id):
        account = Account.objects(id=id)
        return jsonify(account)
class AccountsApi(Resource):
    """GET /api/accounts — list every account."""

    def get(self):
        # The original called Account.objects.coin(name=...), which is not a
        # valid queryset method (AttributeError at request time). Mirroring
        # the other collection resources, return the full collection.
        accounts = Account.objects()
        return jsonify(accounts)
#--Transaction Resources--
#--Transaction Resources--
class TransactionApi(Resource):
    """GET /api/transactions/<id> — fetch one transaction by id."""
    def get(self, id):
        transaction = Transaction.objects(id=id)
        return jsonify(transaction)
class TransactionsApi(Resource):
    """Transaction collection: list all, or create a transfer between accounts."""

    def get(self):
        # List every transaction.
        transactions = Transaction.objects()
        return jsonify(transactions)

    def post(self):
        """Create a transfer.

        Rules enforced: the source account must belong to the logged-in user;
        the destination must exist, hold the same coin and differ from the
        source; the amount must be positive and within the source balance."""
        body = request.get_json()
        fromAccount = Account.objects.get(id=body['fromAccount'])
        # The user must be logged in and may only transfer from their own account.
        if not fromAccount.user == current_user:
            return 'operacion no permitida'
        # The destination must exist and use the same coin as the source.
        try:
            toAccount = Account.objects.get(id=body['toAccount'], coin=fromAccount.coin)
        except Account.DoesNotExist:  # narrowed from a bare except
            return "la cuenta de destino es incorrecta o inexistente"
        # No transfers to oneself.
        if fromAccount == toAccount:
            return 'la cuenta de destino es incorrecta o inexistente'
        amount = body['amount']
        if amount <= 0:
            return 'El monto a transferir no puede ser cero'
        # The user cannot transfer more than the account holds.
        if amount > fromAccount.balance:
            return 'el importe ingresado supera el saldo de la cuenta'
        # Debit the source BEFORE crediting the destination: the original
        # credited first, so a failure between the two saves created money.
        # (No multi-document transaction is used here, so this ordering is
        # best-effort, not atomic.)
        fromAccount.balance -= amount
        fromAccount.save()
        toAccount.balance += amount
        toAccount.save()
        transaction = Transaction(fromAccount=fromAccount, toAccount=toAccount, amount=amount)
        transaction.save()
        return {'transactionId': str(transaction.id)}, 200
|
{"/app/__init__.py": ["/config.py"], "/app/routes.py": ["/app/__init__.py", "/app/resources.py"], "/app/models.py": ["/app/__init__.py"], "/app/resources.py": ["/app/models.py"]}
|
5,493
|
damian-villarreal/CoinsApi
|
refs/heads/main
|
/config.py
|
import os
class Config(object):
    """Flask configuration.

    Each value can be overridden through an environment variable; the literal
    fallbacks keep existing deployments working unchanged.
    SECURITY NOTE(review): the secret key and DB credentials were committed to
    version control — rotate them and prefer the environment variables.
    """
    SECRET_KEY = os.environ.get(
        'SECRET_KEY', 'MbXCJXAW9PHIMJnYc87E9yT_T-Bxbd6zDpuKWg')
    MONGODB_SETTINGS = {'host': os.environ.get(
        'MONGODB_URI',
        'mongodb+srv://admin:root@coins.zw6ys.mongodb.net/Coins?retryWrites=true&w=majority')}
    # Preserve insertion order of keys in JSON responses.
    JSON_SORT_KEYS = False
|
{"/app/__init__.py": ["/config.py"], "/app/routes.py": ["/app/__init__.py", "/app/resources.py"], "/app/models.py": ["/app/__init__.py"], "/app/resources.py": ["/app/models.py"]}
|
5,494
|
RALF34/McGyver_Game
|
refs/heads/master
|
/classes_and_methods.py
|
"""classes for McGyver game"""
import pygame
import random
from pygame.locals import *
from constants import *
class Labyrinth :
    """Game board: the file coding the maze layout plus the pixel positions
    of the three collectable tools (keyed by tool name)."""

    def __init__(self, code_file):
        self.code_file = code_file
        # Entries are removed by McGyver.turning() as tools get collected.
        self.position_tools = {'ether':(0,0), 'tube':(0,0), 'needle':(0,0)}

    def display_game(self,position_mcgyver,screen):
        """Method to display the labyrinth: background, characters, any tool
        not yet collected, then the walls read from the code file."""
        pygame.init()
        background = pygame.image.load("green_background.jpg").convert()
        mcgyver = pygame.image.load("mcgyver.png").convert()
        guard = pygame.image.load("guard.png").convert()
        needle = pygame.image.load("needle.png").convert()
        tube = pygame.image.load("plastic_tube.png").convert()
        ether = pygame.image.load("ether.png").convert()
        screen.blit(background,(0,0))
        screen.blit(mcgyver,position_mcgyver)
        # The guard waits on the bottom-right (exit) cell.
        screen.blit(guard,((nbr_cells_on_board-1)*lenght_cell,(nbr_cells_on_board-1)*lenght_cell))
        # A tool key still present means it has not been picked up yet.
        if 'ether' in self.position_tools.keys():
            screen.blit(ether,self.position_tools['ether'])
        if 'tube' in self.position_tools.keys():
            screen.blit(tube,self.position_tools['tube'])
        if 'needle' in self.position_tools.keys():
            screen.blit(needle,self.position_tools['needle'])
        pygame.display.flip()
        wall = pygame.image.load("WALL.png").convert()
        # 'M' characters mark wall cells; i is the row (y cell), j the column
        # (x cell); len(line)-1 skips the trailing newline.
        with open(self.code_file, "r") as f:
            i = 0
            for line in f:
                for j in range(len(line)-1):
                    if line[j] == 'M':
                        screen.blit(wall, (j*lenght_cell,i*lenght_cell))
                        pygame.display.flip()
                i += 1

    def placing_tools(self):
        """Method for randomly placing the tools McGyver has to collect in
        order to make the syringe: each tool lands on a random corridor ('C')
        cell, re-rolling until the cell is free of the other tools.
        NOTE(review): loops forever if no 'C' cell exists in the 2..n-2 band."""
        needle_correctly_placed, tube_correctly_placed, ether_correctly_placed = False, False, False
        with open(self.code_file, "r") as f:
            lines = f.readlines()
        while not ether_correctly_placed:
            x_cell_ether = random.randint(2,nbr_cells_on_board-2)
            y_cell_ether = random.randint(2,nbr_cells_on_board-2)
            if lines[y_cell_ether][x_cell_ether] == "C":
                # Positions are stored in pixels, matching the blit coordinates.
                self.position_tools['ether'] = (x_cell_ether * lenght_cell , y_cell_ether * lenght_cell)
                ether_correctly_placed = True
        while not tube_correctly_placed:
            x_cell_tube = random.randint(2,nbr_cells_on_board-2)
            y_cell_tube = random.randint(2,nbr_cells_on_board-2)
            # The tube must not land on the ether's cell.
            if lines[y_cell_tube][x_cell_tube] == "C" and (x_cell_tube , y_cell_tube) != (x_cell_ether , y_cell_ether):
                self.position_tools['tube'] = (x_cell_tube * lenght_cell , y_cell_tube * lenght_cell)
                tube_correctly_placed = True
        while not needle_correctly_placed:
            x_cell_needle = random.randint(2,nbr_cells_on_board-2)
            y_cell_needle = random.randint(2,nbr_cells_on_board-2)
            if lines[y_cell_needle][x_cell_needle] == "C":
                # The needle must avoid both previously placed tools.
                if (x_cell_needle , y_cell_needle) != (x_cell_tube , y_cell_tube):
                    if (x_cell_needle , y_cell_needle) != (x_cell_ether , y_cell_ether):
                        self.position_tools['needle'] = (x_cell_needle * lenght_cell , y_cell_needle * lenght_cell)
                        needle_correctly_placed = True
class McGyver:
    """Player character: cell coordinates, pixel position, and the number of
    tools collected so far."""

    def __init__(self):
        self.x_cell = 0
        self.y_cell = 0
        self.x_pixel_pos = 0
        self.y_pixel_pos = 0
        self.objects_found = 0

    def turning(self, laby, towards):
        """Move one cell in direction `towards` ('right'/'left'/'up'/'down').

        The board edge and wall cells ('M' in laby.code_file) block the move;
        stepping onto a tool collects it. The original's four copy-pasted
        direction branches are folded into one delta-driven path, and the
        unused pygame image load was dropped.
        """
        with open(laby.code_file, "r") as f:
            lines = f.readlines()
        # Map each direction onto a (dx, dy) cell delta.
        deltas = {'right': (1, 0), 'left': (-1, 0), 'up': (0, -1), 'down': (0, 1)}
        if towards not in deltas:
            return
        dx, dy = deltas[towards]
        new_x = self.x_cell + dx
        new_y = self.y_cell + dy
        # Stay on the board.
        if not (0 <= new_x < nbr_cells_on_board and 0 <= new_y < nbr_cells_on_board):
            return
        # Walls block movement.
        if lines[new_y][new_x] == 'M':
            return
        self._collect_tool_at(laby, (new_x * lenght_cell, new_y * lenght_cell))
        self.x_cell = new_x
        self.y_cell = new_y
        self.x_pixel_pos = self.x_cell * lenght_cell
        self.y_pixel_pos = self.y_cell * lenght_cell

    def _collect_tool_at(self, laby, target):
        # If a tool occupies the destination pixel position, count it and
        # remove it from the labyrinth's dict so it is no longer drawn.
        if target in laby.position_tools.values():
            self.objects_found += 1
            for key in laby.position_tools:
                if laby.position_tools[key] == target:
                    laby.position_tools.pop(key)
                    break
|
{"/classes_and_methods.py": ["/constants.py"], "/McGyver_game.py": ["/classes.py", "/constants.py"], "/classes.py": ["/constants.py"]}
|
5,495
|
RALF34/McGyver_Game
|
refs/heads/master
|
/Labyrinth _game.py
|
"""classes for McGyver game"""
import pygame, random
from pygame.locals import *
from constantes import *
class Labyrinth:
    """Draft labyrinth (superseded by classes.py): loads the board layout from
    a code file and randomly places the three tools on corridor cells."""

    def __init__(self, code_file):
        self.code_file = code_file
        # A list, not a tuple: display_tools assigns by index, which raised
        # TypeError with the original tuple.
        self.position_tools = [(0, 0), (0, 0), (0, 0)]

    def display_laby(self):
        """Draw the floor and the wall ('M') cells, then block on the event
        loop until the window is closed."""
        pygame.init()
        screen = pygame.display.set_mode((15*30, 15*30))
        background = pygame.image.load("floor.jpg").convert()
        screen.blit(background, (0,0))
        pygame.display.flip()
        wall = pygame.image.load("wall.png").convert()
        with open(self.code_file, "r") as f:
            i = 0
            for line in f:
                for j in range(len(line)-1):
                    if line[j] == 'M':
                        screen.blit(wall, (j*30,i*30))
                        pygame.display.flip()
                i += 1
        running = True
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False

    def display_tools(self, screen):
        """Randomly place the ether, tube and needle on distinct corridor
        ('C') cells and draw them. The original's nested while loops never
        re-tested the outer conditions and misspelled the needle flag
        (`needle_corretly_placed`), so placement never terminated; flattened
        into three independent rejection-sampling loops."""
        needle = pygame.image.load("needle.png").convert()
        tube = pygame.image.load("plastic_tube.png").convert()
        ether = pygame.image.load("ether.png").convert()
        with open(self.code_file, "r") as f:
            lines = f.readlines()
        ether_correctly_placed = False
        while not ether_correctly_placed:
            x_ether, y_ether = random.randint(2,14), random.randint(2,14)
            if lines[y_ether][x_ether] == "C":
                self.position_tools[0] = (x_ether,y_ether)
                ether_correctly_placed = True
                screen.blit(ether, (x_ether*30,y_ether*30))
                pygame.display.flip()
        tube_correctly_placed = False
        while not tube_correctly_placed:
            x_tube, y_tube = random.randint(2,14), random.randint(2,14)
            if lines[y_tube][x_tube] == "C" and (x_tube,y_tube) != (x_ether,y_ether):
                self.position_tools[1] = (x_tube,y_tube)
                tube_correctly_placed = True
                screen.blit(tube, (x_tube*30,y_tube*30))
                pygame.display.flip()
        needle_correctly_placed = False
        while not needle_correctly_placed:
            x_needle, y_needle = random.randint(2,14), random.randint(2,14)
            if (lines[y_needle][x_needle] == "C"
                    and (x_needle,y_needle) != (x_tube,y_tube)
                    and (x_needle,y_needle) != (x_ether,y_ether)):
                self.position_tools[2] = (x_needle,y_needle)
                needle_correctly_placed = True
                screen.blit(needle,(x_needle * 30, y_needle * 30))
                pygame.display.flip()
class McGyver:
    """Draft player character (superseded by classes.py).

    The original body was unfinishable: a truncated statement (`self.`),
    references to nonexistent attributes (self.case_x, self.self.y_cell,
    self.position_tools on the wrong class) and a misspelled constant
    (length_cell). Rewritten as plain wall-aware movement; tool pickup is
    left to the Labyrinth in this draft — TODO confirm intended semantics.
    """

    def __init__(self):
        self.x_cell = 0
        self.y_cell = 0
        self.x_pixel_pos = 0
        self.y_pixel_pos = 0
        # Completes the truncated `self.` line of the draft.
        self.tools_found = 0

    def moving(self, towards):
        """Move one cell right/left/up/down unless the board edge or a wall
        ('M' in struct_laby.txt) blocks the way."""
        with open("struct_laby.txt", "r") as f:
            lines = f.readlines()
        deltas = {'right': (1, 0), 'left': (-1, 0), 'up': (0, -1), 'down': (0, 1)}
        if towards not in deltas:
            return
        dx, dy = deltas[towards]
        new_x, new_y = self.x_cell + dx, self.y_cell + dy
        if not (0 <= new_x < nbr_cells_on_board and 0 <= new_y < nbr_cells_on_board):
            return
        if lines[new_y][new_x] == 'M':
            return
        self.x_cell, self.y_cell = new_x, new_y
        self.x_pixel_pos = self.x_cell * lenght_cell
        self.y_pixel_pos = self.y_cell * lenght_cell
|
{"/classes_and_methods.py": ["/constants.py"], "/McGyver_game.py": ["/classes.py", "/constants.py"], "/classes.py": ["/constants.py"]}
|
5,496
|
RALF34/McGyver_Game
|
refs/heads/master
|
/McGyver_game.py
|
"""
Game "Trapped in the labyrinthe"
McGyver has to collect three objects and reach the exit of the labyrinthe
Python script
Fichiers : ,McGyver_game.py, classes.py, constants.py, struct_laby.txt
"""
import pygame
from pygame.locals import *
from classes import *
from constants import *
pygame.init()
screen = pygame.display.set_mode((nbr_cells_on_board*lenght_cell,\
nbr_cells_on_board*lenght_cell))
laby = Labyrinth("struct_laby.txt")
macGyver = Character()
laby.placing_tools()
laby.display_game((0, 0), screen)
pygame.display.flip()
running = 1
while running:
pygame.time.Clock().tick(30)
for event in pygame.event.get():
if event.type == QUIT:
running = 0
elif event.type == KEYDOWN:
if event.key == K_RIGHT:
macGyver.turning(laby, 'right')
elif event.key == K_LEFT:
macGyver.turning(laby, 'left')
elif event.key == K_UP:
macGyver.turning(laby, 'up')
elif event.key == K_DOWN:
macGyver.turning(laby, 'down')
laby.display_game((macGyver.x_pixel_pos, macGyver.y_pixel_pos),\
screen)
pygame.display.flip()
if (macGyver.x_cell, macGyver.y_cell) == \
(nbr_cells_on_board-1, nbr_cells_on_board-1):
if macGyver.tools_found == 3:
screen.fill((234, 234, 234))
success = pygame.image.load("you've_won.jpg").convert()
screen.blit(success, (0.15*nbr_cells_on_board*lenght_cell, 0.4*nbr_cells_on_board*lenght_cell))
else:
screen.fill((107, 133, 237))
defeat = pygame.image.load("you've_lost.png").convert()
screen.blit(defeat, (0.1*nbr_cells_on_board*lenght_cell, 0.3*nbr_cells_on_board*lenght_cell))
pygame.display.flip()
|
{"/classes_and_methods.py": ["/constants.py"], "/McGyver_game.py": ["/classes.py", "/constants.py"], "/classes.py": ["/constants.py"]}
|
5,497
|
RALF34/McGyver_Game
|
refs/heads/master
|
/classes.py
|
"""classes for McGyver game"""
import pygame
import random
from pygame.locals import *
from constants import *
class Labyrinth:
    """Game board: the layout file plus the pixel positions of the three
    tools still on the board."""

    def __init__(self, code_file):
        # File coding the structure of the labyrinth.
        self.code_file = code_file
        # Dictionary mapping each tool name to its pixel position; entries
        # are removed as tools are collected.
        self.position_tools = {'ether': (0,0), 'tube': (0,0), 'needle': (0,0)}

    def display_game(self, position_mcgyver, screen):
        """Method to display the labyrinth."""
        # Solid green background (no background image in this version).
        screen.fill((34,177,76))
        mcgyver = pygame.image.load("mcgyver.png").convert()
        guard = pygame.image.load("guard.png").convert()
        needle = pygame.image.load("needle.png").convert()
        tube = pygame.image.load("plastic_tube.png").convert()
        ether = pygame.image.load("ether.png").convert()
        screen.blit(mcgyver, position_mcgyver)
        # The guard sits on the exit (bottom-right) cell.
        screen.blit(guard, ((nbr_cells_on_board-1)*lenght_cell,
                            (nbr_cells_on_board-1)*lenght_cell))
        # A key still present means the tool has not been found yet, so it is
        # still drawn on screen.
        if 'ether' in self.position_tools.keys():
            screen.blit(ether, self.position_tools['ether'])
        if 'tube' in self.position_tools.keys():
            screen.blit(tube, self.position_tools['tube'])
        if 'needle' in self.position_tools.keys():
            screen.blit(needle, self.position_tools['needle'])
        #pygame.display.flip()
        wall = pygame.image.load("WALL.png").convert()
        # Character scan of code_file to draw the walls: i = row, j = column,
        # k = index into the raw text; stops at the first 't' (sentinel
        # character in the layout file — presumably; confirm the file format).
        with open(self.code_file, "r") as f:
            text = f.read()
            i, j, k = 0, 0, 0
            while text[k] != 't':
                if text[k] == '\n':
                    i += 1
                    j = 0
                    k += 1
                elif text[k] == 'M':
                    screen.blit(wall, (j*lenght_cell, i*lenght_cell))
                    #pygame.display.flip()
                    j += 1
                    k += 1
                else:
                    j += 1
                    k += 1

    def placing_tools(self):
        """Method for randomly placing the tools that McGyver has to collect
        in order to make the syringe."""
        needle_correctly_placed = False
        tube_correctly_placed = False
        ether_correctly_placed = False
        with open(self.code_file, "r") as f:
            lines = f.readlines()
        while not ether_correctly_placed:
            x_cell_ether = random.randint(2, nbr_cells_on_board-2)
            y_cell_ether = random.randint(2, nbr_cells_on_board-2)
            if lines[y_cell_ether][x_cell_ether] == "C":
                # Stored in pixels to match blit coordinates.
                self.position_tools['ether'] = \
                    (x_cell_ether*lenght_cell, y_cell_ether*lenght_cell)
                ether_correctly_placed = True
        while not tube_correctly_placed:
            x_cell_tube = random.randint(2, nbr_cells_on_board-2)
            y_cell_tube = random.randint(2, nbr_cells_on_board-2)
            if lines[y_cell_tube][x_cell_tube] == "C":
                # Prevent the tube and the ether bottle from sharing a cell.
                if (x_cell_tube,
                        y_cell_tube) != (x_cell_ether, y_cell_ether):
                    self.position_tools['tube'] = \
                        (x_cell_tube* lenght_cell, y_cell_tube* lenght_cell)
                    tube_correctly_placed = True
        while not needle_correctly_placed:
            x_cell_needle = random.randint(2, nbr_cells_on_board-2)
            y_cell_needle = random.randint(2, nbr_cells_on_board-2)
            if lines[y_cell_needle][x_cell_needle] == "C":
                # Avoid placing several tools on the same cell.
                if (x_cell_needle,
                        y_cell_needle) != (x_cell_tube, y_cell_tube):
                    if (x_cell_needle,
                            y_cell_needle) != (x_cell_ether,
                                               y_cell_ether):
                        self.position_tools['needle'] = \
                            (x_cell_needle*lenght_cell,
                             y_cell_needle*lenght_cell)
                        needle_correctly_placed = True
class Character:
    """Player character: cell coordinates, pixel position, and the count of
    tools collected so far."""

    def __init__(self):
        self.x_cell = 0
        self.y_cell = 0
        self.x_pixel_pos = 0
        self.y_pixel_pos = 0
        self.tools_found = 0

    def turning(self, laby, towards):
        """Move one cell in direction `towards` ('right'/'left'/'up'/'down').

        The board edge and wall cells ('M' in laby.code_file) block the move;
        stepping onto a tool collects it. The original's four near-identical
        branches are folded into one delta-driven path, and the unused pygame
        image load was dropped.
        """
        with open(laby.code_file, "r") as f:
            lines = f.readlines()
        deltas = {'right': (1, 0), 'left': (-1, 0), 'up': (0, -1), 'down': (0, 1)}
        if towards not in deltas:
            return
        dx, dy = deltas[towards]
        new_x = self.x_cell + dx
        new_y = self.y_cell + dy
        # Making sure the character stays on the board.
        if not (0 <= new_x < nbr_cells_on_board and 0 <= new_y < nbr_cells_on_board):
            return
        # Making sure the destination cell is not a wall.
        if lines[new_y][new_x] == 'M':
            return
        self._pick_up_tool(laby, (new_x*lenght_cell, new_y*lenght_cell))
        self.x_cell = new_x
        self.y_cell = new_y
        self.x_pixel_pos = self.x_cell*lenght_cell
        self.y_pixel_pos = self.y_cell*lenght_cell

    def _pick_up_tool(self, laby, target):
        # When a tool sits on the destination pixel position, count it and
        # drop it from the dictionary so display_game stops drawing it.
        if target in laby.position_tools.values():
            self.tools_found += 1
            for key in laby.position_tools:
                if laby.position_tools[key] == target:
                    laby.position_tools.pop(key)
                    break
|
{"/classes_and_methods.py": ["/constants.py"], "/McGyver_game.py": ["/classes.py", "/constants.py"], "/classes.py": ["/constants.py"]}
|
5,498
|
RALF34/McGyver_Game
|
refs/heads/master
|
/constants.py
|
# Board geometry shared by every module of the game.
nbr_cells_on_board = 15  # the labyrinth is a 15x15 grid
lenght_cell = 45  # cell size in pixels (sic "lenght": every module uses this spelling)
|
{"/classes_and_methods.py": ["/constants.py"], "/McGyver_game.py": ["/classes.py", "/constants.py"], "/classes.py": ["/constants.py"]}
|
5,499
|
dera1992/biosec
|
refs/heads/main
|
/home/models.py
|
from django.db import models
from simple_history.models import HistoricalRecords
# Create your models here.
# the model for the operation is created here.
class Employee(models.Model):
    """Employee record.

    Rows are never hard-deleted: views.archive flips `active` to False and
    the list endpoint filters on it (soft delete). Changes are tracked by
    django-simple-history through `history`.
    """
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=255)
    phone = models.CharField(max_length=255)
    # NOTE(review): age is stored as free text, not an IntegerField — confirm
    # whether numeric validation is wanted.
    age = models.CharField(max_length=255)
    address = models.CharField(max_length=255)
    email = models.CharField(max_length=255, null=True, blank=True)
    added_on = models.DateTimeField(auto_now_add=True)
    active = models.BooleanField(default=True)  # False == archived
    history = HistoricalRecords()
    objects = models.Manager()

    def __str__(self):
        return self.name
|
{"/home/views.py": ["/home/models.py", "/home/serializers.py"], "/home/serializers.py": ["/home/models.py"]}
|
5,500
|
dera1992/biosec
|
refs/heads/main
|
/home/urls.py
|
from django.urls import path, include
from rest_framework import routers
from . import views

app_name = 'home'

# The DRF router generates the standard CRUD routes for the employee viewset
# under /api/ (list, create, retrieve, update).
router = routers.DefaultRouter()
router.register("employee", views.EmployeeViewset, basename="employee")

urlpatterns = [
    path('api/', include(router.urls)),
    # Soft-delete endpoint (function-based view, DELETE only).
    path('api/archive/<str:pk>', views.archive, name="archive"),
]
|
{"/home/views.py": ["/home/models.py", "/home/serializers.py"], "/home/serializers.py": ["/home/models.py"]}
|
5,501
|
dera1992/biosec
|
refs/heads/main
|
/home/views.py
|
from rest_framework import viewsets, generics
from rest_framework.response import Response
from rest_framework.generics import get_object_or_404
from home.models import Employee
from home.serializers import EmployeeSerializer
from rest_framework.decorators import api_view
#Employee Class Base Viewset for create,retrieve,list and update a record
# Employee class-based viewset for create, retrieve, list and update.
class EmployeeViewset(viewsets.ViewSet):
    """Employee CRUD; archiving lives in the separate `archive` view."""

    def create(self, request):
        """Validate and save a new employee; failures are reported in-band."""
        try:
            serializer = EmployeeSerializer(data=request.data, context={"request": request})
            serializer.is_valid(raise_exception=True)
            serializer.save()
            dict_response = {"error": False, "message": "Employee Data Save Successfully"}
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; the response shape is unchanged.
            dict_response = {"error": True, "message": "Error During Saving Employee Data"}
        return Response(dict_response)

    def list(self, request):
        """Return every active (non-archived) employee."""
        employee = Employee.objects.filter(active=True)
        serializer = EmployeeSerializer(employee, many=True, context={"request": request})
        response_dict = {"error": False, "message": "All Employee List Data", "data": serializer.data}
        return Response(response_dict)

    def retrieve(self, request, pk=None):
        """Return one employee by primary key (404 when absent)."""
        queryset = Employee.objects.all()
        employee = get_object_or_404(queryset, pk=pk)
        serializer = EmployeeSerializer(employee, context={"request": request})
        return Response({"error": False, "message": "Single Data Fetch", "data": serializer.data})

    def update(self, request, pk=None):
        """Replace an employee's data.

        The original called is_valid() without checking the result and then
        save(), which crashed (500) on invalid payloads; raise_exception=True
        now returns a 400 with field errors instead."""
        queryset = Employee.objects.all()
        employee = get_object_or_404(queryset, pk=pk)
        serializer = EmployeeSerializer(employee, data=request.data, context={"request": request})
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response({"error": False, "message": "Data Has Been Updated"})
# Funtion base view of django restframe work for achieving a record
# Function-based DRF view for archiving (soft-deleting) a record.
@api_view(['DELETE'])
def archive(request, pk=None):
    """Mark the employee inactive so the list endpoint no longer returns it.

    Uses get_object_or_404 (already imported at module level) so a missing pk
    yields a clean 404 instead of the original's unhandled DoesNotExist (500).
    """
    employee = get_object_or_404(Employee, id=pk)
    employee.active = False
    employee.save()
    return Response(data='delete success')
|
{"/home/views.py": ["/home/models.py", "/home/serializers.py"], "/home/serializers.py": ["/home/models.py"]}
|
5,502
|
dera1992/biosec
|
refs/heads/main
|
/home/migrations/0004_auto_20210407_0410.py
|
# Generated by Django 3.2 on 2021-04-07 03:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the optional `email` field to Employee and its history table."""

    dependencies = [
        ('home', '0003_historicalemployee'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='email',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # The history table must mirror the tracked model's columns.
        migrations.AddField(
            model_name='historicalemployee',
            name='email',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
{"/home/views.py": ["/home/models.py", "/home/serializers.py"], "/home/serializers.py": ["/home/models.py"]}
|
5,503
|
dera1992/biosec
|
refs/heads/main
|
/home/serializers.py
|
from rest_framework import serializers
from home.models import Employee
#The employee serializer is done here for json generation
class EmployeeSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing every Employee field for JSON generation."""
    class Meta:
        model = Employee
        fields = "__all__"
|
{"/home/views.py": ["/home/models.py", "/home/serializers.py"], "/home/serializers.py": ["/home/models.py"]}
|
5,504
|
dera1992/biosec
|
refs/heads/main
|
/home/migrations/0005_auto_20210407_0427.py
|
# Generated by Django 3.2 on 2021-04-07 03:27
from django.db import migrations
class Migration(migrations.Migration):
    """Drops `joining_date` from Employee and its history table."""

    dependencies = [
        ('home', '0004_auto_20210407_0410'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='employee',
            name='joining_date',
        ),
        # Keep the history table in sync with the tracked model.
        migrations.RemoveField(
            model_name='historicalemployee',
            name='joining_date',
        ),
    ]
|
{"/home/views.py": ["/home/models.py", "/home/serializers.py"], "/home/serializers.py": ["/home/models.py"]}
|
5,505
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/InterconnectedDynamics.py
|
import numpy as np
import networkx as nx
import Setting_Simulation_Value
import OpinionDynamics
import DecisionDynamics
import MakingPandas
import InterconnectedLayerModeling
import matplotlib
import time
matplotlib.use("Agg")
class InterconnectedDynamics:
    """Couples the opinion dynamics on layer A with the decision dynamics on
    layer B of an interconnected two-layer network, recording one row of
    summary properties per simulation step."""

    def __init__(self):
        self.opinion = OpinionDynamics.OpinionDynamics()
        self.decision = DecisionDynamics.DecisionDynamics()
        self.mp = MakingPandas.MakingPandas()

    def interconnected_dynamics(self, setting, inter_layer, gamma, beta):
        """Run setting.Limited_step coupled steps and return a stacked array:
        row 0 holds the initial-state properties, each later row the
        properties after one A-then-B update (15 columns per row)."""
        total_value = np.zeros(15)
        for step_number in range(setting.Limited_step+1):
            if step_number == 0:
                # Step 0: compute the initial probabilities only; no update.
                decision_prob = self.decision.B_state_change_probability_cal(setting, inter_layer, beta)
                opinion_prob = self.opinion.A_state_change_probability_cal(inter_layer, gamma)
                initial_value = self.making_properties_array(setting, inter_layer, gamma, beta, opinion_prob[1],
                                                             opinion_prob[2], decision_prob[1])
                # Adding onto np.zeros(15) keeps row 0 equal to initial_value.
                total_value = total_value + initial_value
            elif step_number >= 1:
                # One coupled step: layer A updates first, then layer B sees
                # the updated network returned by the A-step.
                opinion_result = self.opinion.A_layer_dynamics(setting, inter_layer, gamma)
                decision_result = self.decision.B_layer_dynamics(setting, opinion_result[0], beta)
                array_value = self.making_properties_array(setting, decision_result[0], gamma, beta, opinion_result[1],
                                                           opinion_result[2], decision_result[1])
                total_value = np.vstack([total_value, array_value])
        # Reset the per-run change counters so a subsequent call starts clean.
        self.opinion.A_COUNT = 0
        self.decision.B_COUNT = 0
        return total_value

    def making_properties_array(self, setting, inter_layer, gamma, beta, persuasion_prob, compromise_prob, prob_beta_mean):
        """Assemble one 15-element row: the run parameters, the three
        probabilities, the seven interacting properties from MakingPandas,
        the two layers' edge counts, and the cumulative state-change count."""
        interacting_properties = self.mp.interacting_property(setting, inter_layer)
        change_count = self.opinion.A_COUNT + self.decision.B_COUNT
        array_value = np.array([gamma, beta, prob_beta_mean, persuasion_prob, compromise_prob,
                                interacting_properties[0], interacting_properties[1],
                                interacting_properties[2], interacting_properties[3],
                                interacting_properties[4], interacting_properties[5],
                                interacting_properties[6],
                                len(sorted(inter_layer.A_edges.edges)), len(inter_layer.B_edges),
                                change_count])
        return array_value
if __name__ == "__main__":
print("InterconnectedDynamics")
start = time.time()
setting = Setting_Simulation_Value.Setting_Simulation_Value()
inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
gamma = 0.5
beta = 1.5
state = 0
for i in range(setting.A_node):
state += inter_layer.two_layer_graph.nodes[i]['state']
print(state)
inter_dynamics = InterconnectedDynamics()
array = inter_dynamics.interconnected_dynamics(setting, inter_layer, gamma, beta)
print(array)
state = 0
for i in range(setting.A_node):
state += inter_layer.two_layer_graph.nodes[i]['state']
print(state)
end = time.time()
print(end-start)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,506
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/AnalysisDB.py
|
import Setting_Simulation_Value
import sqlalchemy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sympy import *
import SelectDB
import seaborn as sns
from mpl_toolkits.mplot3d.axes3d import *
matplotlib.use("TkAgg")
class AnalysisDB:
    """Builds summary tables and bar charts from stored simulation results.

    Index terminology used throughout:
      AS  -- average state of the two layers, normalised by layer A's MAX
      PCR -- positive consensus ratio, NCR -- negative consensus ratio
      CR  -- total consensus ratio (PCR + NCR)
    """

    def __init__(self):
        # SelectDB wraps the SQL access used by the making_property_array* methods.
        self.select_db = SelectDB.SelectDB()

    def making_property_array(self, setting, step, a):
        """Return one summary row per (model, node/edge configuration).

        Filters the stored results to the given time ``step``, then for every
        combination of model / node counts / internal edge counts computes the
        aggregate AS, fraction and consensus ratios.  ``a`` is the consensus
        threshold forwarded to calculate_consensus_number (historically 1.9).
        """
        property_array = np.zeros(11)
        df_original = self.select_db.select_data_from_DB(setting)
        df_step = df_original[df_original.Steps == step]
        for m in sorted(df_step['MODEL'].unique()):
            df_model = df_step[df_step.MODEL == m]
            for an in sorted(df_model['A_node_number'].unique()):
                df_an = df_model[df_model.A_node_number == an]
                for bn in sorted(df_an['B_node_number'].unique()):
                    df_bn = df_an[df_an.B_node_number == bn]
                    for ae in sorted(df_bn['A_internal_edges'].unique()):
                        df_ae = df_bn[df_bn.A_internal_edges == ae]
                        for be in sorted(df_ae['B_internal_edges'].unique()):
                            df_be = df_ae[df_ae.B_internal_edges == be]
                            total_AS = self.calculate_total_AS(setting, df_be)
                            total_fraction_ab = self.calculate_fraction_ab_total(df_be)
                            pcr, ncr = self.calculate_consensus_number(setting, df_be, a)
                            row = np.array([m, an, bn, ae, be, step,
                                            total_AS, total_fraction_ab, pcr,
                                            ncr, pcr + ncr])
                            property_array = np.vstack([property_array, row])
        # Row 0 is the zeros placeholder used to seed vstack; drop it.
        property_data = property_array[1:]
        columns = ['Model', 'A nodes', 'B nodes', 'A edges', 'B edges', 'steps', 'AS total', 'Fraction AB total', 'PCR',
                   'NCR', 'CR']
        return pd.DataFrame(property_data, columns=columns)

    def calculate_total_AS(self, setting, df):
        """Mean over rows of the average state ((layer A mean / MAX + layer B mean) / 2)."""
        AS = (df['LAYER_A_MEAN'] / setting.MAX + df['LAYER_B_MEAN']) / 2
        return sum(AS) / len(AS)

    def calculate_consensus_number(self, setting, df, a):
        """Fractions of runs whose signed consensus index exceeds +a / falls below -a.

        The threshold used to be hard-coded at 1.9 / -1.9; it is now the ``a``
        argument so older tables can be analysed with their original cutoff.
        """
        pos_con = 0
        neg_con = 0
        signed_CI = (df['LAYER_A_MEAN'] / setting.MAX) + df['LAYER_B_MEAN']
        for i in range(len(df)):
            if signed_CI.iloc[i] > a:
                pos_con += 1
            elif signed_CI.iloc[i] < -a:
                neg_con += 1
        return pos_con / len(df), neg_con / len(df)

    def calculate_fraction_ab_total(self, df):
        """Mean of FRACTION_A + FRACTION_B over all runs."""
        fraction_ab = df['FRACTION_A'] + df['FRACTION_B']
        return sum(fraction_ab) / len(fraction_ab)

    def making_hist_for_pcr(self, df):
        """Stacked bar chart of PCR (bottom) and NCR (top) per model."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        N = len(df)
        tuples = self.making_tuple_data(df)
        PCRs = tuples[1]
        NCRs = tuples[2]
        ind = np.arange(N)   # one x position per model
        width = 0.3          # bar width
        p1 = ax.bar(ind, PCRs, width, color='SkyBlue')
        p2 = ax.bar(ind, NCRs, width, color='IndianRed', bottom=PCRs)  # stack NCR on top of PCR
        ax.set_ylabel('ratio of state', fontsize=16)
        ax.set_xlabel('model', fontsize=16)
        ax.set_title('consensus ratio of models', fontsize=18)
        ax.set_yticks(np.arange(0, 1.2, 0.2))
        ax.set_xticks(ind)
        ax.set_xticklabels(tuples[0])
        ax.tick_params(labelsize=11)
        plt.legend((p1[0], p2[0]), ("PCR", "NCR"), loc=0, fontsize=14)
        plt.show()
        plt.close()

    def making_hist_for_index(self, df):
        """Grouped bar chart of AS ('CI'), fraction ('FR') and CR per model.

        Bug fix: making_tuple_data returns (model, pcrs, ncrs, As, fr, cr), so
        the AS/FR/CR columns live at indices 3/4/5 -- the previous 4/5/6
        raised IndexError on the last column.
        """
        N = len(df)
        tuples = self.making_tuple_data(df)
        CIs = tuples[3]
        FRs = tuples[4]
        CRs = tuples[5]
        ind = np.arange(N)
        width = 0.2
        fig, ax = plt.subplots()
        p1 = ax.bar(ind - width, CIs, width, color='SkyBlue', label='AS')
        p2 = ax.bar(ind, FRs, width, color='IndianRed', label='FR')
        p3 = ax.bar(ind + width, CRs, width, color='green', label='CR')
        ax.set_ylabel('values', fontsize=18)
        ax.set_xlabel('model', fontsize=18)
        ax.set_title('consensus index of models', fontsize=18)
        ax.set_yticks(np.arange(0, 2.2, 0.2))
        ax.set_xticks(ind)
        ax.set_xticklabels(tuples[0])
        ax.tick_params(labelsize=8)
        plt.legend((p1[0], p2[0], p3[0]), ("CI", "FR", "CR"), loc=0, fontsize=14)
        plt.show()
        plt.close()

    def making_property_array_for_new(self, setting, step):
        """Like making_property_array but for the newer table layout with an AS column."""
        property_array = np.zeros(5)
        df_original = self.select_db.select_data_from_DB(setting)
        df_step = df_original[df_original.Steps == step]
        for m in sorted(df_step['MODEL'].unique()):
            df_model = df_step[df_step.MODEL == m]
            total_AS = self.calculate_total_AS_new(df_model)
            pcr, ncr = self.calculate_consensus_number_new(df_model)
            row = np.array([m, total_AS, pcr, ncr, pcr + ncr])
            property_array = np.vstack([property_array, row])
        property_data = property_array[1:]
        columns = ['Model', 'AS total', 'PCR', 'NCR', 'CR']
        return pd.DataFrame(property_data, columns=columns)

    def making_mixed_hist(self, df):
        """Stacked PCR/NCR bars beside an AS bar, per model (old table layout)."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        N = len(df)
        tuples = self.making_tuple_data(df)
        PCRs = tuples[1]
        NCRs = tuples[2]
        CIs = tuples[3]
        ind = np.arange(N)
        width = 0.2
        p1 = ax.bar(ind - (width / 2), PCRs, width, color='SkyBlue')
        p2 = ax.bar(ind - (width / 2), NCRs, width, color='IndianRed', bottom=PCRs)  # stacked on PCR
        p3 = ax.bar(ind + (width / 2), CIs, width, color='palegreen', label='AS')
        ax.set_xlabel('model', fontsize=16)
        ax.set_title('Competition results', fontsize=18)
        ax.set_yticks(np.arange(0, 1.2, 0.2))
        ax.set_xticks(ind)
        ax.set_xticklabels(tuples[0])
        ax.tick_params(labelsize=13)
        plt.legend((p1[0], p2[0], p3[0]), ("PCR", "NCR", "AS total"), loc=0, fontsize=14)
        plt.show()
        plt.close()

    def making_tuple_data(self, df):
        """Unpack an old-layout summary frame into per-column tuples.

        Returns (model, pcrs, ncrs, As, fr, cr).  float() replaces the
        original eval(): it accepts both numeric and string cells (pickled
        frames may hold either) and cannot execute arbitrary code.
        """
        model = tuple(df['Model'])
        pcrs = tuple(float(i) for i in df['PCR'])
        ncrs = tuple(float(i) for i in df['NCR'])
        As = tuple(float(i) for i in df['AS total'])
        fr = tuple(float(i) for i in df['Fraction AB total'])
        cr = tuple(float(i) for i in df['CR'])
        return model, pcrs, ncrs, As, fr, cr

    def calculate_total_AS_new(self, df):
        """Mean of the stored AS column."""
        return sum(df['AS']) / len(df)

    def calculate_consensus_number_new(self, df):
        """PCR/NCR using the fixed +/-0.95 threshold of the new table layout."""
        pos_con = 0
        neg_con = 0
        signed_CI = df['AS']
        for i in range(len(df)):
            if signed_CI.iloc[i] > 0.95:
                pos_con += 1
            elif signed_CI.iloc[i] < -0.95:
                neg_con += 1
        return pos_con / len(df), neg_con / len(df)

    def making_tuple_data_new(self, df):
        """Unpack a new-layout summary frame into (model, As, pcrs, ncrs, cr).

        float() replaces eval() for the same robustness/safety reasons as in
        making_tuple_data.
        """
        model = tuple(df['Model'])
        As = tuple(float(i) for i in df['AS total'])
        pcrs = tuple(float(i) for i in df['PCR'])
        ncrs = tuple(float(i) for i in df['NCR'])
        cr = tuple(float(i) for i in df['CR'])
        return model, As, pcrs, ncrs, cr

    def making_mixed_hist_new(self, df):
        """Stacked PCR/NCR bars beside an AS bar, per model (new table layout)."""
        fig = plt.figure()
        ax = fig.add_subplot(111)
        N = len(df)
        tuples = self.making_tuple_data_new(df)
        CIs = tuples[1]
        PCRs = tuples[2]
        NCRs = tuples[3]
        ind = np.arange(N)
        width = 0.2
        p1 = ax.bar(ind - (width / 2), PCRs, width, color='SkyBlue')
        p2 = ax.bar(ind - (width / 2), NCRs, width, color='IndianRed', bottom=PCRs)  # stacked on PCR
        p3 = ax.bar(ind + (width / 2), CIs, width, color='palegreen', label='AS')
        ax.set_xlabel('model', fontsize=16)
        ax.set_title('Competition results', fontsize=18)
        ax.set_yticks(np.arange(0, 1.2, 0.2))
        ax.set_xticks(ind)
        ax.set_xticklabels(tuples[0])
        ax.tick_params(labelsize=13)
        plt.legend((p1[0], p2[0], p3[0]), ("PCR", "NCR", "AS total"), loc=0, fontsize=14)
        plt.show()
        plt.close()
#'CI total', 'Fraction AB total', 'CR'
if __name__ == "__main__":
    print("AnalysisDB")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    analysis_db = AnalysisDB()
    # Load the pre-aggregated summary frame 'df1' (built earlier via
    # making_property_array over the 'competition' and 'paper_revised_data'
    # tables) and draw the "different structures" comparison (paper fig. 6).
    # Earlier variants selected other row subsets: rows [4, 5, 6, 9] for the
    # internal-edge comparison (fig. 5) and rows [0, 2, 4, 6, 1, 3, 5] with
    # hierarchical-model labels for fig. 4.
    df = pd.read_pickle('df1')
    df = df.loc[[9, 8, 1, 0, 3], :]
    df['Model'] = ['RR(5)-RR(5)', 'RR(5)-BA', 'BA-RR(5)', 'BA-BA', 'RR(10)-RR(5)']
    analysis_db.making_mixed_hist_new(df)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,507
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/Layer_B_Modeling.py
|
import networkx as nx
import numpy as np
import pandas as pd
import random
import Setting_Simulation_Value
import math
## B layer : B, B_edges
class Layer_B_Modeling:
    """Builds layer B: an internal-edge structure plus a state-annotated graph."""

    def __init__(self, setting):
        # Build the raw edge structure first; the node metadata and the final
        # annotated graph are both derived from it.
        self.B_edges = self.B_layer_config(setting)
        self.B_node_info = self.making_node_info()
        self.G_B = self.making_layer_B_graph(setting)

    def making_layer_B_graph(self, setting):
        """Return a graph whose nodes carry a display name and the initial B state."""
        graph = nx.Graph()
        for node in range(setting.B_node):
            graph.add_node(node, name='B_%s' % node, state=setting.B[node])
        graph.add_edges_from(sorted(self.B_edges.edges))
        self.G_B = graph
        return self.G_B

    def B_layer_config(self, setting):
        """Create the internal edge structure for layer B and return it."""
        self.select_layer_B_model(setting)
        return self.B_edges

    def select_layer_B_model(self, setting):
        """Dispatch on the 'X-Y' structure string: the Y part picks B's topology."""
        topology = setting.Structure.split('-')[1]
        if topology == 'RR':
            self.making_layer_B_random_regular(setting)
        elif topology == 'BA':
            self.making_layer_B_barabasi_albert(setting)
        return self.B_edges

    def making_layer_B_random_regular(self, setting):
        """Random-regular internal network: B_node nodes, each of degree B_edge."""
        self.B_edges = nx.random_regular_graph(setting.B_edge, setting.B_node, seed=None)
        return self.B_edges

    def making_layer_B_barabasi_albert(self, setting):
        """Barabasi-Albert internal network: B_node nodes, B_edge attachments each."""
        self.B_edges = nx.barabasi_albert_graph(setting.B_node, setting.B_edge, seed=None)
        return self.B_edges

    def making_node_info(self):
        """One record per node: its number, the layer tag and a random 2-D location."""
        records = [{'node_number': n, 'layer': 'B',
                    'location': (random.random(), random.random())}
                   for n in sorted(self.B_edges.nodes)]
        return pd.DataFrame(records, columns=['node_number', 'layer', 'location'])
if __name__ == "__main__":
    # Smoke test: build layer B, show its node list and the summed node state.
    print("layer_B")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    Layer_B = Layer_B_Modeling(setting)
    print(Layer_B.G_B.nodes)
    state = sum(Layer_B.G_B.nodes[n]['state'] for n in range(len(Layer_B.G_B.nodes)))
    print(state)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,508
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/OpinionDynamics.py
|
import random
import Setting_Simulation_Value
import InterconnectedLayerModeling
import time
import math
import networkx as nx
import numpy as np
class OpinionDynamics:
    """One sweep of the opinion dynamics acting on layer A of the network.

    Node states are nonzero integers: the sign is the opinion orientation and
    the magnitude the conviction.  Updates skip 0, jumping between +1 and -1
    (see A_layer_node_left / A_layer_node_right).  ``gamma`` sets the
    persuasion probability p = gamma / (1 + gamma).
    """
    def __init__(self):
        # Running count of elementary (one-unit) state changes applied.
        self.A_COUNT = 0
    def A_layer_dynamics(self, setting, inter_layer, gamma):  # layer-A dynamics: applies gamma and the persuasion/compromise algorithm
        """Sweep every A-internal edge and every A-B edge once.

        Mutates node states in inter_layer.two_layer_graph in place and returns
        (inter_layer, persuasion_prob, compromise_prob) -- the probabilities
        are event counts normalised by the number of edges touching layer A.
        """
        prob_p = gamma / (1+gamma)
        persuasion_count = 0
        compromise_count = 0
        total_edges_layer_A = len(sorted(inter_layer.A_edges.edges())) + len(sorted(inter_layer.AB_edges))
        for i, j in sorted(inter_layer.A_edges.edges()):
            a = inter_layer.two_layer_graph.nodes[i]['state']
            b = inter_layer.two_layer_graph.nodes[j]['state']
            if a * b > 0:
                # Same orientation: with probability p both endpoints are reinforced.
                z = random.random()
                if z < prob_p:
                    persuasion = self.A_layer_persuasion_function(setting, inter_layer.two_layer_graph.nodes[i],
                                                                 inter_layer.two_layer_graph.nodes[j])
                    inter_layer.two_layer_graph.nodes[i]['state'] = persuasion[0]
                    inter_layer.two_layer_graph.nodes[j]['state'] = persuasion[1]
                    persuasion_count += 1
            elif a * b < 0:
                # Opposite orientation: with probability 1-p both move toward each other.
                z = random.random()
                if z < (1 - prob_p):
                    compromise = self.A_layer_compromise_function(setting, inter_layer.two_layer_graph.nodes[i],
                                                                 inter_layer.two_layer_graph.nodes[j], prob_p, z)
                    inter_layer.two_layer_graph.nodes[i]['state'] = compromise[0]
                    inter_layer.two_layer_graph.nodes[j]['state'] = compromise[1]
                    compromise_count += 1
        for i, j in sorted(inter_layer.AB_edges):
            # NOTE(review): here j appears to be the A-layer node id and i the
            # global id of the coupled B node (the i/j roles are swapped relative
            # to the loop above) -- confirm against InterconnectedLayerModeling.
            # Only the A-side state (node j) is ever changed on these edges.
            a = inter_layer.two_layer_graph.nodes[j]['state']
            b = inter_layer.two_layer_graph.nodes[i]['state']
            if a * b > 0:
                z = random.random()
                if z < prob_p:
                    inter_layer.two_layer_graph.nodes[j]['state'] \
                        = self.AB_layer_persuasion_function(setting, inter_layer.two_layer_graph.nodes[j])
                    persuasion_count += 1
            elif a * b < 0:
                z = random.random()
                if z < (1 - prob_p):
                    inter_layer.two_layer_graph.nodes[j]['state'] \
                        = self.AB_layer_compromise_function(setting, inter_layer.two_layer_graph.nodes[j])
                    compromise_count += 1
        persuasion_prob = persuasion_count / total_edges_layer_A
        compromise_prob = compromise_count / total_edges_layer_A
        return inter_layer, persuasion_prob, compromise_prob
    def A_layer_persuasion_function(self, setting, a, b):  # same-orientation update between two layer-A nodes
        """Push both node dicts' states one step outward (toward MAX or MIN)."""
        if (a['state']) > 0 and (b['state']) > 0:
            a['state'] = self.A_layer_node_right(a, setting.MAX)
            b['state'] = self.A_layer_node_right(b, setting.MAX)
        elif (a['state']) < 0 and (b['state']) < 0:
            a['state'] = self.A_layer_node_left(a, setting.MIN)
            b['state'] = self.A_layer_node_left(b, setting.MIN)
        return a['state'], b['state']
    def A_layer_compromise_function(self, setting, a, b, prob_p, z):  # opposite-orientation update between two layer-A nodes
        """Pull both node dicts' states one step toward each other.

        Special case: states +1/-1 (product == -1) collapse together to +1 or
        -1, the side chosen by the caller's random draw ``z`` against
        (1 - prob_p) / 2.
        """
        if (a['state']) * (b['state']) == -1:
            if z < ((1 - prob_p) / 2):
                (a['state']) = 1
                (b['state']) = 1
            elif z > ((1 - prob_p) / 2):
                a['state'] = -1
                b['state'] = -1
        elif (a['state']) > 0:
            a['state'] = self.A_layer_node_left(a, setting.MIN)
            b['state'] = self.A_layer_node_right(b, setting.MAX)
        elif (a['state']) < 0:
            a['state'] = self.A_layer_node_right(a, setting.MAX)
            b['state'] = self.A_layer_node_left(b, setting.MIN)
        return a['state'], b['state']
    def AB_layer_persuasion_function(self, setting, a):  # same-orientation update on an A-B edge
        """Reinforce the A-side node's conviction by one step."""
        if (a['state']) > 0:
            a['state'] = self.A_layer_node_right(a, setting.MAX)
        elif (a['state']) < 0:
            a['state'] = self.A_layer_node_left(a, setting.MIN)
        return a['state']
    def AB_layer_compromise_function(self, setting, a):  # opposite-orientation update on an A-B edge
        """Weaken the A-side node's conviction by one step (toward the other sign)."""
        if (a['state']) > 0:
            a['state'] = self.A_layer_node_left(a, setting.MIN)
        elif (a['state']) < 0:
            a['state'] = self.A_layer_node_right(a, setting.MAX)
        return a['state']
    def A_layer_node_left(self, a, Min):
        """Step the state one unit toward Min, skipping 0 (+1 jumps to -1); clamp at Min."""
        if (a['state']) > Min:
            if (a['state']) < 0 or (a['state']) > 1:
                (a['state']) = (a['state']) - 1
                self.A_COUNT += 1
            elif (a['state']) == 1:
                a['state'] = -1
                self.A_COUNT += 1
        elif (a['state']) <= Min:
            (a['state']) = Min
        return a['state']
    def A_layer_node_right(self, a, Max):
        """Step the state one unit toward Max, skipping 0 (-1 jumps to +1); clamp at Max."""
        if (a['state']) < Max:
            if (a['state']) > 0 or (a['state']) < -1:
                a['state'] = (a['state']) + 1
                self.A_COUNT += 1
            elif (a['state']) == -1:
                a['state'] = 1
                self.A_COUNT += 1
        elif (a['state']) >= Max:
            a['state'] = Max
        return a['state']
    def A_state_change_probability_cal(self, inter_layer, gamma):
        """Analytic per-node probabilities of staying / being persuaded / compromising.

        For each layer-A node, counts same- and opposite-orientation neighbours
        and sums binomial terms: with n agreeing draws succeeding (prob p each)
        and m disagreeing draws succeeding (prob 1-p each), the node is
        persuaded when n > m, compromised when n < m, unchanged when n == m.
        Returns (prob_array, persuasion_prob, compromise_prob); prob_array rows
        are cumulative thresholds (unchanged, +persuasion, +compromise).
        """
        prob_p = gamma / (1+gamma)
        prob_list = []
        prob_per_list = []
        prob_com_list = []
        for node_i in sorted(inter_layer.A_edges):
            neighbors = np.array(sorted(nx.neighbors(inter_layer.two_layer_graph, node_i)))
            neighbor_state = []
            for neighbor in neighbors:
                neighbor_state.append(inter_layer.two_layer_graph.nodes[neighbor]['state'])
            neighbor_array = np.array(neighbor_state)
            # Same orientation <=> the product of states is positive.
            same_orientation = int(np.sum(neighbor_array * inter_layer.two_layer_graph.nodes[node_i]['state'] > 0))
            opposite_orientation = len(neighbors) - same_orientation
            node_unchanging_prob = 0
            node_persuasion_prob = 0
            node_compromise_prob = 0
            for n in range(0, same_orientation + 1):
                for m in range(0, opposite_orientation + 1):
                    n_combi = self.nCr(same_orientation, n)
                    m_combi = self.nCr(opposite_orientation, m)
                    if n == m:
                        node_unchanging_prob += prob_p ** (n + opposite_orientation - m) * (
                                (1 - prob_p) ** (same_orientation - n + m)) * n_combi * m_combi
                    elif n > m:
                        node_persuasion_prob += prob_p ** (n + opposite_orientation - m) * (
                                (1 - prob_p) ** (same_orientation - n + m)) * n_combi * m_combi
                    elif n < m:
                        node_compromise_prob += prob_p ** (n + opposite_orientation - m) * (
                                (1 - prob_p) ** (same_orientation - n + m)) * n_combi * m_combi
            prob_list.append((node_unchanging_prob, node_unchanging_prob+node_persuasion_prob,
                              node_unchanging_prob+node_persuasion_prob+node_compromise_prob))
            prob_per_list.append(node_persuasion_prob)
            prob_com_list.append(node_compromise_prob)
        prob_array = np.array(prob_list)
        persuasion_prob = sum(prob_per_list) / len(sorted(inter_layer.A_edges))
        compromise_prob = sum(prob_com_list) / len(sorted(inter_layer.A_edges))
        return prob_array, persuasion_prob, compromise_prob
    def nCr(self, n, r):
        """Binomial coefficient C(n, r) via integer factorial division."""
        f = math.factorial
        return f(n) // f(r) // f(n - r)
if __name__ == "__main__":
    # Smoke test: compare the analytic change probabilities against one
    # simulated sweep of the A-layer dynamics, printing the total A state
    # before and after the sweep.
    print("OpinionDynamics")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
    state = sum(inter_layer.two_layer_graph.nodes[n]['state'] for n in range(setting.A_node))
    print(state)
    opinion = OpinionDynamics()
    start = time.time()
    prob = opinion.A_state_change_probability_cal(inter_layer, 0.3)
    print(prob[1], prob[2])
    opinion_result = opinion.A_layer_dynamics(setting, inter_layer, 0.3)
    print(opinion_result[1])
    print(opinion_result[2])
    state = sum(inter_layer.two_layer_graph.nodes[n]['state'] for n in range(setting.A_node))
    print(state)
    end = time.time()
    print(end - start)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,509
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/DB_Management.py
|
import mysql.connector
class DB_Management:
    """Maintenance helpers for the simulation result tables."""

    def drop_duplicate_row(self, setting):
        """Remove duplicate result rows from ``setting.table``.

        Duplicates are collapsed by copying the rows into an intermediate
        ``<table>_copy`` with GROUP BY over the parameter columns (one row per
        parameter combination survives), then dropping the original table and
        renaming the copy back.

        Table/database names cannot be bound as SQL parameters, so they are
        interpolated directly; ``setting.table`` / ``setting.database`` must
        come from trusted configuration, never from user input.
        """
        creating_intermediate_table = ('''
            CREATE TABLE %s_copy LIKE %s;''' % (setting.table, setting.table))
        insert_to_intermediate_table = ('''
            INSERT INTO %s_copy
            SELECT * FROM %s
            GROUP BY
            Structure,  A_node_number, B_node_number, A_internal_edges, B_internal_edges,
            A_external_edges, B_external_edges, beta, gamma, Steps;''' % (setting.table, setting.table))
        # Bug fix: cursor.execute() runs a single statement unless multi=True,
        # so the original combined "DROP ...; ALTER ..." string failed before
        # the copy was ever renamed back.  Run the two statements separately.
        drop_original_table = 'DROP TABLE %s;' % setting.table
        rename_copy_table = 'ALTER TABLE %s_copy RENAME TO %s;' % (setting.table, setting.table)
        # TODO(review): credentials are hard-coded; move them to configuration.
        cnx = mysql.connector.connect(user='root', password='2853',
                                      host='127.0.0.1', database=setting.database)
        try:
            cur = cnx.cursor(buffered=True)
            cur.execute(creating_intermediate_table)
            cur.execute(insert_to_intermediate_table)
            cur.execute(drop_original_table)
            cur.execute(rename_copy_table)
            cnx.commit()
        finally:
            # Always release the connection, even if a statement fails.
            cnx.close()
if __name__ == "__main__":
    # Manual entry point; the example calls below are kept for reference and
    # must be pointed at a real database before being uncommented.
    print("DB_Management")
    #setting = Setting_Simulation_Value.Setting_Simulation_Value()
    #db_management = DB_Management(setting)
    #db_management.drop_duplicate_row()
    print("DB_Management_finished")
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,510
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/Interconnected_Network_Visualization.py
|
from pymnet import *
import matplotlib.pyplot as plt
import Setting_Simulation_Value
import InterconnectedLayerModeling
from mpl_toolkits.mplot3d.axes3d import *
import matplotlib.animation as animation
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.image import imread
import matplotlib
matplotlib.use("TkAgg")
class Interconnected_Network_Visualization:
    """Renders the two-layer network with pymnet and can stitch frames into a movie."""
    def making_layer_A_graph(self, setting, inter_layer, interconnected_network):
        """Add layer_A with its nodes (0..A_node-1) and internal edges to the pymnet network."""
        interconnected_network.add_layer('layer_A')
        for i in range(setting.A_node):
            interconnected_network.add_node(i)
        for i, j in sorted(inter_layer.A_edges.edges):
            interconnected_network[i, j, 'layer_A'] = 1
        return interconnected_network
    def making_layer_B_graph(self, setting, inter_layer, interconnected_network):
        """Add layer_B; B edges carry global ids (offset by A_node) and are shifted back."""
        interconnected_network.add_layer('layer_B')
        for i in range(setting.B_node):
            interconnected_network.add_node(i)
        for i, j in sorted(inter_layer.B_edges):
            interconnected_network[i-setting.A_node, j-setting.A_node, 'layer_B'] = 1
        return interconnected_network
    def making_interconnected_layer(self, setting, inter_layer):
        """Build the full pymnet multilayer network including the A-B couplings."""
        interconnected_network = MultilayerNetwork(aspects=1)
        self.making_layer_A_graph(setting, inter_layer, interconnected_network)
        self.making_layer_B_graph(setting, inter_layer, interconnected_network)
        for i, j in sorted(inter_layer.AB_edges):
            # j is an A-layer id; i is a global B id, hence the offset on the B side.
            interconnected_network[j, 'layer_A'][i-setting.A_node, 'layer_B'] = 1
        return interconnected_network
    def making_node_color(self, setting, inter_layer):
        """Map every (node, layer) key to a colour chosen by the node's state."""
        node_color_dic = {}
        for i in range(setting.A_node):
            node_color_dic[(i, 'layer_A')] = setting.NodeColorDict[inter_layer.two_layer_graph.nodes[i]['state']]
        for i in range(setting.B_node):
            node_color_dic[(i, 'layer_B')] = setting.NodeColorDict[inter_layer.two_layer_graph.nodes[i+setting.A_node]['state']]
        return node_color_dic
    def making_edge_color(self, setting, inter_layer):
        """Colour every edge by the sign of the product of its endpoint states."""
        edge_color_dic = {}
        for i, j in sorted(inter_layer.A_edges.edges):
            a = inter_layer.two_layer_graph.nodes[i]['state']
            b = inter_layer.two_layer_graph.nodes[j]['state']
            edge_color_dic[(i, 'layer_A'), (j, 'layer_A')] = setting.EdgeColorDict[a * b]
        for i, j in sorted(inter_layer.B_edges):
            a = inter_layer.two_layer_graph.nodes[i]['state']
            b = inter_layer.two_layer_graph.nodes[j]['state']
            edge_color_dic[(i-setting.A_node, 'layer_B'), (j-setting.A_node, 'layer_B')] = setting.EdgeColorDict[a * b]
        for i, j in sorted(inter_layer.AB_edges):
            a = inter_layer.two_layer_graph.nodes[j]['state']
            b = inter_layer.two_layer_graph.nodes[i]['state']
            # NOTE(review): making_interconnected_layer keys this coupling as
            # (j, 'layer_A') / (i - A_node, 'layer_B'), but here the offset is
            # applied to the A-side key instead -- these look inconsistent;
            # confirm which keying pymnet's edgeColorDict actually expects.
            edge_color_dic[(j-setting.A_node, 'layer_A'), (i, 'layer_B')] = setting.EdgeColorDict[a * b]
        return edge_color_dic
    def making_node_coordinates(self, setting, inter_layer):
        """Collect per-node 2-D positions for pymnet's nodeCoords argument."""
        node_coordinates_dic = {}
        for i in range(setting.A_node):
            node_coordinates_dic[i] = np.array(inter_layer.A_node_info['location'][i])
        for i in range(setting.B_node):
            # NOTE(review): this loop reuses keys 0..B_node-1 and therefore
            # overwrites the layer-A coordinates written above, so both layers
            # are drawn with layer B's locations.  Verify this is intended.
            node_coordinates_dic[i] = np.array(inter_layer.B_node_info['location'][i])
        return node_coordinates_dic
    def draw_interconnected_network(self, setting, inter_layer, save_file_name):
        """Draw the multilayer network, save it to save_file_name and return
        (image_array, figure); the image is read back from the saved file."""
        fig = plt.figure()
        ax = plt.axes(projection='3d')
        draw(self.making_interconnected_layer(setting, inter_layer), layout='circular', layergap=1.3,
             layershape='rectangle', nodeCoords=self.making_node_coordinates(setting, inter_layer), nodelayerCoords={},
             layerPadding=0.05, alignedNodes=True, ax=ax, layerColorDict={'layer_A': 'pink', 'layer_B': 'steelblue'},
             layerColorRule={}, edgeColorDict = self.making_edge_color(setting, inter_layer), edgeColorRule={},
             edgeWidthDict={}, edgeWidthRule={}, defaultEdgeWidth=0.15, edgeStyleDict={},
             edgeStyleRule={'rule': 'edgetype', 'inter': ':', 'intra': '-'}, defaultEdgeStyle='-',
             nodeLabelDict={}, nodeLabelRule={}, defaultNodeLabel=None,
             nodeColorDict=self.making_node_color(setting, inter_layer), nodeColorRule={}, defaultNodeColor=None,
             nodeLabelColorDict={}, nodeLabelColorRule={}, defaultNodeLabelColor='k',
             nodeSizeDict={}, nodeSizeRule={'scalecoeff': 0.2, 'rule': 'scaled'}, defaultNodeSize=None)
        plt.savefig(save_file_name)
        im = plt.imread(save_file_name)
        return np.array(im), fig
    def making_movie_for_dynamics(self, ims):
        """Stitch a list of image arrays into 'dynamics2.mp4' (one second per frame)."""
        dpi = 72
        x_pixels, y_pixels = ims[0].shape[0], ims[0].shape[1]
        # Size the figure so the saved frames match the snapshot pixel size.
        fig = plt.figure(figsize=(y_pixels / dpi, x_pixels / dpi), dpi=dpi)
        im = plt.figimage(ims[0])
        def animate(i):
            im.set_array(ims[i])
            return (im,)
        ani = animation.FuncAnimation(fig, animate, frames=len(ims), repeat=False, interval=1000)
        ani.save('dynamics2.mp4')
if __name__ == "__main__":
    # Render the interconnected network once, save it and display the figure.
    print("Interconnected Layer Modeling")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
    visualizer = Interconnected_Network_Visualization()
    image, fig = visualizer.draw_interconnected_network(setting, inter_layer, 'result.png')
    plt.show()
    print("Operating finished")
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,511
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/RepeatDynamics.py
|
import numpy as np
import Setting_Simulation_Value
import InterconnectedDynamics
import InterconnectedLayerModeling
import MakingPandas
import time
class RepeatDynamics:
    """Averages interconnected-layer dynamics over repeated simulation runs."""

    def __init__(self):
        # Dynamics engine and DataFrame builder are shared across all runs.
        self.inter_dynamics = InterconnectedDynamics.InterconnectedDynamics()
        self.mp = MakingPandas.MakingPandas()

    def repeat_dynamics(self, setting, gamma, beta):
        """Run the dynamics ``setting.Repeating_number`` times on fresh graphs.

        Returns a DataFrame of the per-step results averaged over all runs.
        """
        accumulated = np.zeros([setting.Limited_step + 1, 15])
        for _ in range(setting.Repeating_number):
            # A fresh random two-layer graph is generated for every repetition.
            inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
            accumulated += self.inter_dynamics.interconnected_dynamics(setting, inter_layer, gamma, beta)
        averaged = accumulated / setting.Repeating_number
        return self.mp.making_dataframe_per_step(setting, averaged)
if __name__ == "__main__":
    # Time a single averaged run with fixed (gamma, beta) parameters.
    print("RepeatDynamics")
    started_at = time.time()
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    gamma = 0.2
    beta = 1.5
    runner = RepeatDynamics()
    print(runner.repeat_dynamics(setting, gamma, beta))
    print(time.time() - started_at)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,512
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/MyWindow.py
|
import sys
from pymnet import *
import InterconnectedLayerModeling
from Setting_Simulation_Value import *
import Layer_A_Modeling
import Layer_B_Modeling
import Changing_Variable
import Visualization
import Interconnected_Network_Visualization
import DB_Management
import SelectDB
import seaborn as sns
import pandas as pd
import PyQt5
from PyQt5.QtWidgets import *
from matplotlib.image import imread
import matplotlib.pyplot as plt
import time
import numpy as np
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QTableWidgetItem, QWidget, QLabel, QScrollArea, QTableWidget
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtCore import QDir, Qt, QUrl
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import (QApplication, QFileDialog, QHBoxLayout, QLabel,
QPushButton, QSizePolicy, QSlider, QStyle, QVBoxLayout, QWidget)
from PyQt5.QtWidgets import QMainWindow,QWidget, QPushButton, QAction
from PyQt5.QtGui import QIcon
from mpl_toolkits.mplot3d.axes3d import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib
# Force the Tk backend before any figure is created.
matplotlib.use("TkAgg")
# Load the Qt Designer layout; index 0 is the generated form class to mix in.
WindowModel = uic.loadUiType("mainwindow.ui")[0]
class MyWindow(QMainWindow, WindowModel):
    """Main application window.

    Wires the Qt Designer form ("mainwindow.ui") to the simulation driver,
    the DB helpers and the plotting utilities.  Each button handler receives
    the shared ``setting`` object, bound via a lambda default argument at
    connect time, and reads its widgets at click time.
    """

    def __init__(self, setting):
        QMainWindow.__init__(self, None)
        self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.setupUi(self)
        # Collaborators: simulation driver, plotting helpers and DB access.
        self.changing_variable = Changing_Variable.Changing_Variable()
        self.visualization = Visualization.Visualization()
        self.network = Interconnected_Network_Visualization.Interconnected_Network_Visualization()
        self.db_manager = DB_Management.DB_Management()
        self.select_db = SelectDB.SelectDB()
        self.select_sql = SelectDB.SelectSQlite()
        # `sets=setting` freezes the reference so every handler mutates the same object.
        self.condition_settingButton.clicked.connect(lambda state, sets=setting: self.condition_setting(state, sets))
        self.Simulation_Start.clicked.connect(lambda state, sets=setting: self.doing_simulation(state, sets))
        self.Initial_State_Button.clicked.connect(lambda state, sets=setting: self.initial_state_graph(state, sets))
        self.Total_Result_Button.clicked.connect(lambda state, sets=setting: self.total_result_graph(state, sets))
        self.result_gamma_Button.clicked.connect(lambda state, sets=setting: self.result_gamma_graph(state, sets))
        self.result_beta_Button.clicked.connect(lambda state, sets=setting: self.result_beta_graph(state, sets))
        self.prob_beta_Button.clicked.connect(lambda state, sets=setting: self.prob_beta_graph(state, sets))
        self.different_ratio_Button.clicked.connect(lambda state, sets=setting: self.different_state_ratio_graph(state, sets))
        self.play_movie_Button.clicked.connect(self.making_movie_function)
        self.drop_duplicate_Button.clicked.connect(lambda state, sets=setting: self.db_drop_duplicate_row(state, sets))
        self.duplicate_Button.clicked.connect(lambda state, sets=setting: self.duplicate_db_func(state, sets))
        self.select_db_Button.clicked.connect(lambda state, sets=setting: self.select_db_func(state, sets))

    def making_df(self, setting):
        """Load the result DataFrame from whichever backend ``setting.DB`` names."""
        df = pd.DataFrame()
        if setting.DB == 'MySQL':
            df = self.select_db.select_data_from_setting(setting)
        elif setting.DB == 'SQLITE':
            df = self.select_sql.select_data_from_sqlite(setting)
        return df

    def initial_state_graph(self, state, setting):
        """Draw a freshly generated two-layer network, inline or in a new window."""
        print('drawing initial state...')
        self.Initial_State_layout.takeAt(0)
        inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
        fig = self.network.draw_interconnected_network(setting, inter_layer, 'result.png')[1]
        if self.display_locBox_2.currentText() == 'outer graph':
            plt.show()
        elif self.display_locBox_2.currentText() == 'inner graph':
            canvas = FigureCanvas(fig)
            layout = self.Initial_State_layout
            layout.addWidget(canvas)
            canvas.draw()
            canvas.show()

    def total_result_graph(self, state, setting):
        """Plot the beta-gamma-AS surface in the chosen style and location.

        Styles: scatter, trisurf, contour2D, contour3D; each can render to an
        external matplotlib window ('outer graph') or an embedded canvas
        ('inner graph').
        """
        print('drawing total result...')
        if self.display_typeBox.currentText() == 'scatter':
            print('scatter type...')
            if self.display_locBox.currentText() == 'outer graph':
                plt.figure()
                plt.style.use('seaborn-whitegrid')
                df = self.making_df(setting)
                self.visualization.plot_3D_scatter_for_average_state(setting, df)
                plt.show()
                plt.close()
            elif self.display_locBox.currentText() == 'inner graph':
                self.Total_Result_layout.takeAt(0)
                fig = plt.figure()
                plt.style.use('seaborn-whitegrid')
                df = self.making_df(setting)
                self.visualization.plot_3D_scatter_for_average_state(setting, df)
                canvas = FigureCanvas(fig)
                layout = self.Total_Result_layout
                layout.addWidget(canvas)
                canvas.draw()
                canvas.show()
                plt.close()
        elif self.display_typeBox.currentText() == 'trisurf':
            print('trisurf type...')
            if self.display_locBox.currentText() == 'outer graph':
                plt.figure()
                plt.style.use('seaborn-whitegrid')
                df = self.making_df(setting)
                self.visualization.plot_3D_trisurf_for_average_state(setting, df)
                plt.show()
                plt.close()
            elif self.display_locBox.currentText() == 'inner graph':
                self.Total_Result_layout.takeAt(0)
                fig = plt.figure()
                plt.style.use('seaborn-whitegrid')
                df = self.making_df(setting)
                self.visualization.plot_3D_trisurf_for_average_state(setting, df)
                canvas = FigureCanvas(fig)
                layout = self.Total_Result_layout
                layout.addWidget(canvas)
                canvas.draw()
                canvas.show()
                plt.close()
        elif self.display_typeBox.currentText() == 'contour2D':
            print('contour2D type...')
            if self.display_locBox.currentText() == 'outer graph':
                fig = plt.figure()
                plt.style.use('seaborn-whitegrid')
                ax = fig.add_subplot(111)
                ax.tick_params(axis='both', labelsize=14)
                df = self.making_df(setting)
                self.visualization.plot_3D_to_2D_contour_for_average_state(setting, df)
                cb = plt.colorbar()
                cb.set_label(label='AS', size=15, labelpad=10)
                cb.ax.tick_params(labelsize=12)
                plt.clim(-1, 1)
                plt.xlabel(r'$\beta$', fontsize=18, labelpad=6)
                plt.ylabel(r'$\gamma$', fontsize=18, labelpad=6)
                plt.show()
                plt.close()
            elif self.display_locBox.currentText() == 'inner graph':
                self.Total_Result_layout.takeAt(0)
                fig = plt.figure()
                plt.style.use('seaborn-whitegrid')
                ax = fig.add_subplot(111)
                ax.tick_params(axis='both', labelsize=14)
                df = self.making_df(setting)
                self.visualization.plot_3D_to_2D_contour_for_average_state(setting, df)
                cb = plt.colorbar()
                cb.set_label(label='AS', size=15, labelpad=10)
                cb.ax.tick_params(labelsize=12)
                plt.clim(-1, 1)
                plt.xlabel(r'$\beta$', fontsize=18, labelpad=6)
                plt.ylabel(r'$\gamma$', fontsize=18, labelpad=6)
                canvas = FigureCanvas(fig)
                layout = self.Total_Result_layout
                layout.addWidget(canvas)
                canvas.draw()
                canvas.show()
                plt.close()
        elif self.display_typeBox.currentText() == 'contour3D':
            print('contour3D type...')
            if self.display_locBox.currentText() == 'outer graph':
                plt.figure()
                plt.style.use('seaborn-whitegrid')
                df = self.making_df(setting)
                self.visualization.plot_3D_contour_for_average_state(setting, df)
                plt.show()
                plt.close()
            elif self.display_locBox.currentText() == 'inner graph':
                self.Total_Result_layout.takeAt(0)
                fig = plt.figure()
                plt.style.use('seaborn-whitegrid')
                df = self.making_df(setting)
                self.visualization.plot_3D_contour_for_average_state(setting, df)
                canvas = FigureCanvas(fig)
                layout = self.Total_Result_layout
                layout.addWidget(canvas)
                canvas.draw()
                canvas.show()
                plt.close()

    def result_gamma_graph(self, state, setting):
        """Plot AS vs. gamma, one curve per positive beta spin-box value."""
        print('drawing gamma graph...')
        # NOTE(review): eval() of widget text assumes trusted local input.
        box = [eval(self.beta_spinBox1.text()), eval(self.beta_spinBox2.text()),
               eval(self.beta_spinBox3.text()), eval(self.beta_spinBox4.text()),
               eval(self.beta_spinBox5.text()), eval(self.beta_spinBox6.text())]
        marker = ['-o', '-x', '-v', '-^', '-s', '-d']
        if self.result_gamma_locBox.currentText() == 'outer graph':
            fig = plt.figure()
            plt.style.use('seaborn-whitegrid')
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            for i, j in enumerate(box):
                if j > 0:
                    self.visualization.plot_2D_gamma_for_average_state(setting, df, j, marker[i])
            plt.legend(framealpha=1, frameon=True, prop={'size': 12})
            plt.ylim(-1.7, 1.7)
            plt.xlabel(r'$\gamma$', fontsize=18, labelpad=4)
            plt.ylabel('AS', fontsize=18, labelpad=4)
            plt.show()
            plt.close()
        elif self.result_gamma_locBox.currentText() == 'inner graph':
            self.Result_gamma_layout.takeAt(0)
            fig = plt.figure()
            plt.style.use('seaborn-whitegrid')
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            for i, j in enumerate(box):
                if j > 0:
                    self.visualization.plot_2D_gamma_for_average_state(setting, df, j, marker[i])
            plt.legend(framealpha=1, frameon=True, prop={'size': 12})
            plt.ylim(-1.7, 1.7)
            plt.xlabel(r'$\gamma$', fontsize=18, labelpad=4)
            plt.ylabel('AS', fontsize=18, labelpad=4)
            canvas = FigureCanvas(fig)
            layout = self.Result_gamma_layout
            layout.addWidget(canvas)
            canvas.draw()
            canvas.show()
            plt.close()

    def result_beta_graph(self, state, setting):
        """Plot AS vs. beta, one curve per positive gamma spin-box value."""
        print('drawing beta graph...')
        box = [eval(self.gamma_spinBox1.text()), eval(self.gamma_spinBox2.text()),
               eval(self.gamma_spinBox3.text()), eval(self.gamma_spinBox4.text()),
               eval(self.gamma_spinBox5.text()), eval(self.gamma_spinBox6.text())]
        marker = ['-o', '-x', '-v', '-^', '-s', '-d']
        if self.result_beta_locBox.currentText() == 'outer graph':
            fig = plt.figure()
            plt.style.use('seaborn-whitegrid')
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            for i, j in enumerate(box):
                if j > 0:
                    self.visualization.plot_2D_beta_for_average_state(setting, df, j, marker[i])
            plt.legend(framealpha=1, frameon=True, prop={'size': 12})
            plt.ylim(-1.7, 1.7)
            plt.xlabel(r'$\beta$', fontsize=18, labelpad=4)
            plt.ylabel('AS', fontsize=18, labelpad=4)
            plt.show()
            plt.close()
        elif self.result_beta_locBox.currentText() == 'inner graph':
            self.Result_beta_layout.takeAt(0)
            fig = plt.figure()
            plt.style.use('seaborn-whitegrid')
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            for i, j in enumerate(box):
                if j > 0:
                    self.visualization.plot_2D_beta_for_average_state(setting, df, j, marker[i])
            plt.legend(framealpha=1, frameon=True, prop={'size': 12})
            plt.ylim(-1.7, 1.7)
            plt.xlabel(r'$\beta$', fontsize=18, labelpad=4)
            plt.ylabel('AS', fontsize=18, labelpad=4)
            canvas = FigureCanvas(fig)
            layout = self.Result_beta_layout
            layout.addWidget(canvas)
            canvas.draw()
            canvas.show()
            plt.close()

    def prob_beta_graph(self, state, setting):
        """Plot layer B's probability over time for a (beta, gamma) range."""
        print('drawing prob beta graph...')
        beta_value = [eval(self.beta_minBox.text()), eval(self.beta_maxBox.text())]
        gamma_value = [eval(self.gamma_minBox.text()), eval(self.gamma_maxBox.text())]
        if self.prob_beta_locBox.currentText() == 'outer graph':
            fig = plt.figure()
            sns.set()
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            self.visualization.flow_prob_beta_chart(setting, df, beta_value, gamma_value)
            plt.ylabel('probability for layer B', fontsize=18, labelpad=4)
            plt.xlabel('time(step)', fontsize=18, labelpad=4)
            plt.show()
            plt.close()
        elif self.prob_beta_locBox.currentText() == 'inner graph':
            self.prob_beta_layout.takeAt(0)
            fig = plt.figure()
            sns.set()
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            self.visualization.flow_prob_beta_chart(setting, df, beta_value, gamma_value)
            plt.ylabel('probability for layer B', fontsize=18, labelpad=4)
            plt.xlabel('time(step)', fontsize=18, labelpad=4)
            canvas = FigureCanvas(fig)
            layout = self.prob_beta_layout
            layout.addWidget(canvas)
            canvas.draw()
            canvas.show()
            plt.close()

    def different_state_ratio_graph(self, state, setting):
        """Plot the different-state ratio over time for the selected layer."""
        print('drawing different state ratio graph...')
        beta_value = [eval(self.beta_minBox_4.text()), eval(self.beta_maxBox_4.text())]
        gamma_value = [eval(self.gamma_minBox_4.text()), eval(self.gamma_maxBox_4.text())]
        select_layer = str(self.select_layerBox.currentText())
        # Bug fix: the outer-graph branch previously read `prob_beta_locBox`
        # (copy-paste from prob_beta_graph); both branches must consult this
        # tab's own combo box, `state_ratio_locBox`.
        if self.state_ratio_locBox.currentText() == 'outer graph':
            fig = plt.figure()
            sns.set()
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            self.visualization.different_state_ratio_chart(setting, df, beta_value, gamma_value, select_layer)
            plt.ylabel('different state ratio for layer %s' % select_layer, fontsize=18, labelpad=6)
            plt.xlabel('time(step)', fontsize=18, labelpad=6)
            plt.show()
            plt.close()
        elif self.state_ratio_locBox.currentText() == 'inner graph':
            self.different_state_layout.takeAt(0)
            fig = plt.figure()
            sns.set()
            ax = fig.add_subplot(111)
            ax.tick_params(axis='both', labelsize=14)
            df = self.making_df(setting)
            self.visualization.different_state_ratio_chart(setting, df, beta_value, gamma_value, select_layer)
            plt.ylabel('different state ratio for layer %s' % select_layer, fontsize=18, labelpad=6)
            plt.xlabel('time(step)', fontsize=18, labelpad=6)
            canvas = FigureCanvas(fig)
            layout = self.different_state_layout
            layout.addWidget(canvas)
            canvas.draw()
            canvas.show()
            plt.close()

    def db_drop_duplicate_row(self, state, setting):
        """Delete duplicate rows from the result DB via the DB manager."""
        print('duplicate DB dropped...')
        self.db_manager.drop_duplicate_row(setting)

    def select_db_func(self, state, setting):
        """Show the first rows of the result DB in the table widget."""
        print('select DB...')
        table = self.DB_table
        df = self.making_df(setting)
        # Bug fix: head() returns a copy — the original discarded it, so the
        # whole frame was rendered; keep only the first 100 rows as intended.
        df = df.head(100)
        table.setColumnCount(len(df.columns))
        table.setRowCount(len(df.index))
        table.setHorizontalHeaderLabels(df.columns)
        for i in range(len(df.index)):
            for j in range(len(df.columns)):
                table.setItem(i, j, QTableWidgetItem(str(df.iloc[i, j])))
        self.table_widget.show()

    def duplicate_db_func(self, state, setting):
        """Show DB rows in the table widget.

        NOTE(review): this body is identical to select_db_func — presumably it
        should display only duplicated rows; confirm the intended query.
        """
        print('duplicate DB...')
        table = self.DB_table
        df = self.making_df(setting)
        # Bug fix: keep head()'s return value (see select_db_func).
        df = df.head(100)
        table.setColumnCount(len(df.columns))
        table.setRowCount(len(df.index))
        table.setHorizontalHeaderLabels(df.columns)
        for i in range(len(df.index)):
            for j in range(len(df.columns)):
                table.setItem(i, j, QTableWidgetItem(str(df.iloc[i, j])))
        self.table_widget.show()

    def making_movie_function(self):
        """Play the pre-rendered dynamics movie inside the movie layout."""
        print('making movie...')
        self.movie_layout.takeAt(0)
        layout = self.movie_layout
        videoWidget = QVideoWidget()
        layout.addWidget(videoWidget)
        self.mediaPlayer.setVideoOutput(videoWidget)
        # NOTE(review): hardcoded absolute path — breaks on any other machine.
        self.mediaPlayer.setMedia(QMediaContent(QUrl.fromLocalFile('C:/Users/Purple/CompetingLayer/dynamics.mp4')))
        self.mediaPlayer.play()

    def doing_simulation(self, state, setting):
        """Kick off the parallel simulation sweep with the current settings."""
        print('doing simulation...')
        self.changing_variable.paralleled_work(setting)

    def condition_setting(self, state, setting):
        """Copy every configuration widget's value onto the shared setting object."""
        setting.Structure = str(self.StructuresBox.currentText())
        setting.A_state = eval(self.A_StateBox.currentText())
        setting.A_node = int(self.A_NodeBox.currentText())
        setting.A_edge = int(self.A_InternalEdgeBox.currentText())
        setting.MAX = int(self.A_MAXBox.currentText())
        setting.MIN = int(self.A_MINBox.currentText())
        setting.B_state = eval(self.B_StateBox.currentText())
        setting.B_node = int(self.B_NodeBox.currentText())
        setting.B_edge = int(self.B_InternalEdgeBox.currentText())
        setting.B_inter_edges = int(self.B_ExternalEdgeBox.currentText())
        setting.A_inter_edges = int(self.A_ExternalEdgeBox.currentText())
        setting.Limited_step = int(self.StepBox.currentText())
        setting.drawing_graph = bool(self.DrawingBox.currentText() == 'True')
        setting.database = str(self.DatabaseBox.currentText())
        setting.table = str(self.TableBox.currentText())
        setting.DB = str(self.DBBox.currentText())
        print('setting is completed')
if __name__ == "__main__":
    # Launch the Qt application with a default simulation configuration.
    config = Setting_Simulation_Value()
    qt_app = QApplication(sys.argv)
    window = MyWindow(config)
    window.show()
    qt_app.exec_()
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,513
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/MakingPandas.py
|
import pandas as pd
import numpy as np
import InterconnectedLayerModeling
import Setting_Simulation_Value
class MakingPandas:
    """Builds result DataFrames and summary statistics for the two-layer network."""

    def making_dataframe_per_step(self, setting, value_array):
        """Wrap the per-step result array in a DataFrame annotated with run settings."""
        columns = ['gamma', 'beta', 'prob_beta', 'persuasion', 'compromise',
                   'A_plus', 'A_minus', 'B_plus', 'B_minus',
                   'Layer_A_Mean', 'Layer_B_Mean', 'AS',
                   'A_total_edges', 'B_total_edges', 'change_count']
        frame = pd.DataFrame(value_array, columns=columns)
        # One row per simulation step, 0 through Limited_step inclusive.
        frame['MODEL'] = setting.MODEL
        frame['Steps'] = list(range(setting.Limited_step + 1))
        frame['Structure'] = setting.Structure
        frame['A_node_number'] = setting.A_node
        frame['B_node_number'] = setting.B_node
        frame['A_external_edges'] = setting.A_inter_edges
        frame['B_external_edges'] = setting.B_inter_edges
        return frame

    def interacting_property(self, setting, inter_layer):
        """Summarise both layers' node states.

        Returns (A_plus, A_minus, B_plus, B_minus, layer_A_mean, layer_B_mean,
        average_state), where AS normalises layer A's mean by setting.MAX.
        """
        nodes = inter_layer.two_layer_graph.nodes
        judge_A = np.array([nodes[i]['state'] for i in range(setting.A_node)])
        judge_B = np.array([nodes[i]['state']
                            for i in range(setting.A_node, setting.A_node + setting.B_node)])
        A_plus = int(np.sum(judge_A > 0))
        A_minus = int(np.sum(judge_A < 0))
        B_plus = int(np.sum(judge_B > 0))
        B_minus = int(np.sum(judge_B < 0))
        layer_A_mean = int(np.sum(judge_A)) / setting.A_node
        layer_B_mean = int(np.sum(judge_B)) / setting.B_node
        average_state = ((layer_A_mean / setting.MAX) + layer_B_mean) / 2
        return A_plus, A_minus, B_plus, B_minus, layer_A_mean, layer_B_mean, average_state
if __name__ == "__main__":
    # Smoke test: construct the helper and a two-layer graph to feed it.
    print("MakingPandas")
    mp = MakingPandas()
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,514
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/UsingGPU.py
|
import math
from numba import vectorize, cuda
import numpy as np
from numba import guvectorize
# Element-wise ufunc compiled for CUDA; supports float32 and float64 pairs.
@vectorize(['float32(float32, float32)', 'float64(float64, float64)'], target='cuda')
def gpu_sincos(x, y):
    # sin(x) * cos(y), evaluated on the GPU for each element pair.
    return math.sin(x) * math.cos(y)
# Demo: evaluate the CUDA ufunc on a scalar pair and display the output.
print(gpu_sincos(3.0, 4.0))
# Skeleton from the numba documentation: the `...` placeholders stand for a
# real signature list and kernel body — illustrative only, not runnable as-is.
@guvectorize(..., target='cuda')
def very_complex_kernel(A, B, C):
    ...
very_complex_kernel.max_blocksize = 32  # limits to 32 threads per block
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,515
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/InterconnectedLayerModeling.py
|
import networkx as nx
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import matplotlib
import Setting_Simulation_Value
matplotlib.use("TkAgg")
class InterconnectedLayerModeling:
    """Builds two interconnected layers (A and B) as a single NetworkX graph."""
    def __init__(self, setting):
        # Layer A topology plus the A<->B interconnections are built together.
        A_edges_array = self.A_layer_config(setting)
        self.A_edges = A_edges_array[0]
        self.AB_edges = A_edges_array[1]
        self.AB_neighbor = A_edges_array[2]
        self.B_edges = self.B_layer_config(setting)
        self.two_layer_graph = self.making_interconnected_graph(setting)
    def making_interconnected_graph(self, setting):
        """Assemble one graph: A nodes occupy [0, A_node), B nodes [A_node, A_node+B_node)."""
        self.two_layer_graph = nx.Graph()
        for i in range(setting.A_node):
            self.two_layer_graph.add_node(i, name='A_%s' % i, state=setting.A[i])
        for i in range(setting.B_node):
            self.two_layer_graph.add_node(i+setting.A_node, name='B_%s' % i, state=setting.B[i])
        A_edges_list = sorted(self.A_edges.edges)
        self.two_layer_graph.add_edges_from(A_edges_list)
        B_edges_list = self.B_edges
        self.two_layer_graph.add_edges_from(B_edges_list)
        AB_edges_list = self.AB_edges
        self.two_layer_graph.add_edges_from(AB_edges_list)
        return self.two_layer_graph
    def A_layer_config(self, setting):
        # Layer A components, e.g. A_layer_config(state=[1, 2], node=2048, edge=5, Max=2, Min=-2)
        self.select_layer_A_model(setting)
        self.making_interconnected_edges(setting)
        return self.A_edges, self.AB_edges, self.AB_neighbor
    # A: each A node's state; A_state: kinds of A node states (1, 2, -1, -2);
    # A_node: node count; A_edge: internal edge count; A_edges: internal edges (tuples);
    # MAX: maximum state; MIN: minimum state
    def select_layer_A_model(self, setting):
        """Pick layer A's topology from the 'X-Y' Structure string (X names layer A's model)."""
        if setting.Structure.split('-')[0] == 'RR':
            self.making_layer_A_random_regular(setting)
        elif setting.Structure.split('-')[0] == 'BA':
            self.making_layer_A_barabasi_albert(setting)
        return self.A_edges
    def making_layer_A_random_regular(self, setting):
        # Layer A as a random-regular network
        self.A_edges = nx.random_regular_graph(setting.A_edge, setting.A_node, seed=None)
        return self.A_edges
    def making_layer_A_barabasi_albert(self, setting):
        # Layer A as a Barabasi-Albert network
        self.A_edges = nx.barabasi_albert_graph(setting.A_node, setting.A_edge, seed=None)
        return self.A_edges
    def making_interconnected_edges(self, setting):
        """Connect each B node to B_inter_edges consecutive A nodes.

        NOTE(review): reshape(-1, B_inter_edges) assumes A_node is divisible
        by B_inter_edges — confirm against the settings used.
        """
        self.AB_edges = []
        self.AB_neighbor = []
        for i in range(int(setting.A_node / setting.B_inter_edges)):
            for j in range(setting.B_inter_edges):
                connected_A_node = np.array(self.A_edges.nodes).reshape(-1, setting.B_inter_edges)[i][j]
                self.AB_neighbor.append(connected_A_node)
                self.AB_edges.append((i + setting.A_node, connected_A_node))
        self.AB_neighbor = np.array(self.AB_neighbor).reshape(-1, setting.B_inter_edges)
        return self.AB_edges, self.AB_neighbor
    # AB_neighbor maps each B node (by B-local index) to its connected A node
    # numbers, e.g. AB_neighbor[0] = array([0, 1]) means B node 0 links to A
    # nodes 0 and 1.  In AB_edges, (0, 1) means B node 0 connects to A node 1.
    def B_layer_config(self, setting):  # Layer B components, e.g. B_layer_config(state=[-1], node=2048, edge=5, inter_edge=1)
        self.select_layer_B_model(setting)
        return self.B_edges
    def select_layer_B_model(self, setting):
        """Pick layer B's topology from the 'X-Y' Structure string (Y names layer B's model)."""
        if setting.Structure.split('-')[1] == 'RR':
            self.making_layer_B_random_regular(setting)
        elif setting.Structure.split('-')[1] == 'BA':
            self.making_layer_B_barabasi_albert(setting)
        return self.B_edges
    def making_layer_B_random_regular(self, setting):  # Layer B as a random-regular network
        b_edges = nx.random_regular_graph(setting.B_edge, setting.B_node, seed=None)
        self.B_edges = []
        # Shift B's node labels by A_node so both layers share one index space.
        for i in range(len(b_edges.edges)):
            self.B_edges.append((sorted(b_edges.edges)[i][0] + setting.A_node,
                                 sorted(b_edges.edges)[i][1] + setting.A_node))
        return self.B_edges
    def making_layer_B_barabasi_albert(self, setting):  # Layer B as a Barabasi-Albert network
        b_edges = nx.barabasi_albert_graph(setting.B_node, setting.B_edge, seed=None)
        self.B_edges = []
        for i in range(len(b_edges.edges)):
            self.B_edges.append((sorted(b_edges.edges)[i][0] + setting.A_node,
                                 sorted(b_edges.edges)[i][1] + setting.A_node))
        return self.B_edges
if __name__ == "__main__":
    # Quick smoke test: build the two-layer graph and report layer A's edge count.
    print("interconnectedlayer")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    inter_layer = InterconnectedLayerModeling(setting)
    print(len(sorted(inter_layer.A_edges.edges())))
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,516
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/VisualizationNew.py
|
import SelectDB
import numpy as np
import Setting_Simulation_Value
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas as pd
from sympy import *
from matplotlib import cycler
from mpl_toolkits.mplot3d.axes3d import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
matplotlib.use("TkAgg")
class Visualization:
    """Plot helpers for the beta/gamma competition-simulation sweep results.

    Every method expects a pandas DataFrame with (at least) the columns
    ``beta``, ``gamma`` and ``AS``; the time-series charts additionally use
    ``Steps`` and ``prob_beta``.

    NOTE(review): the 2-D chart methods draw into a module-global ``fig``
    that the caller must create beforehand (``fig = plt.figure()``), as the
    ``__main__`` section of this module does — confirm against other callers.
    """

    def plot_2D_gamma_for_average_state(self, df, beta_values):
        """Plot AS as a function of gamma, one curve per value in *beta_values*."""
        marker = ['-o', '-x', '-v', '-^', '-s', '-d']
        plt.style.use('seaborn-whitegrid')
        ax = fig.add_subplot(111)  # module-global fig (see class docstring)
        ax.tick_params(axis='both', labelsize=14)
        beta_list = Visualization.making_select_list(df, 'beta')
        for i, beta_value in enumerate(beta_values):
            temp_value = Visualization.covert_to_select_list_value(beta_list, beta_value)
            # Bug fix: the original rebound ``df`` here, so every curve after
            # the first was selected from an already-filtered (typically
            # empty) frame.  Select into a per-iteration subset instead.
            sub = df[df.beta == temp_value]
            plt.plot(sub['gamma'], sub['AS'], marker[i], label=r'$\beta$=%.2f' % temp_value,
                     markersize=6, linewidth=1.5, markeredgewidth=1)
        plt.legend(framealpha=1, frameon=True, prop={'size': 12})
        plt.ylim(-1.5, 1.5)
        plt.xlabel(r'$\gamma$', fontsize=18, labelpad=4)
        plt.ylabel('AS', fontsize=18, labelpad=4)

    def plot_2D_beta_for_average_state(self, df, gamma_values):
        """Plot AS as a function of beta, one curve per value in *gamma_values*."""
        marker = ['-o', '-x', '-v', '-^', '-s', '-d']
        plt.style.use('seaborn-whitegrid')
        ax = fig.add_subplot(111)  # module-global fig (see class docstring)
        ax.tick_params(axis='both', labelsize=14)
        gamma_list = Visualization.making_select_list(df, 'gamma')
        for i, gamma_value in enumerate(gamma_values):
            temp_value = Visualization.covert_to_select_list_value(gamma_list, gamma_value)
            # Bug fixes: the original filtered on a legacy ``p`` column even
            # though the select list is built from ``gamma``, and it rebound
            # ``df`` so later curves drew from an already-narrowed frame.
            sub = df[df.gamma == temp_value]
            plt.plot(sub['beta'], sub['AS'], marker[i], label=r'$\gamma$=%.2f' % temp_value,
                     markersize=6, linewidth=1.5, markeredgewidth=1)
        plt.legend(framealpha=1, frameon=True, prop={'size': 12})
        plt.ylim(-1.5, 1.5)
        plt.xlabel(r'$\beta$', fontsize=18, labelpad=4)
        plt.ylabel('AS', fontsize=18, labelpad=4)

    def plot_3D_scatter_for_average_state(self, df):
        """3-D scatter of AS over the (beta, gamma) plane, coloured by AS."""
        plt.style.use('seaborn-whitegrid')
        ax = plt.axes(projection='3d')
        ax.scatter(df['beta'], df['gamma'], df['AS'], c=df['AS'], cmap='RdBu', linewidth=0.2)
        ax.set_xlabel(r'$\beta$', fontsize=18, labelpad=8)
        ax.set_ylabel(r'$\gamma$', fontsize=18, labelpad=8)
        ax.set_zlabel('AS', fontsize=18, labelpad=8)
        ax.set_title(r'$\beta$-$\gamma$-AS', fontsize=18)
        ax.tick_params(axis='both', labelsize=14)
        ax.view_init(45, 45)

    def plot_3D_trisurf_for_average_state(self, df):
        """Triangulated 3-D surface of AS over the (beta, gamma) plane."""
        plt.style.use('seaborn-whitegrid')
        ax = plt.axes(projection='3d')
        ax.plot_trisurf(df['beta'], df['gamma'], df['AS'], cmap='RdBu', edgecolor='none')
        ax.set_xlabel(r'$\beta$', fontsize=18, labelpad=8)
        ax.set_ylabel(r'$\gamma$', fontsize=18, labelpad=8)
        ax.set_zlabel('AS', fontsize=18, labelpad=8)
        ax.set_title(r'$\beta$-$\gamma$-AS', fontsize=18)
        ax.tick_params(axis='both', labelsize=14)
        ax.view_init(45, 45)

    def plot_3D_contour_for_average_state(self, df):
        """3-D contour plot of AS on the regular (beta, gamma) grid."""
        plt.style.use('seaborn-whitegrid')
        v_list = Visualization.making_select_list(df, 'beta')   # ndarray of unique betas
        p_list = Visualization.making_select_list(df, 'gamma')  # ndarray of unique gammas
        X, Y = np.meshgrid(v_list, p_list)
        Z = Visualization.state_list_function(df, p_list, v_list)
        ax = plt.axes(projection='3d')
        ax.contour3D(X, Y, Z, 50, cmap='RdBu')
        ax.set_xlabel(r'$\beta$', fontsize=18, labelpad=6)
        ax.set_ylabel(r'$\gamma$', fontsize=18, labelpad=6)
        ax.set_zlabel('AS', fontsize=18, labelpad=6)
        ax.set_title(r'$\beta$-$\gamma$-AS', fontsize=18)
        ax.view_init(45, 45)

    def plot_3D_to_2D_contour_for_average_state(self, df):
        """Filled 2-D contour (heat-map style) of AS over the (beta, gamma) grid."""
        plt.style.use('seaborn-whitegrid')
        ax = fig.add_subplot(111)  # module-global fig (see class docstring)
        ax.tick_params(axis='both', labelsize=14)
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        X, Y = np.meshgrid(beta_list, gamma_list)
        Z = Visualization.state_list_function(df, gamma_list, beta_list)
        plt.contourf(X, Y, Z, 50, cmap='RdBu')
        cb = plt.colorbar()
        cb.set_label(label='AS', size=15, labelpad=10)
        cb.ax.tick_params(labelsize=12)
        plt.clim(-1, 1)
        plt.xlabel(r'$\beta$', fontsize=18, labelpad=6)
        plt.ylabel(r'$\gamma$', fontsize=18, labelpad=6)

    def average_state_for_steps(self, df, v_value, p_value):
        """Plot the AS time series of the single (beta~v_value, gamma~p_value) cell."""
        v_list = Visualization.making_select_list(df, 'beta')
        p_list = Visualization.making_select_list(df, 'gamma')
        temp_v_value = Visualization.covert_to_select_list_value(v_list, v_value)
        temp_p_value = Visualization.covert_to_select_list_value(p_list, p_value)
        # Bug fix: the original filtered on legacy ``p``/``v`` columns even
        # though the select lists are built from ``gamma``/``beta``.
        df1 = df[df.gamma == temp_p_value]
        df2 = df1[df1.beta == temp_v_value]
        df3 = df2.sort_values(by='Steps', ascending=True)
        plt.plot(df3['Steps'], df3['AS'], linestyle=':', marker='o', markersize=2, linewidth=0.3)
        plt.ylabel('AS', fontsize=18, labelpad=4)
        plt.xlabel('time(step)', fontsize=18, labelpad=4)

    def flow_prob_beta_chart(self, df, beta_values, gamma_values):
        """Overlay the prob_beta time series of every (beta, gamma) cell inside
        the rectangle [beta_values[0], beta_values[1]] x [gamma_values[0], gamma_values[1]].
        """
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        beta_min = Visualization.covert_to_select_list_value(beta_list, beta_values[0])
        beta_max = Visualization.covert_to_select_list_value(beta_list, beta_values[1])
        gamma_min = Visualization.covert_to_select_list_value(gamma_list, gamma_values[0])
        gamma_max = Visualization.covert_to_select_list_value(gamma_list, gamma_values[1])
        # Cumulative narrowing to the requested window (intentional here,
        # unlike the per-curve filters above).
        df = df[df.gamma >= gamma_min]
        df = df[df.gamma <= gamma_max]
        df = df[df.beta >= beta_min]
        df = df[df.beta <= beta_max]
        gamma_array = np.array(df['gamma'].drop_duplicates())
        beta_array = np.array(df['beta'].drop_duplicates())
        for i in sorted(gamma_array):
            for j in sorted(beta_array):
                df1 = df[df.gamma == i]
                df2 = df1[df1.beta == j]
                plt.plot(df2['Steps'], df2['prob_beta'], '-', markersize=2, linewidth=0.3)
        plt.ylabel('probability for layer B', fontsize=18, labelpad=4)
        plt.xlabel('time(step)', fontsize=18, labelpad=4)

    def average_state_for_steps_scale(self, df, beta_values, gamma_values):
        """Overlay the AS time series of every (beta, gamma) cell inside the
        rectangle [beta_values[0], beta_values[1]] x [gamma_values[0], gamma_values[1]].
        """
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        beta_min = Visualization.covert_to_select_list_value(beta_list, beta_values[0])
        beta_max = Visualization.covert_to_select_list_value(beta_list, beta_values[1])
        gamma_min = Visualization.covert_to_select_list_value(gamma_list, gamma_values[0])
        gamma_max = Visualization.covert_to_select_list_value(gamma_list, gamma_values[1])
        df = df[df.gamma >= gamma_min]
        df = df[df.gamma <= gamma_max]
        df = df[df.beta >= beta_min]
        df = df[df.beta <= beta_max]
        gamma_array = np.array(df['gamma'].drop_duplicates())
        beta_array = np.array(df['beta'].drop_duplicates())
        for i in sorted(gamma_array):
            for j in sorted(beta_array):
                df1 = df[df.gamma == i]
                df2 = df1[df1.beta == j]
                plt.plot(df2['Steps'], df2['AS'], linewidth=0.5)
        plt.ylabel('AS', fontsize=18, labelpad=6)
        plt.xlabel('time(step)', fontsize=18, labelpad=6)

    @staticmethod
    def state_list_function(df, p_list, v_list):
        """Build the grid Z with Z[i][j] = AS at (gamma=p_list[i], beta=v_list[j]).

        Cells with no matching row are left at 0.
        """
        Z = np.zeros([len(p_list), len(v_list)])
        for i, p in enumerate(p_list):
            for j, v in enumerate(v_list):
                df1 = df[df.gamma == p]
                df2 = df1[df1.beta == v]
                if len(df2) == 0:
                    Z[i][j] = 0
                else:
                    Z[i][j] = df2['AS'].iloc[0]
        return Z

    @staticmethod
    def covert_to_select_list_value(select_list, input_value):
        """Snap *input_value* onto the sampled grid *select_list* (an ndarray
        from ``making_select_list``): returns the largest grid value <= it.

        NOTE(review): if input_value is below every grid value, ``loc`` is 0
        and ``select_list[-1]`` (the largest value) is returned — behaviour
        preserved from the original; confirm it is intended.
        """
        loc = np.sum(select_list <= input_value)
        temp_value = select_list[loc - 1]
        return temp_value

    @staticmethod
    def making_select_list(df, list_name):
        """Return the sorted unique values of column *list_name* as an ndarray."""
        # Idiom: replaces the original manual copy loop, which also shadowed
        # the builtin ``list``.
        return np.array(sorted(df[list_name].drop_duplicates()))
if __name__ == "__main__":
    # Load one sweep table from the simulation DB and render a 3-D
    # beta/gamma/AS scatter of the final (step 100) states.
    print("Visualization")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    setting.database = 'paper_revised_data'
    setting.table = 'simulation_table4'
    df = SelectDB.SelectDB().select_data_from_DB(setting)
    df = df[df.Steps == 100]  # keep only the final recorded step
    visualization = Visualization()
    # Module-global figure: the 2-D plot methods of Visualization draw into it.
    fig = plt.figure()
    sns.set()
    visualization.plot_3D_scatter_for_average_state(df)
    plt.show()
    plt.close()
    print("paint finished")
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,517
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/MakingMovie.py
|
import Setting_Simulation_Value
import Interconnected_Network_Visualization
import numpy as np
from IPython.display import display, HTML
import matplotlib.pyplot as plt
from matplotlib import animation
from mpl_toolkits.mplot3d.axes3d import *
from sympy import *
import Layer_A_Modeling
import Layer_B_Modeling
class MakingMovie:
    """Renders interconnected-layer snapshots and assembles them into a video."""

    def __init__(self):
        # Shared simulation settings and the network renderer used for frames.
        self.SS = Setting_Simulation_Value.Setting_Simulation_Value()
        self.InterNetwork = Interconnected_Network_Visualization.Interconnected_Network_Visualization()

    def making_movie_for_interconneted_dynamics(self, layer_A, layer_B, img_file_name, save_file_name):
        """Draw one frame of the two-layer network and write it out as a movie.

        (The method name keeps the original 'interconneted' spelling so
        existing callers keep working.)
        """
        ims = [np.array(self.InterNetwork.draw_interconnected_network(layer_A, layer_B, img_file_name))]
        # Bug fix: the original passed the undefined name ``image_array``
        # (NameError at runtime); the frames collected above are in ``ims``.
        self.plot_movie_mp4(ims, save_file_name)

    def plot_movie_mp4(self, image_array, save_file_name):
        """Animate a list of equally-sized RGB frame arrays and save them as a video.

        Also embeds an HTML5 preview of the animation (IPython display).
        """
        dpi = 72.0
        # Size the figure so one image pixel maps to one figure pixel.
        xpixels, ypixels = image_array[0].shape[0], image_array[0].shape[1]
        fig = plt.figure(figsize=(ypixels / dpi, xpixels / dpi), dpi=dpi)
        im = plt.figimage(image_array[0])

        def animate(i):
            # Swap in frame i; FuncAnimation drives the index.
            im.set_array(image_array[i])
            return (im,)

        anim = animation.FuncAnimation(fig, animate, frames=len(image_array), repeat=False, interval=500)
        anim.save(save_file_name)
        display(HTML(anim.to_html5_video()))
# NOTE(review): dead/legacy script code left at module level.  It references
# names that are not defined anywhere in this module (`drawing_graph`,
# `animation_interconnected_dynamics`, `A`, `B`, and a free-function
# `plot_movie_mp4`), so it would raise NameError if executed on import —
# presumably copied from an older script version; confirm and remove.
fig = plt.figure()
ims = [np.array(drawing_graph('dynamic_image.png'))]
limited_time = 1000  # hard cap on the number of animation steps
total = 0
while True :
    im = animation_interconnected_dynamics('dynamic_image.png')
    ims.append(np.array(im))
    total += 1
    # Stop once both layers reach consensus (all states share one sign) or
    # the step cap is hit.
    if (np.all(A > 0) == 1 and np.all(B > 0) == 1) or (np.all(A < 0)== 1 and np.all(B < 0)== 1) or (total == limited_time) :
        break
im = animation_interconnected_dynamics('dynamic_image.png')
ims.append(np.array(im))
IMS = np.array(ims)
plot_movie_mp4(IMS,'dynamic_images_no_leader(128(BA), 128(RR), ganma=0.5, beta=2).mp4')
if __name__ == "__main__":
    # NOTE(review): this entry point calls names that do not exist in this
    # module (`making_interconnected_edges`, `static_variable`,
    # `drawing_graph`, `animation_interconnected_dynamics`, `A`, `B`, and a
    # free-function `plot_movie_mp4`) — it appears to predate the
    # MakingMovie class above and will raise NameError if run; confirm
    # before use.
    print("Making the movie for competition")
    making_interconnected_edges()
    static_variable(0.5, 2)  # presumably (gamma, beta) — TODO confirm
    fig = plt.figure()
    ims = [np.array(drawing_graph('dynamic_image.png'))]
    limited_time = 1000  # hard cap on the number of animation steps
    total = 0
    while True:
        im = animation_interconnected_dynamics('dynamic_image.png')
        ims.append(np.array(im))
        total += 1
        # Stop at consensus in both layers (all states share one sign) or at
        # the step cap.
        if (np.all(A > 0) == 1 and np.all(B > 0) == 1) or (np.all(A < 0) == 1 and np.all(B < 0) == 1) or (
                total == limited_time):
            break
    im = animation_interconnected_dynamics('dynamic_image.png')
    ims.append(np.array(im))
    IMS = np.array(ims)
    plot_movie_mp4(IMS, 'dynamic_images_no_leader(128(BA), 128(RR), ganma=0.5, beta=2).mp4')
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,518
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/DecisionDynamics.py
|
import random
import time
import numpy as np
import Setting_Simulation_Value
import InterconnectedLayerModeling
import networkx as nx
class DecisionDynamics:
    """Stochastic update rule for the B ('decision') layer of the
    interconnected two-layer model.

    A node flips its state with probability
    ``(opposite-orientation neighbours / total neighbours) ** beta``.
    """

    def __init__(self):
        # Cumulative count of B-layer state flips across all calls.
        self.B_COUNT = 0

    def B_layer_dynamics(self, setting, inter_layer, beta):
        """Sweep every B-layer node once, flipping states stochastically.

        Probabilities are evaluated against the *current* graph state, so a
        flip made for an earlier node in the sweep affects later nodes.

        Returns:
            (inter_layer, prob_beta_mean) — the mutated layer object and the
            mean flip probability over the sweep.
        """
        prob_beta_list = []
        for node_i in range(setting.A_node, setting.A_node + setting.B_node):
            neighbors = np.array(sorted(nx.neighbors(inter_layer.two_layer_graph, node_i)))
            neighbor_state = []
            for neighbor in neighbors:
                neighbor_state.append(inter_layer.two_layer_graph.nodes[neighbor]['state'])
            neighbor_array = np.array(neighbor_state)
            # Neighbours whose state sign agrees with node_i's state.
            same_orientation = int(np.sum(neighbor_array * inter_layer.two_layer_graph.nodes[node_i]['state'] > 0))
            opposite_orientation = len(neighbors) - same_orientation
            # NOTE(review): assumes every B node has >= 1 neighbour; an
            # isolated node would divide by zero — confirm graph construction.
            prob_beta = (opposite_orientation / len(neighbors)) ** beta
            z = random.random()
            if z < prob_beta:
                inter_layer.two_layer_graph.nodes[node_i]['state'] = \
                    -(inter_layer.two_layer_graph.nodes[node_i]['state'])
                self.B_COUNT += 1
            prob_beta_list.append(prob_beta)
        # Idiom: np.mean replaces the manual np.sum(...) / len(...).
        prob_beta_mean = np.mean(np.array(prob_beta_list))
        return inter_layer, prob_beta_mean

    def B_state_change_probability_cal(self, setting, inter_layer, beta):
        """Compute every B-layer node's flip probability WITHOUT mutating state.

        Returns:
            (prob_beta_array, prob_beta_mean).
        """
        prob_beta_list = []
        for node_i in range(setting.A_node, setting.A_node + setting.B_node):
            neighbors = np.array(sorted(nx.neighbors(inter_layer.two_layer_graph, node_i)))
            neighbor_state = []
            for neighbor in neighbors:
                neighbor_state.append(inter_layer.two_layer_graph.nodes[neighbor]['state'])
            neighbor_array = np.array(neighbor_state)
            same_orientation = int(np.sum(neighbor_array * inter_layer.two_layer_graph.nodes[node_i]['state'] > 0))
            opposite_orientation = len(neighbors) - same_orientation
            prob_beta = (opposite_orientation / len(neighbors)) ** beta
            prob_beta_list.append(prob_beta)
        prob_beta_array = np.array(prob_beta_list)
        prob_beta_mean = np.mean(prob_beta_array)
        return prob_beta_array, prob_beta_mean
if __name__ == "__main__":
    # Smoke test: print the summed B-layer state before and after one
    # dynamics sweep, plus the wall-clock time taken.
    print("DecisionDynamics")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    inter_layer = InterconnectedLayerModeling.InterconnectedLayerModeling(setting)
    b_nodes = range(setting.A_node, setting.A_node + setting.B_node)
    print(sum(inter_layer.two_layer_graph.nodes[i]['state'] for i in b_nodes))
    start = time.time()
    decision = DecisionDynamics()
    inter_layer = decision.B_layer_dynamics(setting, inter_layer, 1.5)[0]
    print(sum(inter_layer.two_layer_graph.nodes[i]['state'] for i in b_nodes))
    end = time.time()
    print(end - start)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,519
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/UI.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Purple\CompetingLayer\mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
    """Build the main-window widget tree.

    Auto-generated by pyuic5 from ``mainwindow.ui`` — per the file header,
    any manual edits here are lost on regeneration, so this body is kept
    byte-identical and only annotated.  Layout: a QTabWidget (``Total``)
    with tabs Initial_State, Total_Result, Result_gamma, Result_beta, Movie.
    """
    # --- window shell and icon ---
    MainWindow.setObjectName("MainWindow")
    MainWindow.resize(1198, 849)
    icon = QtGui.QIcon()
    icon.addPixmap(QtGui.QPixmap("network.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    MainWindow.setWindowIcon(icon)
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    self.centralwidget.setObjectName("centralwidget")
    self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
    self.gridLayout_2.setObjectName("gridLayout_2")
    # --- top-level tab widget ---
    self.Total = QtWidgets.QTabWidget(self.centralwidget)
    font = QtGui.QFont()
    font.setFamily("Sitka Small")
    font.setPointSize(10)
    self.Total.setFont(font)
    self.Total.setTabShape(QtWidgets.QTabWidget.Rounded)
    self.Total.setObjectName("Total")
    # --- tab 1: "Initial State" with the condition-setting panel ---
    self.Initial_State = QtWidgets.QWidget()
    self.Initial_State.setObjectName("Initial_State")
    self.Initial_Condition = QtWidgets.QGroupBox(self.Initial_State)
    self.Initial_Condition.setGeometry(QtCore.QRect(920, 20, 261, 741))
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.Initial_Condition.setFont(font)
    self.Initial_Condition.setStyleSheet("font: 10pt \"Sitka Text\";")
    self.Initial_Condition.setFlat(False)
    self.Initial_Condition.setObjectName("Initial_Condition")
    self.Simulation_Start = QtWidgets.QPushButton(self.Initial_Condition)
    self.Simulation_Start.setGeometry(QtCore.QRect(100, 680, 151, 41))
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.Simulation_Start.setFont(font)
    self.Simulation_Start.setObjectName("Simulation_Start")
    self.layoutWidget = QtWidgets.QWidget(self.Initial_Condition)
    self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 237, 642))
    self.layoutWidget.setObjectName("layoutWidget")
    self.gridLayout_4 = QtWidgets.QGridLayout(self.layoutWidget)
    self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
    self.gridLayout_4.setObjectName("gridLayout_4")
    # --- "Layer A" parameter group (node count, edges, structure, state range) ---
    self.Layer_A = QtWidgets.QGroupBox(self.layoutWidget)
    self.Layer_A.setObjectName("Layer_A")
    self.gridLayout = QtWidgets.QGridLayout(self.Layer_A)
    self.gridLayout.setObjectName("gridLayout")
    self.A_node = QtWidgets.QLabel(self.Layer_A)
    self.A_node.setObjectName("A_node")
    self.gridLayout.addWidget(self.A_node, 0, 0, 1, 1)
    self.A_NodeBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_NodeBox.setFont(font)
    self.A_NodeBox.setIconSize(QtCore.QSize(12, 12))
    self.A_NodeBox.setObjectName("A_NodeBox")
    self.A_NodeBox.addItem("")
    self.A_NodeBox.addItem("")
    self.gridLayout.addWidget(self.A_NodeBox, 0, 1, 1, 1)
    self.A_internaledge = QtWidgets.QLabel(self.Layer_A)
    self.A_internaledge.setObjectName("A_internaledge")
    self.gridLayout.addWidget(self.A_internaledge, 1, 0, 1, 1)
    self.A_InternalEdgeBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_InternalEdgeBox.setFont(font)
    self.A_InternalEdgeBox.setMaxVisibleItems(0)
    self.A_InternalEdgeBox.setIconSize(QtCore.QSize(12, 12))
    self.A_InternalEdgeBox.setObjectName("A_InternalEdgeBox")
    self.A_InternalEdgeBox.addItem("")
    self.A_InternalEdgeBox.addItem("")
    self.gridLayout.addWidget(self.A_InternalEdgeBox, 1, 1, 1, 1)
    self.A_externaledge = QtWidgets.QLabel(self.Layer_A)
    self.A_externaledge.setTextFormat(QtCore.Qt.AutoText)
    self.A_externaledge.setObjectName("A_externaledge")
    self.gridLayout.addWidget(self.A_externaledge, 2, 0, 1, 1)
    self.A_ExternalEdgeBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_ExternalEdgeBox.setFont(font)
    self.A_ExternalEdgeBox.setIconSize(QtCore.QSize(12, 12))
    self.A_ExternalEdgeBox.setObjectName("A_ExternalEdgeBox")
    self.A_ExternalEdgeBox.addItem("")
    self.gridLayout.addWidget(self.A_ExternalEdgeBox, 2, 1, 1, 1)
    self.A_structure = QtWidgets.QLabel(self.Layer_A)
    self.A_structure.setObjectName("A_structure")
    self.gridLayout.addWidget(self.A_structure, 3, 0, 1, 1)
    self.A_StructureBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_StructureBox.setFont(font)
    self.A_StructureBox.setIconSize(QtCore.QSize(12, 12))
    self.A_StructureBox.setObjectName("A_StructureBox")
    self.A_StructureBox.addItem("")
    self.A_StructureBox.addItem("")
    self.gridLayout.addWidget(self.A_StructureBox, 3, 1, 1, 1)
    self.A_state = QtWidgets.QLabel(self.Layer_A)
    self.A_state.setObjectName("A_state")
    self.gridLayout.addWidget(self.A_state, 4, 0, 1, 1)
    self.A_StateBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_StateBox.setFont(font)
    self.A_StateBox.setIconSize(QtCore.QSize(12, 12))
    self.A_StateBox.setObjectName("A_StateBox")
    self.A_StateBox.addItem("")
    self.gridLayout.addWidget(self.A_StateBox, 4, 1, 1, 1)
    self.A_max = QtWidgets.QLabel(self.Layer_A)
    self.A_max.setObjectName("A_max")
    self.gridLayout.addWidget(self.A_max, 5, 0, 1, 1)
    self.A_MAXBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_MAXBox.setFont(font)
    self.A_MAXBox.setIconSize(QtCore.QSize(12, 12))
    self.A_MAXBox.setObjectName("A_MAXBox")
    self.A_MAXBox.addItem("")
    self.gridLayout.addWidget(self.A_MAXBox, 5, 1, 1, 1)
    self.A_min = QtWidgets.QLabel(self.Layer_A)
    self.A_min.setObjectName("A_min")
    self.gridLayout.addWidget(self.A_min, 6, 0, 1, 1)
    self.A_MINBox = QtWidgets.QComboBox(self.Layer_A)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.A_MINBox.setFont(font)
    self.A_MINBox.setIconSize(QtCore.QSize(12, 12))
    self.A_MINBox.setObjectName("A_MINBox")
    self.A_MINBox.addItem("")
    self.gridLayout.addWidget(self.A_MINBox, 6, 1, 1, 1)
    self.gridLayout_4.addWidget(self.Layer_A, 0, 0, 1, 1)
    # --- "Layer B" parameter group (same fields as Layer A, form layout) ---
    self.Layer_B = QtWidgets.QGroupBox(self.layoutWidget)
    self.Layer_B.setObjectName("Layer_B")
    self.formLayout = QtWidgets.QFormLayout(self.Layer_B)
    self.formLayout.setObjectName("formLayout")
    self.B_node = QtWidgets.QLabel(self.Layer_B)
    self.B_node.setObjectName("B_node")
    self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.B_node)
    self.B_NodeBox = QtWidgets.QComboBox(self.Layer_B)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.B_NodeBox.setFont(font)
    self.B_NodeBox.setMaxVisibleItems(0)
    self.B_NodeBox.setIconSize(QtCore.QSize(12, 12))
    self.B_NodeBox.setObjectName("B_NodeBox")
    self.B_NodeBox.addItem("")
    self.B_NodeBox.addItem("")
    self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.B_NodeBox)
    self.label_11 = QtWidgets.QLabel(self.Layer_B)
    self.label_11.setObjectName("label_11")
    self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_11)
    self.B_InternalEdgeBox = QtWidgets.QComboBox(self.Layer_B)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.B_InternalEdgeBox.setFont(font)
    self.B_InternalEdgeBox.setIconSize(QtCore.QSize(12, 12))
    self.B_InternalEdgeBox.setObjectName("B_InternalEdgeBox")
    self.B_InternalEdgeBox.addItem("")
    self.B_InternalEdgeBox.addItem("")
    self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.B_InternalEdgeBox)
    self.label_12 = QtWidgets.QLabel(self.Layer_B)
    self.label_12.setTextFormat(QtCore.Qt.AutoText)
    self.label_12.setObjectName("label_12")
    self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_12)
    self.B_ExternalEdgeBox = QtWidgets.QComboBox(self.Layer_B)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.B_ExternalEdgeBox.setFont(font)
    self.B_ExternalEdgeBox.setIconSize(QtCore.QSize(12, 12))
    self.B_ExternalEdgeBox.setObjectName("B_ExternalEdgeBox")
    self.B_ExternalEdgeBox.addItem("")
    self.B_ExternalEdgeBox.addItem("")
    self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.B_ExternalEdgeBox)
    self.label_13 = QtWidgets.QLabel(self.Layer_B)
    self.label_13.setObjectName("label_13")
    self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.label_13)
    self.B_StructureBox = QtWidgets.QComboBox(self.Layer_B)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.B_StructureBox.setFont(font)
    self.B_StructureBox.setIconSize(QtCore.QSize(12, 12))
    self.B_StructureBox.setObjectName("B_StructureBox")
    self.B_StructureBox.addItem("")
    self.B_StructureBox.addItem("")
    self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.B_StructureBox)
    self.label_14 = QtWidgets.QLabel(self.Layer_B)
    self.label_14.setObjectName("label_14")
    self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_14)
    self.B_StateBox = QtWidgets.QComboBox(self.Layer_B)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.B_StateBox.setFont(font)
    self.B_StateBox.setIconSize(QtCore.QSize(12, 12))
    self.B_StateBox.setObjectName("B_StateBox")
    self.B_StateBox.addItem("")
    self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.B_StateBox)
    self.gridLayout_4.addWidget(self.Layer_B, 1, 0, 1, 1)
    # --- "Simulation Condition" group (steps, drawing, database, structures) ---
    self.Simulation_Condition = QtWidgets.QGroupBox(self.layoutWidget)
    self.Simulation_Condition.setObjectName("Simulation_Condition")
    self.gridLayout_3 = QtWidgets.QGridLayout(self.Simulation_Condition)
    self.gridLayout_3.setObjectName("gridLayout_3")
    self.label_7 = QtWidgets.QLabel(self.Simulation_Condition)
    self.label_7.setObjectName("label_7")
    self.gridLayout_3.addWidget(self.label_7, 0, 0, 1, 1)
    self.StepBox = QtWidgets.QComboBox(self.Simulation_Condition)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.StepBox.setFont(font)
    self.StepBox.setIconSize(QtCore.QSize(12, 12))
    self.StepBox.setObjectName("StepBox")
    self.StepBox.addItem("")
    self.StepBox.addItem("")
    self.gridLayout_3.addWidget(self.StepBox, 0, 1, 1, 1)
    self.label_8 = QtWidgets.QLabel(self.Simulation_Condition)
    self.label_8.setObjectName("label_8")
    self.gridLayout_3.addWidget(self.label_8, 2, 0, 1, 1)
    self.DrawingBox = QtWidgets.QComboBox(self.Simulation_Condition)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.DrawingBox.setFont(font)
    self.DrawingBox.setIconSize(QtCore.QSize(12, 12))
    self.DrawingBox.setObjectName("DrawingBox")
    self.DrawingBox.addItem("")
    self.DrawingBox.addItem("")
    self.gridLayout_3.addWidget(self.DrawingBox, 2, 1, 1, 1)
    self.label_9 = QtWidgets.QLabel(self.Simulation_Condition)
    self.label_9.setObjectName("label_9")
    self.gridLayout_3.addWidget(self.label_9, 6, 0, 1, 1)
    self.DatabaseBox = QtWidgets.QComboBox(self.Simulation_Condition)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.DatabaseBox.setFont(font)
    self.DatabaseBox.setIconSize(QtCore.QSize(12, 12))
    self.DatabaseBox.setObjectName("DatabaseBox")
    self.DatabaseBox.addItem("")
    self.DatabaseBox.addItem("")
    self.gridLayout_3.addWidget(self.DatabaseBox, 6, 1, 1, 1)
    self.label_16 = QtWidgets.QLabel(self.Simulation_Condition)
    self.label_16.setObjectName("label_16")
    self.gridLayout_3.addWidget(self.label_16, 10, 0, 1, 1)
    self.StructuresBox = QtWidgets.QComboBox(self.Simulation_Condition)
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.StructuresBox.setFont(font)
    self.StructuresBox.setIconSize(QtCore.QSize(12, 12))
    self.StructuresBox.setObjectName("StructuresBox")
    self.StructuresBox.addItem("")
    self.StructuresBox.addItem("")
    self.StructuresBox.addItem("")
    self.StructuresBox.addItem("")
    self.gridLayout_3.addWidget(self.StructuresBox, 10, 1, 1, 1)
    self.gridLayout_4.addWidget(self.Simulation_Condition, 2, 0, 1, 1)
    self.condition_settingButton = QtWidgets.QPushButton(self.layoutWidget)
    self.condition_settingButton.setObjectName("condition_settingButton")
    self.gridLayout_4.addWidget(self.condition_settingButton, 3, 0, 1, 1)
    # --- remaining Initial_State widgets: preview frame and draw button ---
    self.Initial_State_Button = QtWidgets.QPushButton(self.Initial_State)
    self.Initial_State_Button.setGeometry(QtCore.QRect(780, 740, 111, 27))
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.Initial_State_Button.setFont(font)
    self.Initial_State_Button.setObjectName("Initial_State_Button")
    self.frame = QtWidgets.QFrame(self.Initial_State)
    self.frame.setGeometry(QtCore.QRect(20, 10, 881, 711))
    self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
    self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
    self.frame.setObjectName("frame")
    self.horizontalLayoutWidget = QtWidgets.QWidget(self.frame)
    self.horizontalLayoutWidget.setGeometry(QtCore.QRect(20, 20, 851, 681))
    self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
    self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
    self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
    self.horizontalLayout.setObjectName("horizontalLayout")
    self.Total.addTab(self.Initial_State, "")
    # --- tab 2: "Total Result" ---
    self.Total_Result = QtWidgets.QWidget()
    self.Total_Result.setObjectName("Total_Result")
    self.Simulation_Start_2 = QtWidgets.QPushButton(self.Total_Result)
    self.Simulation_Start_2.setGeometry(QtCore.QRect(820, 730, 151, 41))
    font = QtGui.QFont()
    font.setFamily("Sitka Text")
    font.setPointSize(10)
    font.setBold(False)
    font.setItalic(False)
    font.setWeight(50)
    self.Simulation_Start_2.setFont(font)
    self.Simulation_Start_2.setObjectName("Simulation_Start_2")
    self.graphicsView_2 = QtWidgets.QGraphicsView(self.Total_Result)
    self.graphicsView_2.setGeometry(QtCore.QRect(20, 10, 1101, 711))
    self.graphicsView_2.setObjectName("graphicsView_2")
    self.Total.addTab(self.Total_Result, "")
    # --- tabs 3-5: gamma result, beta result, movie (placeholder button boxes) ---
    self.Result_gamma = QtWidgets.QWidget()
    self.Result_gamma.setObjectName("Result_gamma")
    self.buttonBox_4 = QtWidgets.QDialogButtonBox(self.Result_gamma)
    self.buttonBox_4.setGeometry(QtCore.QRect(430, 510, 341, 32))
    self.buttonBox_4.setOrientation(QtCore.Qt.Horizontal)
    self.buttonBox_4.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
    self.buttonBox_4.setObjectName("buttonBox_4")
    self.Total.addTab(self.Result_gamma, "")
    self.Result_beta = QtWidgets.QWidget()
    self.Result_beta.setObjectName("Result_beta")
    self.buttonBox_5 = QtWidgets.QDialogButtonBox(self.Result_beta)
    self.buttonBox_5.setGeometry(QtCore.QRect(420, 520, 341, 32))
    self.buttonBox_5.setOrientation(QtCore.Qt.Horizontal)
    self.buttonBox_5.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
    self.buttonBox_5.setObjectName("buttonBox_5")
    self.Total.addTab(self.Result_beta, "")
    self.Movie = QtWidgets.QWidget()
    self.Movie.setObjectName("Movie")
    self.buttonBox_6 = QtWidgets.QDialogButtonBox(self.Movie)
    self.buttonBox_6.setGeometry(QtCore.QRect(430, 520, 341, 32))
    self.buttonBox_6.setOrientation(QtCore.Qt.Horizontal)
    self.buttonBox_6.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
    self.buttonBox_6.setObjectName("buttonBox_6")
    self.Total.addTab(self.Movie, "")
    # --- final assembly: central widget, status bar, signal wiring ---
    self.gridLayout_2.addWidget(self.Total, 0, 0, 1, 1)
    MainWindow.setCentralWidget(self.centralwidget)
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    self.statusbar.setObjectName("statusbar")
    MainWindow.setStatusBar(self.statusbar)
    self.retranslateUi(MainWindow)
    self.Total.setCurrentIndex(0)
    self.Initial_State_Button.clicked.connect(self.frame.show)
    QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install every user-visible string through Qt's translation system.

    Generated by pyuic from a Qt Designer .ui file -- regenerating the UI
    overwrites this method, so avoid hand-editing anything but comments.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    # --- Initial-condition panel ---
    self.Initial_Condition.setTitle(_translate("MainWindow", "Initial Condition"))
    self.Simulation_Start.setText(_translate("MainWindow", "Simulation Start"))
    # --- Layer A parameter widgets (combo-box choices are preset values) ---
    self.Layer_A.setTitle(_translate("MainWindow", "Layer A"))
    self.A_node.setText(_translate("MainWindow", "Node"))
    self.A_NodeBox.setItemText(0, _translate("MainWindow", "2048"))
    self.A_NodeBox.setItemText(1, _translate("MainWindow", "128"))
    self.A_internaledge.setText(_translate("MainWindow", "Internal Edge"))
    self.A_InternalEdgeBox.setItemText(0, _translate("MainWindow", "5"))
    self.A_InternalEdgeBox.setItemText(1, _translate("MainWindow", "2"))
    self.A_externaledge.setText(_translate("MainWindow", "External Edge"))
    self.A_ExternalEdgeBox.setItemText(0, _translate("MainWindow", "1"))
    self.A_structure.setText(_translate("MainWindow", "Structure"))
    self.A_StructureBox.setItemText(0, _translate("MainWindow", "1"))
    self.A_StructureBox.setItemText(1, _translate("MainWindow", "2"))
    self.A_state.setText(_translate("MainWindow", "State"))
    self.A_StateBox.setItemText(0, _translate("MainWindow", "[1, 2]"))
    self.A_max.setText(_translate("MainWindow", "MAX"))
    self.A_MAXBox.setItemText(0, _translate("MainWindow", "+2"))
    self.A_min.setText(_translate("MainWindow", "MIN"))
    self.A_MINBox.setItemText(0, _translate("MainWindow", "-2"))
    # --- Layer B parameter widgets ---
    self.Layer_B.setTitle(_translate("MainWindow", "Layer B"))
    self.B_node.setText(_translate("MainWindow", "Node"))
    self.B_NodeBox.setItemText(0, _translate("MainWindow", "2048"))
    self.B_NodeBox.setItemText(1, _translate("MainWindow", "128"))
    self.label_11.setText(_translate("MainWindow", "Internal Edge"))
    self.B_InternalEdgeBox.setItemText(0, _translate("MainWindow", "5"))
    self.B_InternalEdgeBox.setItemText(1, _translate("MainWindow", "2"))
    self.label_12.setText(_translate("MainWindow", "External Edge"))
    self.B_ExternalEdgeBox.setItemText(0, _translate("MainWindow", "1"))
    self.B_ExternalEdgeBox.setItemText(1, _translate("MainWindow", "16"))
    self.label_13.setText(_translate("MainWindow", "Structure"))
    self.B_StructureBox.setItemText(0, _translate("MainWindow", "1"))
    self.B_StructureBox.setItemText(1, _translate("MainWindow", "2"))
    self.label_14.setText(_translate("MainWindow", "State"))
    self.B_StateBox.setItemText(0, _translate("MainWindow", "[-1]"))
    # --- Simulation-condition widgets ---
    self.Simulation_Condition.setTitle(_translate("MainWindow", "Simulation Condition"))
    self.label_7.setText(_translate("MainWindow", "Step"))
    self.StepBox.setItemText(0, _translate("MainWindow", "100"))
    self.StepBox.setItemText(1, _translate("MainWindow", "30"))
    self.label_8.setText(_translate("MainWindow", "Drawing"))
    self.DrawingBox.setItemText(0, _translate("MainWindow", "False"))
    self.DrawingBox.setItemText(1, _translate("MainWindow", "True"))
    self.label_9.setText(_translate("MainWindow", "Database"))
    self.DatabaseBox.setItemText(0, _translate("MainWindow", "renew_competition"))
    self.DatabaseBox.setItemText(1, _translate("MainWindow", "competition"))
    self.label_16.setText(_translate("MainWindow", "Structures"))
    self.StructuresBox.setItemText(0, _translate("MainWindow", "RR-RR"))
    self.StructuresBox.setItemText(1, _translate("MainWindow", "BA-BA"))
    self.StructuresBox.setItemText(2, _translate("MainWindow", "RR-BA"))
    self.StructuresBox.setItemText(3, _translate("MainWindow", "BA-RA"))
    self.condition_settingButton.setText(_translate("MainWindow", "Setting"))
    self.Initial_State_Button.setText(_translate("MainWindow", "Initial State"))
    # --- Tab labels ---
    self.Total.setTabText(self.Total.indexOf(self.Initial_State), _translate("MainWindow", "Initial State"))
    self.Simulation_Start_2.setText(_translate("MainWindow", "Simulation Start"))
    self.Total.setTabText(self.Total.indexOf(self.Total_Result), _translate("MainWindow", "Total Result"))
    self.Total.setTabText(self.Total.indexOf(self.Result_gamma), _translate("MainWindow", "Result(gamma)"))
    self.Total.setTabText(self.Total.indexOf(self.Result_beta), _translate("MainWindow", "Result(beta)"))
    self.Total.setTabText(self.Total.indexOf(self.Movie), _translate("MainWindow", "Movie"))
if __name__ == "__main__":
    # Standalone preview of the generated UI; normally this module is
    # imported by the application rather than run directly.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # exec_() blocks until the window closes; exit with Qt's return code.
    sys.exit(app.exec_())
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,520
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/Layer_A_Modeling.py
|
import networkx as nx
import numpy as np
import pandas as pd
import random
import math
import Setting_Simulation_Value
## A layer Modeling : A, A_edges, AB_edges, AB_neighbor
class Layer_A_Modeling():
    """Builds layer A of the interconnected two-layer network.

    Exposes:
      * A_edges     -- intra-layer topology (a networkx graph)
      * AB_edges    -- list of (B-node, A-node) inter-layer edge tuples
      * AB_neighbor -- per-B-node array of the A-node ids wired to it
      * A_node_info -- DataFrame of drawing metadata per A node
      * G_A         -- layer-A graph with per-node 'name'/'state' attributes
    """
    def __init__(self, setting):
        # network : 1 = random regular graph 2 = barabasi-albert graph
        A_edges_array = self.A_layer_config(setting)
        self.A_edges = A_edges_array[0]
        self.AB_edges = A_edges_array[1]
        self.AB_neighbor = A_edges_array[2]
        self.A_node_info = self.making_node_info()
        self.G_A = self.making_layer_A_graph(setting)
    def making_layer_A_graph(self, setting):
        """Create G_A: one node per index with its initial state from setting.A,
        then copy the edges chosen by select_layer_A_model()."""
        self.G_A = nx.Graph()
        for i in range(setting.A_node):
            self.G_A.add_node(i, name='A_%s' % i, state=setting.A[i])
        A_edges_list = sorted(self.A_edges.edges)
        self.G_A.add_edges_from(A_edges_list)
        return self.G_A
    def A_layer_config(self, setting):
        # Layer-A components, e.g. A_layer_config(state=[1, 2], node=2048,
        # edge=5, Max=2, Min=-2).  (Translated from the original Korean note.)
        self.select_layer_A_model(setting)
        self.making_interconnected_edges(setting)
        return self.A_edges, self.AB_edges, self.AB_neighbor
    # A: per-node state of layer A; A_state: kinds of states (1, 2, -1, -2);
    # A_node: node count; A_edge: internal edge count; A_edges: internal
    # connections (tuples); MAX/MIN: state bounds.
    def select_layer_A_model(self, setting):
        """Pick the layer-A topology from the 'XX-YY' Structure string
        (prefix before '-' selects the model for layer A)."""
        if setting.Structure.split('-')[0] == 'RR':
            self.making_layer_A_random_regular(setting)
        elif setting.Structure.split('-')[0] == 'BA':
            self.making_layer_A_barabasi_albert(setting)
        return self.A_edges
    def making_layer_A_random_regular(self, setting):
        # Layer-A random-regular network.
        self.A_edges = nx.random_regular_graph(setting.A_edge, setting.A_node, seed=None)
        return self.A_edges
    def making_layer_A_barabasi_albert(self, setting):
        # Layer-A Barabasi-Albert (preferential-attachment) network.
        self.A_edges = nx.barabasi_albert_graph(setting.A_node, setting.A_edge, seed=None)
        return self.A_edges
    def making_interconnected_edges(self, setting):
        """Wire each B node to setting.B_inter_edges consecutive A nodes."""
        self.AB_edges = []
        self.AB_neighbor = []
        for i in range(int(setting.A_node / setting.B_inter_edges)):
            for j in range(setting.B_inter_edges):
                connected_A_node = np.array(self.A_edges.nodes).reshape(-1, setting.B_inter_edges)[i][j]
                self.AB_neighbor.append(connected_A_node)
                self.AB_edges.append((i, connected_A_node))
        self.AB_neighbor = np.array(self.AB_neighbor).reshape(-1, setting.B_inter_edges)
        return self.AB_edges, self.AB_neighbor
    # AB_neighbor lists, per B-node index, the A nodes wired to it,
    # e.g. AB_neighbor[0] = array([0, 1]) means B node 0 connects to
    # A nodes 0 and 1.  An AB_edges entry (0, 1) means B node 0 is
    # connected to A node 1.  (Translated from the original Korean notes.)
    def making_node_info(self):  # layer, node_number, location
        """Random 2-D drawing coordinates for every layer-A node."""
        node_info = [{'node_number': i, 'layer': 'A', 'location': (random.random(), random.random())}
                     for i in sorted(self.A_edges.nodes)]
        node_info = pd.DataFrame(node_info, columns=['node_number', 'layer', 'location'])
        return node_info
if __name__ == "__main__" :
    # Smoke test: build layer A and print the sum of all node states
    # (with the default setting this should equal 1024*1 + 1024*2).
    print("layer_A")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    Layer_A = Layer_A_Modeling(setting)
    print(Layer_A.G_A.nodes)
    state = 0
    for i in range(len(Layer_A.G_A.nodes)):
        state += Layer_A.G_A.nodes[i]['state']
    print(state)
    #print(Layer_A.AB_edges)
    #print(Layer_A.AB_neighbor)
    #print(Layer_A.SS.A_node)
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,521
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/Setting_Simulation_Value.py
|
import numpy as np
import math
import random
class Setting_Simulation_Value:
    """Central container for every tunable parameter of the two-layer
    simulation: storage target, topology choice, layer sizes, initial
    state arrays, and the (gamma, beta) sweep grid."""

    def __init__(self):
        # Storage target.
        self.database = 'paper_revised_data'  # 'competition renew_competition'
        self.table = 'simulation_table4'
        # Topology / model selection.
        self.MODEL = 'BA-BA'  # 'RR(2)-RR(5), 'BA-BA', 'BA-RR(5)', 'RR(5)-BA', 'RR(10)-BA'
        self.Structure = 'BA-BA'
        self.Limited_step = 100
        self.Repeating_number = 20
        # Layer A: opinion layer.
        self.A_state = [1, 2]
        self.A_node = 2048
        self.A_edge = 5
        self.A_inter_edges = 1
        self.A = self.static_making_A_array()
        self.MAX = 2
        self.MIN = -2
        # Layer B: decision layer.
        self.B_state = [-1]
        self.B_node = 2048
        self.B_edge = 5
        self.B_inter_edges = int(self.A_node / self.B_node)
        self.B = self.static_making_B_array()
        self.DB = 'MySQL'
        # Sweep grid: `gap` points per axis.
        self.gap = 40
        self.R, self.D = self.simulation_condition(self.gap)
        self.variable_list = self.gamma_and_beta_list(self.R, self.D)
        self.workers = 5

    def simulation_condition(self, gap):
        """Build the gamma axis (R) and the beta axis (D), each of *gap* points."""
        scale = self.making_beta_scale(gap)
        self.R = np.linspace(0, 2, gap)
        self.D = np.linspace(scale[0], scale[1], gap)
        return self.R, self.D

    def gamma_and_beta_list(self, gamma_list, beta_list):
        """Cartesian product of the two axes as (gamma, beta) tuples,
        gamma-major to match the simulation's iteration order."""
        self.variable_list = [(gamma, beta) for gamma in gamma_list for beta in beta_list]
        return self.variable_list

    def making_beta_scale(self, a):
        """Beta-axis bounds derived from layer-B connectivity.

        Returns the original (lower, upper, a) triple; callers index [0]/[1].
        """
        numerator = math.log((1 / (self.B_edge + 1)) ** 3)
        denominator = math.log(self.B_inter_edges / (self.B_edge + self.B_inter_edges))
        return 0, numerator / denominator, a

    def static_making_A_array(self):
        """Evenly mixed layer-A states, shuffled into a random node order."""
        repeated = self.A_state * int(self.A_node / len(self.A_state))
        self.A = np.array(repeated)
        random.shuffle(self.A)
        return self.A

    def static_making_B_array(self):
        """Evenly mixed layer-B states, shuffled into a random node order."""
        repeated = self.B_state * int(self.B_node / len(self.B_state))
        self.B = np.array(repeated)
        random.shuffle(self.B)
        return self.B
if __name__ == "__main__":
    # Quick smoke test: print the configured sizes and the sweep grid.
    SS = Setting_Simulation_Value()
    #layer_A1 = Layer_A_Modeling.Layer_A_Modeling(SS)
    print(SS.A_node)
    #print(len(layer_A1.A))
    #layer_A2 = Layer_A_Modeling.Layer_A_Modeling(SS)
    print(SS.B_node)
    print(SS.A)
    print(SS.variable_list)
    #print(len(layer_A2.A))
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,522
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/Visualization.py
|
import SelectDB
import numpy as np
import Setting_Simulation_Value
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import pandas as pd
from sympy import *
from mpl_toolkits.mplot3d.axes3d import *
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from numba import jit
matplotlib.use("TkAgg")
class Visualization:
    """Chart helpers for simulation results held in a pandas DataFrame.

    All plotting methods draw onto the current matplotlib figure and return
    nothing; callers are expected to call plt.show()/savefig().
    Assumes df has columns gamma, beta, Steps, LAYER_A_MEAN, LAYER_B_MEAN
    (plus PROB_BETA / *_DIFFERENT_STATE_RATIO for the respective charts) --
    TODO confirm against the simulation DB schema.
    """

    def plot_2D_gamma_for_average_state(self, setting, df, beta_value, marker):
        """Plot average state vs gamma at the final step, for one beta value."""
        beta_list = Visualization.making_select_list(df, 'beta')
        # Snap the requested beta to the nearest grid value actually present.
        temp_value = Visualization.covert_to_select_list_value(beta_list, beta_value)
        df = df[df.Steps == setting.Limited_step]
        df = df[df.beta == temp_value]
        plt.plot(df['gamma'], ((df['LAYER_A_MEAN'] / setting.MAX) + df['LAYER_B_MEAN']) / 2, marker,
                 label=r'$\beta$=%.2f' % temp_value,
                 markersize=6, linewidth=1.5, markeredgewidth=1)

    def plot_2D_beta_for_average_state(self, setting, df, gamma_value, marker):
        """Plot average state vs beta at the final step, for one gamma value."""
        gamma_list = Visualization.making_select_list(df, 'gamma')
        temp_value = Visualization.covert_to_select_list_value(gamma_list, gamma_value)
        df = df[df.Steps == setting.Limited_step]
        df = df[df.gamma == temp_value]
        plt.style.use('seaborn-whitegrid')
        plt.plot(df['beta'], ((df['LAYER_A_MEAN'] / setting.MAX) + df['LAYER_B_MEAN']) / 2, marker,
                 label=r'$\gamma$=%.2f' % temp_value,
                 markersize=6, linewidth=1.5, markeredgewidth=1)

    def plot_3D_scatter_for_average_state(self, df, setting=None):
        """3-D scatter of average state over the (beta, gamma) plane.

        BUG FIX: this method used to read an undefined name ``setting`` and
        only worked when a module-level global of that name happened to
        exist.  ``setting`` is now an explicit (optional) parameter; when
        omitted it falls back to the module global for backward
        compatibility with the old call sites.
        """
        if setting is None:
            setting = globals()['setting']
        ax = plt.axes(projection='3d')
        ax.scatter(df['beta'], df['gamma'], ((df['LAYER_A_MEAN'] / setting.MAX) + df['LAYER_B_MEAN']) / 2,
                   c=((df['LAYER_A_MEAN'] / setting.MAX) + df['LAYER_B_MEAN']) / 2, cmap='RdBu', linewidth=0.2)
        ax.set_xlabel(r'$\beta$', fontsize=18, labelpad=8)
        ax.set_ylabel(r'$\gamma$', fontsize=18, labelpad=8)
        ax.set_zlabel('AS', fontsize=18, labelpad=8)
        ax.set_title(r'$\beta$-$\gamma$-AS', fontsize=18)
        ax.tick_params(axis='both', labelsize=14)
        ax.view_init(45, 45)

    def plot_3D_trisurf_for_average_state(self, df, setting=None):
        """Triangulated 3-D surface of average state over (beta, gamma).

        BUG FIX: same undefined-``setting`` issue as
        plot_3D_scatter_for_average_state; see the note there.
        """
        if setting is None:
            setting = globals()['setting']
        ax = plt.axes(projection='3d')
        ax.plot_trisurf(df['beta'], df['gamma'], ((df['LAYER_A_MEAN'] / setting.MAX) + df['LAYER_B_MEAN']) / 2,
                        cmap='RdBu', edgecolor='none')
        ax.set_xlabel(r'$\beta$', fontsize=18, labelpad=8)
        ax.set_ylabel(r'$\gamma$', fontsize=18, labelpad=8)
        ax.set_zlabel('AS', fontsize=18, labelpad=8)
        ax.set_title(r'$\beta$-$\gamma$-AS', fontsize=18)
        ax.tick_params(axis='both', labelsize=14)
        ax.view_init(45, 45)

    def plot_3D_contour_for_average_state(self, setting, df):
        """3-D contour plot of the gridded average state."""
        beta_list = Visualization.making_select_list(df, 'beta')    # sorted unique values (ndarray)
        gamma_list = Visualization.making_select_list(df, 'gamma')
        X, Y = np.meshgrid(beta_list, gamma_list)
        Z = Visualization.state_list_function(setting, df, gamma_list, beta_list)
        ax = plt.axes(projection='3d')
        ax.contour3D(X, Y, Z, 50, cmap='RdBu')
        ax.set_xlabel(r'$\beta$', fontsize=18, labelpad=6)
        ax.set_ylabel(r'$\gamma$', fontsize=18, labelpad=6)
        ax.set_zlabel('AS', fontsize=18, labelpad=6)
        ax.set_title(r'$\beta$-$\gamma$-AS', fontsize=18)
        ax.view_init(45, 45)

    def plot_3D_to_2D_contour_for_average_state(self, setting, df):
        """Filled 2-D contour (top-down projection) of the average state."""
        beta_list = Visualization.making_select_list(df, 'beta')    # sorted unique values (ndarray)
        gamma_list = Visualization.making_select_list(df, 'gamma')
        X, Y = np.meshgrid(beta_list, gamma_list)
        Z = Visualization.state_list_function(setting, df, gamma_list, beta_list)
        plt.contourf(X, Y, Z, 50, cmap='RdBu')

    def average_state_for_steps(self, setting, df, beta_value, gamma_value):
        """Time series of the average state for a single (gamma, beta) cell."""
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        temp_beta_value = Visualization.covert_to_select_list_value(beta_list, beta_value)
        temp_gamma_value = Visualization.covert_to_select_list_value(gamma_list, gamma_value)
        df1 = df[df.gamma == temp_gamma_value]
        df2 = df1[df1.beta == temp_beta_value]
        df3 = df2.sort_values(by='Steps', ascending=True)
        plt.plot(df3['Steps'], ((df3['LAYER_A_MEAN'] / setting.MAX) + df3['LAYER_B_MEAN']) / 2,
                 linestyle=':', marker='o', markersize=2, linewidth=0.3)
        plt.ylabel('AS', fontsize=18, labelpad=4)
        plt.xlabel('time(step)', fontsize=18, labelpad=4)

    def average_state_for_steps_scale(self, df, beta_values, gamma_values, setting=None):
        """Overlay the average-state time series for every (gamma, beta) cell
        inside the [min, max] windows given by *beta_values*/*gamma_values*.

        BUG FIX: same undefined-``setting`` issue as
        plot_3D_scatter_for_average_state; see the note there.
        """
        if setting is None:
            setting = globals()['setting']
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        beta_min = Visualization.covert_to_select_list_value(beta_list, beta_values[0])
        beta_max = Visualization.covert_to_select_list_value(beta_list, beta_values[1])
        gamma_min = Visualization.covert_to_select_list_value(gamma_list, gamma_values[0])
        gamma_max = Visualization.covert_to_select_list_value(gamma_list, gamma_values[1])
        df = df[df.gamma >= gamma_min]
        df = df[df.gamma <= gamma_max]
        df = df[df.beta >= beta_min]
        df = df[df.beta <= beta_max]
        gamma_array = np.array(df['gamma'].drop_duplicates())
        beta_array = np.array(df['beta'].drop_duplicates())
        for i in sorted(gamma_array):
            for j in sorted(beta_array):
                df1 = df[df.gamma == i]
                df2 = df1[df1.beta == j]
                plt.plot(df2['Steps'], ((df2['LAYER_A_MEAN'] / setting.MAX) + df2['LAYER_B_MEAN']) / 2,
                         linewidth=0.5)
        plt.ylabel('AS', fontsize=18, labelpad=6)
        plt.xlabel('time(step)', fontsize=18, labelpad=6)

    def flow_prob_beta_chart(self, setting, df, beta_value, gamma_value):
        """Overlay PROB_BETA time series for every complete (gamma, beta) cell
        inside the [min, max] windows; beta_value/gamma_value are [min, max]."""
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        beta_min = Visualization.covert_to_select_list_value(beta_list, beta_value[0])
        beta_max = Visualization.covert_to_select_list_value(beta_list, beta_value[1])
        gamma_min = Visualization.covert_to_select_list_value(gamma_list, gamma_value[0])
        gamma_max = Visualization.covert_to_select_list_value(gamma_list, gamma_value[1])
        df = df[df.gamma >= gamma_min]
        df = df[df.gamma <= gamma_max]
        df = df[df.beta >= beta_min]
        df = df[df.beta <= beta_max]
        # drop_duplicates() on a one-column frame yields shape (n, 1) arrays,
        # hence the [0] indexing below.
        gamma_array = np.array(pd.DataFrame(df['gamma']).drop_duplicates())
        beta_array = np.array(pd.DataFrame(df['beta']).drop_duplicates())
        for i in sorted(gamma_array):
            for j in beta_array:
                df1 = df[df.gamma == i[0]]
                df2 = df1[df1.beta == j[0]]
                # Only draw cells whose run reached the configured step count.
                if len(df2) >= setting.Limited_step:
                    plt.plot(df2['Steps'], df2['PROB_BETA'], linewidth=0.3)

    def different_state_ratio_chart(self, setting, df, beta_value, gamma_value, select_layer):
        """Overlay <layer>_DIFFERENT_STATE_RATIO time series for complete cells;
        select_layer is e.g. 'A' or 'B'."""
        beta_list = Visualization.making_select_list(df, 'beta')
        gamma_list = Visualization.making_select_list(df, 'gamma')
        beta_min = Visualization.covert_to_select_list_value(beta_list, beta_value[0])
        beta_max = Visualization.covert_to_select_list_value(beta_list, beta_value[1])
        gamma_min = Visualization.covert_to_select_list_value(gamma_list, gamma_value[0])
        gamma_max = Visualization.covert_to_select_list_value(gamma_list, gamma_value[1])
        df = df[df.gamma >= gamma_min]
        df = df[df.gamma <= gamma_max]
        df = df[df.beta >= beta_min]
        df = df[df.beta <= beta_max]
        gamma_array = np.array(pd.DataFrame(df['gamma']).drop_duplicates())
        beta_array = np.array(pd.DataFrame(df['beta']).drop_duplicates())
        for i in sorted(gamma_array):
            for j in beta_array:
                df1 = df[df.gamma == i[0]]
                df2 = df1[df1.beta == j[0]]
                if len(df2) >= setting.Limited_step:
                    plt.plot(df2['Steps'], df2['%s_DIFFERENT_STATE_RATIO' % select_layer], linewidth=0.7)

    @staticmethod
    def state_list_function(setting, df, gamma_list, beta_list):
        """Grid the average state: Z[i][j] for (gamma_list[i], beta_list[j]);
        cells with no data become 0."""
        Z = np.zeros([len(gamma_list), len(beta_list)])
        for i, gamma in enumerate(gamma_list):
            for j, beta in enumerate(beta_list):
                df1 = df[df.gamma == gamma]
                df2 = df1[df1.beta == beta]
                if len(df2) == 0:
                    Z[i][j] = 0
                else:
                    Z[i][j] = ((df2['LAYER_A_MEAN'].iloc[0] / setting.MAX) + df2['LAYER_B_MEAN'].iloc[0]) / 2
        return Z

    @staticmethod
    def covert_to_select_list_value(select_list, input_value):
        """Snap *input_value* to the largest grid value <= it (or the first
        grid value when input_value precedes the grid).  *select_list* must
        be a sorted ndarray from making_select_list()."""
        loc = sum(select_list <= input_value)
        temp_value = select_list[loc - 1]
        return temp_value

    @staticmethod
    def making_select_list(df, list_name):
        """Return the sorted unique values of column *list_name* as an ndarray."""
        values = []  # renamed from `list`, which shadowed the builtin
        df = pd.DataFrame(df[list_name])
        select_list = np.array(df.drop_duplicates())
        for i in range(len(select_list)):
            values.append(select_list[i][0])
        return np.array(sorted(values))
if __name__ == "__main__":
    # Ad-hoc driver: pull simulation rows from MySQL (via SelectDB) and draw
    # one chart.  The commented-out lines are alternative filters/plots kept
    # for quick experimentation.
    print("Visualization")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    setting.database = 'paper_revised_data'
    setting.table = 'simulation_table'
    select_db = SelectDB.SelectDB()
    df = select_db.select_data_from_DB(setting)
    print(len(df))
    # df = df[df.Unchanged_A_Node == 'A_95']
    # df = df[df.Steps == 100]
    df = df[df.MODEL == 'LM(16)']
    print(len(df))
    # print(len(df))
    # df = df[df.Unchanged_A_Node == 'A_N']
    visualization = Visualization()
    fig = plt.figure()
    # visualization.plot_3D_trisurf_for_average_state(df)
    visualization.average_state_for_steps_scale(df, [0, 20], [0, 2])
    plt.show()
    plt.close()
    #visualization.plot_3D_to_2D_contour_for_average_state(setting, df)
    #visualization.plot_3D_to_2D_contour_for_average_state()
    #visualization.plot_3D_contour_for_average_state('previous_research')
    #visualization.plot_3D_scatter_for_average_state('average_layer_state') #previous_research
    # fig = visualization.flow_prob_beta_chart([0, 3], [0, 2])
    # fig = visualization.different_state_ratio_chart([0, 3], [0, 2], 'B')
    # visualization.plot_2D_beta_for_average_state(0.2)
    # visualization.plot_2D_beta_for_average_state(0.4)
    #visualization.plot_2D_beta_for_average_state('previous_research', 0.4)
    #visualization.plot_2D_beta_for_average_state('previous_research', 0.6)
    print("paint finished")
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,523
|
leighsix/CompetitionPycharm
|
refs/heads/master
|
/Changing_Variable.py
|
import Setting_Simulation_Value
import RepeatDynamics
import sqlalchemy
from multiprocessing import Pool
from concurrent import futures
from tqdm import tqdm
class Changing_Variable:
    """Fans simulation runs out over every (gamma, beta) pair and appends
    each run's DataFrame to a MySQL table."""

    # Single definition of the MySQL DSN -- it was previously duplicated
    # verbatim in two methods.  NOTE(review): credentials are hard-coded;
    # consider moving them to configuration or environment variables.
    _DB_URL = 'mysql+pymysql://root:2853@localhost:3306/%s'

    def __init__(self):
        self.repeat_dynamics = RepeatDynamics.RepeatDynamics()

    def _make_engine(self, database):
        """Create a SQLAlchemy engine for *database* using the shared DSN."""
        return sqlalchemy.create_engine(self._DB_URL % database)

    def many_execute_for_simulation(self, setting):
        """Run every (gamma, beta) pair in a process pool and append each
        result to setting.table as it completes (progress via tqdm)."""
        setting_variable_list = self.making_variable_tuples_list(setting)
        engine = self._make_engine(setting.database)
        with futures.ProcessPoolExecutor(max_workers=setting.workers) as executor:
            to_do_map = {}
            for setting_variable_tuple in setting_variable_list:
                future = executor.submit(self.calculate_for_simulation, setting_variable_tuple)
                to_do_map[future] = setting_variable_tuple
            done_iter = futures.as_completed(to_do_map)
            done_iter = tqdm(done_iter, total=len(setting_variable_list))
            for future in done_iter:
                res = future.result()
                res.to_sql(name='%s' % setting.table, con=engine, index=False, if_exists='append')

    def calculate_for_simulation(self, setting_variable_tuple):
        """Run one (setting, (gamma, beta)) job and return its DataFrame."""
        gamma, beta = setting_variable_tuple[1]
        panda_db = self.repeat_dynamics.repeat_dynamics(setting_variable_tuple[0], gamma, beta)
        return panda_db

    def making_variable_tuples_list(self, setting):
        """Pair the shared setting object with each (gamma, beta) tuple."""
        return [(setting, variable) for variable in setting.variable_list]

    def calculate_and_input_database(self, setting_variable_tuple):
        """Run one job and write it straight to the DB (multiprocessing path)."""
        setting = setting_variable_tuple[0]
        gamma, beta = setting_variable_tuple[1]
        panda_db = self.repeat_dynamics.repeat_dynamics(setting, gamma, beta)
        print(panda_db.loc[0])  # progress/debug output, kept from the original
        engine = self._make_engine(setting.database)
        panda_db.to_sql(name='%s' % setting.table, con=engine, index=False, if_exists='append')

    def paralleled_work(self, setting):
        """Alternative fan-out using multiprocessing.Pool; each worker writes
        its own result to the DB."""
        setting_variable_list = self.making_variable_tuples_list(setting)
        with Pool(setting.workers) as p:
            p.map(self.calculate_and_input_database, setting_variable_list)
if __name__ == "__main__":
    # Driver: build and print the (setting, (gamma, beta)) job list; the
    # pooled run itself is left commented out.
    print("Changing_Variable")
    setting = Setting_Simulation_Value.Setting_Simulation_Value()
    changing_variable = Changing_Variable()
    print(changing_variable.making_variable_tuples_list(setting))
    # changing_variable.many_execute_for_simulation(setting)
    print("Operating end")
|
{"/InterconnectedDynamics.py": ["/Setting_Simulation_Value.py", "/OpinionDynamics.py", "/DecisionDynamics.py", "/MakingPandas.py", "/InterconnectedLayerModeling.py"], "/AnalysisDB.py": ["/Setting_Simulation_Value.py"], "/Layer_B_Modeling.py": ["/Setting_Simulation_Value.py"], "/OpinionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Interconnected_Network_Visualization.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/RepeatDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedDynamics.py", "/InterconnectedLayerModeling.py", "/MakingPandas.py"], "/MyWindow.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py", "/Changing_Variable.py", "/Visualization.py", "/Interconnected_Network_Visualization.py", "/DB_Management.py"], "/MakingPandas.py": ["/InterconnectedLayerModeling.py", "/Setting_Simulation_Value.py"], "/InterconnectedLayerModeling.py": ["/Setting_Simulation_Value.py"], "/VisualizationNew.py": ["/Setting_Simulation_Value.py"], "/MakingMovie.py": ["/Setting_Simulation_Value.py", "/Interconnected_Network_Visualization.py", "/Layer_A_Modeling.py", "/Layer_B_Modeling.py"], "/DecisionDynamics.py": ["/Setting_Simulation_Value.py", "/InterconnectedLayerModeling.py"], "/Layer_A_Modeling.py": ["/Setting_Simulation_Value.py"], "/Visualization.py": ["/Setting_Simulation_Value.py"], "/Changing_Variable.py": ["/Setting_Simulation_Value.py", "/RepeatDynamics.py"]}
|
5,527
|
pratham-pay/Dockertest
|
refs/heads/master
|
/main_code.py
|
import json
import pandas as pd
from datetime import datetime
import re
import numpy as np
# Account-type codes this parser knows how to price; parse_account() rejects
# everything else.  NOTE(review): only code 123 is supported at the moment.
VALID_ACCOUNT_TYPES = {
    123: "Personal Loan"
}
def parse(json_obj):
    """Parse a mapping of customer_id -> JSON-encoded account list.

    Returns a dict keyed by customer_id whose value is either a list of
    parse_account() results, or the string "No accounts found" when the
    payload is empty or not a JSON list.

    BUG FIX: previously a payload that decoded to a non-list JSON value
    silently dropped the customer from the output dict entirely; such
    customers now get "No accounts found" like the empty-payload case.
    """
    out_dict = {}
    for customer_id, account_list in json_obj.items():
        if not account_list:
            out_dict[customer_id] = "No accounts found"
            continue
        account_list = json.loads(account_list)
        if isinstance(account_list, list):
            out_list = []
            for account in account_list:
                parsed_account = parse_account(account)
                if parsed_account:
                    out_list.append(parsed_account)
            out_dict[customer_id] = out_list
        else:
            out_dict[customer_id] = "No accounts found"
    return out_dict
def get_nper(open_dt, bal_dt):
    """Number of whole payment periods (months, minus one) between the
    account-open date and the balance date."""
    months_elapsed = (bal_dt.year - open_dt.year) * 12 + (bal_dt.month - open_dt.month)
    return months_elapsed - 1
def parse_account(account):
    """Validate one raw bureau account record and estimate its loan terms.

    Returns {'account_id', 'rate', 'tenure', 'emi'} on success, or a short
    diagnostic string describing why the record was unusable.
    """
    try:
        account_id = account['ACCOUNT_NB']
        account_type = int(account['ACCT_TYPE_CD'])
        open_date = datetime.strptime(account['OPEN_DT'], "%Y/%m/%d")
        balance_dt = datetime.strptime(account['BALANCE_DT'], "%Y/%m/%d")
        amount = int(account['ORIG_LOAN_AM'])
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid programming errors.
        return "Error in input data"
    if account_type not in VALID_ACCOUNT_TYPES:
        return "invalid account type"
    total_paid_period = get_nper(open_date, balance_dt)
    # Month-indexed history columns, e.g. BALANCE_AM_01 / DAYS_PAST_DUE_01.
    # Raw strings: '\d' is an invalid escape in a plain string literal.
    p1 = re.compile(r'BALANCE_AM_\d{2}')
    bal_keys = sorted(list(filter(p1.match, account.keys())))
    p2 = re.compile(r'DAYS_PAST_DUE_\d{2}')
    dpd_keys = sorted(list(filter(p2.match, account.keys())))
    bal_array = []
    for b, d in zip(bal_keys, dpd_keys):
        # Use only months paid on time (0 days past due) whose balance is a
        # plausible outstanding amount (positive, below the original loan).
        if not pd.isna(account[d]) and account[d] == 0:
            bal = account[b]
            if not pd.isna(bal) and bal > 0 and bal < amount:
                per = total_paid_period - int(b[-2:]) + 1
                bal_array.append([bal, per])
    if len(bal_array) > 0:
        ret = calc_emi(open_date, amount, bal_array)
        if ret:
            rate, tenure, emi = ret
            parsed_account = {
                'account_id': str(account_id),
                'rate': float(rate),
                'tenure': int(tenure),
                'emi': int(emi),
            }
            return parsed_account
        else:
            return "Model returned no values"
    else:
        return "Insufficient data for this account"
def calc_emi(open_date, amount, bal):
    """Grid-search an (interest rate, tenure) pair that best explains the
    observed balances, and return (rate, tenure, emi) -- or None when no
    candidate pair is feasible.

    *bal* is a sequence of [balance, periods_remaining] observations.
    ``open_date`` is accepted for signature compatibility but unused.
    NOTE(review): np.pmt/np.fv were removed in NumPy >= 1.20; on modern
    NumPy this needs the numpy-financial package.
    """
    np_rates = np.arange(10, 40, 0.5)      # annual rate candidates, percent
    np_tenure = np.arange(1, 60, 1)        # tenure candidates, months
    rt_pairs = [(r, t) for r in np_rates for t in np_tenure]
    new_bal_array = [(tup[0], tup[1]) for tup in bal]

    def multiproc(pair):
        """RMS error between observed and modeled balances for one (r, t),
        or None when any observation lies beyond the candidate tenure."""
        r, t = pair
        if any(nper > t for _, nper in new_bal_array):
            return None
        rate = r / 1200  # monthly fraction from annual percent
        emi = -np.pmt(rate, t, amount)
        diff = 0
        for balance, nper in new_bal_array:
            calc_balance = np.fv(rate, nper, emi, -amount)
            diff = diff + (balance - calc_balance) ** 2
        return np.sqrt(diff / len(new_bal_array))

    min_diff = float('inf')
    # BUG FIX: r and t were previously unbound when no pair was feasible,
    # so the `if r and t` check below raised NameError instead of
    # returning None.
    r = t = None
    for tup, diff in zip(rt_pairs, map(multiproc, rt_pairs)):
        if diff and diff < min_diff:
            min_diff = diff
            r, t = tup
    if r and t:
        emi = -np.pmt(r / 1200, t, amount)
        return (r, t, emi)
    return None  # explicit return (was a bare no-op `None` expression)
|
{"/api_code.py": ["/main_code.py"]}
|
5,528
|
pratham-pay/Dockertest
|
refs/heads/master
|
/api_code.py
|
import flask
from flask import request
from main_code import parse

app= flask.Flask(__name__)
# NOTE(review): debug mode should be disabled outside local development
app.config["DEBUG"]=True

@app.route('/', methods=['POST', 'GET'])
def api_method():
    """Single endpoint: parse a JSON payload of accounts, else complain."""
    if request.is_json:
        return parse(request.get_json())
    else:
        print(request)
        return "Input not right"

# local-only server (loopback address, fixed port)
app.run(host='127.0.0.1', port=8000)
|
{"/api_code.py": ["/main_code.py"]}
|
5,529
|
peulsilva/fourth_sail
|
refs/heads/main
|
/utils.py
|
def exists(char, array):
    """Return True and bump its counter if *char* is already in *array*.

    Side effect: calls addCounter() on the matching element.
    """
    for entry in array:
        if entry.char != char:
            continue
        entry.addCounter()
        return True
    return False
def printArray(array):
    """Print each element's character and occurrence count, one per line."""
    for entry in array:
        print("Letter: "+ entry.char+ " Count: ", entry.count)
|
{"/main.py": ["/CharObject.py", "/utils.py"]}
|
5,530
|
peulsilva/fourth_sail
|
refs/heads/main
|
/CharObject.py
|
class CharObject:
    """A single character together with how many times it has been seen."""

    def __init__(self, char):
        # the first sighting counts as one occurrence
        self.char = char
        self.count = 1

    def addCounter(self):
        """Record one more occurrence of this character."""
        self.count = self.count + 1
|
{"/main.py": ["/CharObject.py", "/utils.py"]}
|
5,531
|
peulsilva/fourth_sail
|
refs/heads/main
|
/main.py
|
# Given an array which may contain duplicates, print all elements and their frequencies in descending order.
# Your code solution should be sent to the result printed.
from CharObject import CharObject
from utils import exists, printArray

# character treated as a separator and therefore never counted
sep= ' '
fileName= "textFile.txt"
# NOTE(review): the file is never closed; a `with open(...)` block would be safer
file = open(fileName,"r")
# only the first line of the file is analysed
line = file.readline()
charArray=[]
for char in line:
    if not char == sep:
        charObj = CharObject(char)
        # exists() bumps the counter in place for repeats;
        # only first sightings get appended
        if not exists(char,charArray):
            charArray.append(charObj)
# most frequent characters first
charArray = sorted(charArray, key= lambda x: x.count, reverse= True)
printArray(charArray)
|
{"/main.py": ["/CharObject.py", "/utils.py"]}
|
5,533
|
pranavg000/Social-Network-Django
|
refs/heads/master
|
/Social Network/core/forms.py
|
from django.contrib.auth.models import User
from django import forms
from .models import Profile,Post,Comment
class UserForm(forms.ModelForm):
    """Registration form: username, e-mail and a masked password field."""
    password = forms.CharField(widget=forms.PasswordInput)
    email = forms.EmailField(max_length=254, help_text='Required field')
    class Meta:
        model = User
        fields = ['username','email','password']
class UpdateUserForm(forms.ModelForm):
    """Profile-edit form for the account side: e-mail only."""
    email = forms.EmailField(max_length=254, help_text='Required field')
    class Meta:
        model = User
        fields = ['email']
class UpdateProfileForm(forms.ModelForm):
    """Profile-edit form for the Profile model: status line and avatar."""
    class Meta:
        model = Profile
        fields = ['status_info','profile_photo']
class CreatePost(forms.ModelForm):
    """New-post form: text plus an optional picture upload."""
    class Meta:
        model = Post
        fields = ['post_text','post_picture']
class CreateComment(forms.ModelForm):
    """Single-field form for commenting on a post."""
    class Meta:
        model = Comment
        fields = ['comment_text']
|
{"/Social Network/core/forms.py": ["/Social Network/core/models.py"], "/Social Network/core/views.py": ["/Social Network/core/forms.py", "/Social Network/core/models.py"], "/Social Network/core/admin.py": ["/Social Network/core/models.py"]}
|
5,534
|
pranavg000/Social-Network-Django
|
refs/heads/master
|
/Social Network/core/views.py
|
from django.shortcuts import render,redirect
from django.views import generic
from django.views.generic import View
from django.views.generic.edit import CreateView,UpdateView,DeleteView
from .forms import UserForm,UpdateUserForm,UpdateProfileForm,CreatePost,CreateComment
from django.http import HttpResponse
from django.contrib.auth import authenticate,login
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from .models import User,Following,Follower,Post
from django.urls import reverse
from django.http import HttpResponseRedirect
def index(request):
    """Render the static landing page."""
    return render(request,'core/index.html')
@login_required
def profile(request, username):
    """Profile page: POST saves the owner's edit forms, GET renders the page.

    GET behaviour differs for the owner (edit + new-post forms) versus a
    visitor (follow button shown unless already following).
    """
    if request.method == 'POST':
        # owner submitted the account / profile edit forms
        u_form = UpdateUserForm(request.POST,instance=request.user)
        p_form = UpdateProfileForm(request.POST, request.FILES, instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request,f'Your Profile has been updated!')
        url = reverse('profile', kwargs = {'username' : username})
        return redirect(url)
    else:
        if username == request.user.username:
            # own profile: show the update forms and the new-post form
            u_form = UpdateUserForm(instance=request.user)
            p_form = UpdateProfileForm(instance=request.user.profile)
            post_form = CreatePost()
            person = User.objects.get(username = username)
            context = {
                'u_form':u_form,
                'p_form':p_form,
                'post_form':post_form,
                'person':person,
            }
        else:
            # visitor: decide whether to offer a follow button
            person = User.objects.get(username = username)
            already_a_follower=0
            # follower_user is stored as a username string (see core.models)
            for followers in person.follower_set.all():
                if (followers.follower_user == request.user.username):
                    already_a_follower=1
                    break;
            if already_a_follower==1:
                context = {
                    'person':person,
                }
            else:
                # 'f':1 flags the template to render the follow button
                context = {
                    'person':person,
                    'f':1,
                }
        comment_form = CreateComment()
        context.update({'comment_form':comment_form})
        return render(request, 'core/profile.html', context)
class UserFormView(View):
    """Registration view: GET shows the blank form, POST creates and logs in."""
    form_class = UserForm
    template_name = 'core/registration_form.html'
    def get(self, request):
        # blank registration form
        form = self.form_class(None)
        return render(request,self.template_name,{'form':form})
    def post(self, request):
        form = self.form_class(request.POST)
        if form.is_valid():
            # commit=False so the raw password can be hashed before saving
            user = form.save(commit=False)
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            user.set_password(password)
            user.save()
            # sign the new user in straight away
            user = authenticate(username=username,password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    messages.success(request,f'Account created for {username}!')
                    return redirect('core:index')
        # invalid form (or inactive user): re-render with errors
        return render(request,self.template_name, {'form':form})
def followweb(request, username):
    """Make the logged-in user follow *username*, then show that profile.

    Only acts on POST and never for self-follow; in every case a redirect
    is returned. (The original returned None on GET/self-follow, which
    makes Django raise "view didn't return an HttpResponse" — HTTP 500.)
    """
    if request.user.username != username and request.method == 'POST':
        disciple = User.objects.get(username=request.user.username)
        leader = User.objects.get(username=username)
        # CharFields store str(User), i.e. the username string
        leader.follower_set.create(follower_user = disciple)
        disciple.following_set.create(following_user = leader)
    url = reverse('profile', kwargs = {'username' : username})
    return redirect(url)
def unfollowweb(request, username):
    """Remove the follow relation between the logged-in user and *username*.

    Only acts on POST; always answers with a redirect to the profile so the
    view never returns None (which Django turns into an HTTP 500).
    """
    if request.method == 'POST':
        disciple = User.objects.get(username=request.user.username)
        leader = User.objects.get(username=username)
        leader.follower_set.get(follower_user = disciple).delete()
        disciple.following_set.get(following_user = leader).delete()
    url = reverse('profile', kwargs = {'username' : username})
    return redirect(url)
def welcome(request):
    """Redirect the logged-in user to their own profile page."""
    url = reverse('profile', kwargs = {'username' : request.user.username})
    return redirect(url)
def postweb(request, username):
    """Create a new post for the logged-in user, then show the profile page.

    Only acts on POST; always answers with a redirect so the view never
    returns None (which Django turns into an HTTP 500).
    """
    if request.method == 'POST':
        post_form = CreatePost(request.POST,request.FILES)
        if post_form.is_valid():
            post_text = post_form.cleaned_data['post_text']
            # picture is optional; .get avoids a KeyError when absent
            post_picture = request.FILES.get('post_picture')
            request.user.post_set.create(post_text=post_text, post_picture = post_picture)
            messages.success(request,f'You have successfully posted!')
    url = reverse('profile', kwargs = {'username' : username})
    return redirect(url)
def commentweb(request, username, post_id):
    """Attach a comment by the logged-in user to post *post_id* of *username*.

    Only acts on POST; always answers with a redirect so the view never
    returns None (which Django turns into an HTTP 500).
    """
    if request.method == 'POST':
        comment_form = CreateComment(request.POST)
        if comment_form.is_valid():
            comment_text = comment_form.cleaned_data['comment_text']
            user = User.objects.get(username=username)
            post = user.post_set.get(pk=post_id)
            post.comment_set.create(user=request.user,comment_text=comment_text)
            messages.success(request,f'Your Comment has been posted')
    url = reverse('profile', kwargs = {'username' : username})
    return redirect(url)
def feed(request):
    """Render every post, newest first, together with an empty comment form."""
    newest_first = Post.objects.order_by('created_at').reverse()
    context = {
        'post_all': newest_first,
        'comment_form': CreateComment(),
    }
    return render(request,'core/feed.html',context)
|
{"/Social Network/core/forms.py": ["/Social Network/core/models.py"], "/Social Network/core/views.py": ["/Social Network/core/forms.py", "/Social Network/core/models.py"], "/Social Network/core/admin.py": ["/Social Network/core/models.py"]}
|
5,535
|
pranavg000/Social-Network-Django
|
refs/heads/master
|
/Social Network/core/models.py
|
from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
    """One-to-one extension of the auth User: avatar file and status line."""
    user = models.OneToOneField(User, on_delete= models.CASCADE)
    profile_photo = models.FileField(default='default.jpg', upload_to='profile_photos')
    status_info = models.CharField(default="Enter status", max_length=1000)
    def __str__(self):
        return f'{self.user.username} Profile'
class Post(models.Model):
    """A user's post: text plus an optional picture, timestamped at creation."""
    user = models.ForeignKey(User,on_delete = models.CASCADE,null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    post_text = models.CharField(max_length=2000)
    post_picture = models.FileField(default="default.jpg",upload_to='post_picture')
class Following(models.Model):
    """An account *user* follows; the target is stored as a username string."""
    user = models.ForeignKey(User,on_delete = models.CASCADE,null=True)
    following_user = models.CharField(max_length=100,null=True)
    def __str__(self):
        # following_user is a CharField (a plain string) — the original
        # `self.following_user.username` raised AttributeError
        return str(self.following_user)
class Follower(models.Model):
    """An account following *user*; the follower is stored as a username string."""
    user = models.ForeignKey(User,on_delete = models.CASCADE,null=True)
    follower_user = models.CharField(max_length=100,null=True)
    def __str__(self):
        # follower_user is a CharField (a plain string) — the original
        # `self.follower_user.username` raised AttributeError
        return str(self.follower_user)
class Comment(models.Model):
    """A user's comment attached to a single post."""
    post = models.ForeignKey(Post,on_delete = models.CASCADE)
    user = models.ForeignKey(User,on_delete = models.CASCADE,null=True)
    comment_text = models.CharField(default="Enter Comment Here",max_length=2000)
|
{"/Social Network/core/forms.py": ["/Social Network/core/models.py"], "/Social Network/core/views.py": ["/Social Network/core/forms.py", "/Social Network/core/models.py"], "/Social Network/core/admin.py": ["/Social Network/core/models.py"]}
|
5,536
|
pranavg000/Social-Network-Django
|
refs/heads/master
|
/Social Network/core/admin.py
|
from django.contrib import admin
from .models import Profile,Following,Follower,Post,Comment

# expose all core models in the Django admin with default ModelAdmin options
admin.site.register(Profile)
admin.site.register(Follower)
admin.site.register(Following)
admin.site.register(Post)
admin.site.register(Comment)
|
{"/Social Network/core/forms.py": ["/Social Network/core/models.py"], "/Social Network/core/views.py": ["/Social Network/core/forms.py", "/Social Network/core/models.py"], "/Social Network/core/admin.py": ["/Social Network/core/models.py"]}
|
5,537
|
pranavg000/Social-Network-Django
|
refs/heads/master
|
/Social Network/SocialNetwork/urls.py
|
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views as auth_views
from core import views as user_views
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
urlpatterns = [
    # the feed doubles as the landing page
    path('', user_views.feed, name='home'),
    path('admin/', admin.site.urls),
    path('core/',include('core.urls')),
    path('login/',auth_views.LoginView.as_view(template_name='core/login.html'),name='login'),
    path('welcome/',user_views.welcome,name="welcome"),
    path('logout/',auth_views.LogoutView.as_view(template_name='core/logout.html'),name='logout'),
    path('register/',user_views.UserFormView.as_view(template_name='core/registration_form.html'),name='register'),
    # regex routes capture the username (and post id) from the URL
    url(r'^profile/(?P<username>\w+)/$',user_views.profile,name='profile'),
    url(r'^followweb/(?P<username>\w+)/$',user_views.followweb,name="followweb"),
    url(r'^unfollowweb/(?P<username>\w+)/$',user_views.unfollowweb,name="unfollowweb"),
    url(r'^postweb/(?P<username>\w+)/$',user_views.postweb,name="postweb"),
    url(r'^commentweb/(?P<username>\w+)/(?P<post_id>\d+)/$', user_views.commentweb,name = "commentweb"),
    path('feed/',user_views.feed,name="feed"),
]
# serve static and media files from Django itself only during development
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/Social Network/core/forms.py": ["/Social Network/core/models.py"], "/Social Network/core/views.py": ["/Social Network/core/forms.py", "/Social Network/core/models.py"], "/Social Network/core/admin.py": ["/Social Network/core/models.py"]}
|
5,538
|
pranavg000/Social-Network-Django
|
refs/heads/master
|
/Social Network/core/migrations/0001_initial.py
|
# Generated by Django 2.1.5 on 2019-03-03 06:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); normally not edited by hand.
    # Creates the initial Comment/Follower/Following/Post/Profile tables and
    # wires their user foreign keys to the swappable AUTH_USER_MODEL.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment_text', models.CharField(default='Enter Comment Here', max_length=2000)),
            ],
        ),
        migrations.CreateModel(
            name='Follower',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('follower_user', models.CharField(max_length=100, null=True)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Following',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('following_user', models.CharField(max_length=100, null=True)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('post_text', models.CharField(max_length=2000)),
                ('post_picture', models.FileField(default='default.jpg', upload_to='post_picture')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile_photo', models.FileField(default='default.jpg', upload_to='profile_photos')),
                ('status_info', models.CharField(default='Enter status', max_length=1000)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='comment',
            name='post',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Post'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/Social Network/core/forms.py": ["/Social Network/core/models.py"], "/Social Network/core/views.py": ["/Social Network/core/forms.py", "/Social Network/core/models.py"], "/Social Network/core/admin.py": ["/Social Network/core/models.py"]}
|
5,544
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/Games.py
|
import numpy as np
class Game:
    """Base class for grid games: board geometry plus a one-hot encoder."""

    def __init__(self,num_rows, num_cols, action_space, obs_space):
        self.num_rows = num_rows
        self.num_cols = num_cols
        self.action_space = action_space
        self.obs_space = obs_space

    def board2array(self):
        """One-hot encode self.board into a flat vector of length obs_space.

        Each cell occupies three consecutive slots (one per cell value
        0/1/2). NOTE(review): assumes a subclass has set self.board.
        """
        encoded = np.zeros(self.obs_space)
        slots_per_row = 3 * self.num_cols
        for r in range(self.num_rows):
            for c in range(self.num_cols):
                encoded[slots_per_row * r + 3 * c + self.board[r][c]] = 1
        return encoded
class TicTacToe(Game):
    """3x3 tic-tac-toe where the side to move is always mark 1.

    Opponent turns are handled by callers via invert_board(), which swaps
    marks 1 and 2. step() therefore only ever places 1s and only ever
    reports wins for mark 1.

    Change vs. the original: the four copy-pasted flag-loop win checks
    (rows / columns / both diagonals), whose behaviour depended on fragile
    indentation, are consolidated into one _has_won() helper with the
    same semantics.
    """

    def __init__(self):
        self.board = np.zeros((3,3),dtype="int")
        self.terminal = False
        super().__init__(3,3,9,27)

    def restart(self):
        """Reset to an empty, non-terminal board."""
        self.board = np.zeros((3,3),dtype="int")
        self.terminal = False

    def is_valid(self, action):
        """A move (int 0-8) is legal iff the target cell is still empty."""
        row, col = divmod(action, 3)
        return bool(self.board[row][col] == 0)

    def invert_board(self):
        """Swap marks 1 <-> 2 in place (switches whose turn 'mark 1' means)."""
        for row in range(3):
            for col in range(3):
                if(self.board[row][col] == 1):
                    self.board[row][col] = 2
                elif(self.board[row][col] == 2):
                    self.board[row][col] = 1

    def _has_won(self):
        # any row, column or diagonal consisting entirely of 1s
        b = self.board
        for i in range(3):
            if all(b[i][j] == 1 for j in range(3)):
                return True
            if all(b[j][i] == 1 for j in range(3)):
                return True
        if all(b[d][d] == 1 for d in range(3)):
            return True
        if all(b[2 - d][d] == 1 for d in range(3)):
            return True
        return False

    def step(self,action):
        """
        PARAMS: a valid action (int 0 to 8)
        RETURN: +1 on a win for mark 1, else 0
        self.board is updated in the process
        self.terminal is set on a win or a full board
        """
        row, col = divmod(action, 3)
        self.board[row][col] = 1
        if self._has_won():
            self.terminal = True
            return +1
        # draw (or simply game over) when every cell is occupied
        if all(self.board[r][c] != 0 for r in range(3) for c in range(3)):
            self.terminal = True
        return 0

    def render(self):
        """
        print to screen the full board nicely
        """
        for i in range(3):
            print('\n|', end="")
            for j in range(3):
                if self.board[i][j] == 1:
                    print(' X |' , end="")
                elif self.board[i][j] == 0:
                    print('   |' , end="")
                else:
                    print(' O |' , end="")
            print('\n')
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,545
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/gui.py
|
from tkinter import *
from main import *
import const
class tile(Button):
    """One board cell rendered as a Tk button.

    Clicking plays the human move and immediately triggers the agent's
    reply. Relies on module globals (current_node, buttons, bottom_label)
    and the shared const.game / const.mct state.
    """
    def __init__(self,id,mainframe):
        self.name = str(id)
        super().__init__(mainframe,
            text = " ",
            height = 3,
            width = 7,
            borderwidth = 0,
            bg = "Lightgray",
            font= ("Roboto", 22),
            command = lambda: self.make_move(id))
    def update(self,player):
        # update the tile according to the player who is playing
        if player == 1:
            self["text"] = "X"
        else:
            self["text"] = "O"
    def make_move(self,action):
        """Handle a click: apply the human move, then the agent's reply."""
        global current_node
        global buttons
        if const.game.is_valid(action) == True:
            self.update(1)
            # step() always plays mark 1, so invert the board around the
            # human move; the reward is negated back to the human's view
            const.game.invert_board()
            r = - const.game.step(action)
            const.game.invert_board()
            # descend the Monte Carlo tree along the chosen action
            current_node = const.mct[current_node.Child_nodes[action]]
            player = 0
            if const.game.terminal == False:
                #player 2 plays
                a = choose_move(current_node)
                buttons[a].update(0)
                r = const.game.step(a)
                current_node = const.mct[current_node.Child_nodes[a]]
                player = 1
            if const.game.terminal == True:
                global bottom_label
                # freeze the board and announce the outcome
                for b in buttons:
                    b["state"] = "disabled"
                if r == 0:
                    bottom_label["text"] = "Tie"
                elif r == -1:
                    bottom_label["text"] = "You won"
                else:
                    bottom_label["text"] = "You lost"
def generate_buttons(num_rows, num_cols,mainframe):
    """Create one tile per board cell, lay them out in a grid, return the list."""
    created = []
    for index in range(num_rows * num_cols):
        row, col = divmod(index, num_cols)
        btn = tile(index, mainframe)
        btn.grid(row = row, column = col, padx=(1,1), pady=(1,1))
        created.append(btn)
    return created
def restart():
    # restart the game
    """Reset game state and, when widgets already exist, the UI as well."""
    global buttons
    global player
    global current_node
    player = random.randint(0,1) # choose player
    const.game.restart() # empty board
    current_node = const.mct[0] # root of the tree is current node
    # the first call happens before any widgets exist, so only touch the
    # UI when the buttons (and hence bottom_label) have been created
    if buttons != []:
        for b in buttons:
            b["state"] = "normal"
            b["text"] = " "
        global bottom_label
        bottom_label["text"] = "Make your move!"
# initialize
buttons = []
restart()
# display
root = Tk()
# root.attributes("-fullscreen", True) -- this is not a nice fullscreen
topframe = Frame(root)
topframe.pack()
mainframe = Frame(root)
mainframe.pack()
bottomframe = Frame(root)
bottomframe.pack()
title = Label(topframe,
    text="Tic-Tac-Toe Zero",
    font= ("Roboto", 30))
title.pack(pady = (32,32))
# board buttons go in the middle frame
buttons = generate_buttons(3,3,mainframe)
bottom_label = Label(bottomframe,
    text = "make your move!",
    font = ("Roboto", 14))
bottom_label.pack(pady = (32,32))
restart_button = Button(bottomframe,
    text = "Restart",
    bg = "DarkGray",
    borderwidth = 0,
    font = ("Roboto",14),
    command = restart)
restart_button.pack(pady = (0,32))
#choose move
# when the agent was drawn as first player, let it move before the loop starts
if player == 0:
    #if player 1 not random
    a = choose_move(current_node)
    buttons[a].update(player)
    r = const.game.step(a)
root.mainloop()
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,546
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/File_storage.py
|
import os
import pickle
def save_mct(mct, filename="mct.txt"):
    """Pickle the Monte Carlo tree to *filename*.

    The default keeps the historical hard-coded name, so existing callers
    are unaffected; the parameter generalizes the storage location.
    """
    with open(filename, "wb") as fp: #Pickling
        pickle.dump(mct, fp)
    print("Monte Carlo Tree saved correctly")
def load_mct(filename="mct.txt"):
    """Unpickle the Monte Carlo tree from *filename*, or return a fresh [].

    The default keeps the historical hard-coded name for existing callers.
    NOTE: pickle is unsafe on untrusted files; acceptable for a local cache.
    """
    if os.path.isfile(filename):
        # load existing file
        print("Found existing Monte Carlo Tree file. Opening it")
        with open(filename, "rb") as fp: # Unpickling
            mct = pickle.load(fp)
    else:
        print("Creating new Monte Carlo Tree.")
        mct = []
    return mct
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,547
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/nn.py
|
"""
Neural network
Made by Lorenzo Mambretti
"""
import tensorflow as tf
from tensorflow import keras
print("tensorflow: ",tf.__version__)
import numpy as np
import random
import const
class NN:
    # NOTE(review): the whole graph below is built at class-definition time
    # and is therefore shared by every NN instance. It uses the TF1
    # (graph/session) API, which needs tf.compat.v1 on TensorFlow 2.x.
    """
    INPUT LAYER
    27 neurons
    """
    x = tf.placeholder(tf.float32,[None, 27], name='x')
    """
    HIDDEN LAYER 1
    21 neurons
    tanh
    """
    # NOTE(review): the docstring above says 21 neurons but W1 is 27x27
    with tf.name_scope('Hidden_layer_1') as scope:
        W1 = tf.Variable(tf.random_uniform([27,27], minval = -1, maxval = 1), name='W1')
        b1 = tf.Variable(tf.random_uniform([27], minval = -1, maxval = 1), name='b1')
        h1 = tf.tanh(tf.matmul(x, W1) + b1)
    """
    HIDDEN LAYER 2
    15 neurons
    tanh
    """
    # NOTE(review): layer 2 actually outputs 21 units (27x21), not 15
    with tf.name_scope('Hidden_layer_2') as scope:
        W2 = tf.Variable(tf.random_uniform([27,21], minval = -1, maxval = 1), name='W2')
        b2 = tf.Variable(tf.random_uniform([21], minval = -1, maxval = 1), name='b2')
        h2 = tf.tanh(tf.matmul(h1, W2) + b2)
    """
    HIDDEN LAYER 3
    15 neurons
    tanh
    """
    with tf.name_scope('Hidden_layer_3') as scope:
        W3 = tf.Variable(tf.random_uniform([21,15], minval = -1, maxval = 1), name='W3')
        b3 = tf.Variable(tf.random_uniform([15], minval = -1, maxval = 1), name='b3')
        h3 = tf.tanh(tf.matmul(h2, W3) + b3)
    """
    OUTPUT LAYER
    9 neurons
    tanh
    """
    with tf.name_scope('Output_layer') as scope:
        W4 = tf.Variable(tf.random_uniform([15,9], minval = -1, maxval = 1))
        b4 = tf.Variable(tf.random_uniform([9], minval = -1, maxval = 1))
        y_ = tf.tanh(tf.matmul(h3, W4) + b4)
    # y holds the target Q-values fed in during training
    y = tf.placeholder(tf.float32,[None, 9], name="y")
    """
    loss = mean squared error
    optimizer: adam
    or try the fastai library optimizers
    """
    loss = tf.losses.mean_squared_error(y_,y)
    def __init__(self, lr = 0.00025, batch_size = 64):
        """Attach the Adam optimizer and summaries, then open a session."""
        self.batch_size = batch_size
        self.train_step = tf.train.AdamOptimizer(lr).minimize(self.loss)
        # summaries
        tf.summary.scalar('loss', self.loss)
        self.summaries = tf.summary.merge_all()
        # start training session
        self.sess = tf.InteractiveSession()
        self.train_writer = tf.summary.FileWriter(const.cwd, self.sess.graph)
        tf.global_variables_initializer().run()
        self.training_mode = False
    def train(self, mct, iterations, training_steps):
        """Fit the net on Q-values sampled from the Monte Carlo tree *mct*."""
        self.training_mode = True
        # create batches
        input_batch = np.zeros((self.batch_size, 27))
        output_batch = np.zeros((self.batch_size, 9))
        action_matrix = np.zeros(9, dtype="int")
        for i in range(iterations):
            # random window of batch_size consecutive tree nodes
            seed = random.randint(0, len(mct) - self.batch_size - 1)
            for b in range(self.batch_size):
                if not mct[seed + b].Child_nodes:
                    # this node is not useful for training if it's not visited
                    # don't count it and advance by 1 in the list
                    # NOTE(review): reassigning the loop variable `b` does not
                    # affect the for-loop, so this batch slot is left stale
                    b = b - 1
                    # generate new point from where to look in the list
                    seed = random.randint(0, len(mct) - self.batch_size - 1)
                else:
                    input_batch[b] = mct[seed + b].board
                    # target = per-action Q from children; -1 for untried moves
                    for a in range(9):
                        if mct[seed + b].Child_nodes[a] != None:
                            action_matrix[a] = mct[mct[seed + b].Child_nodes[a]].Q()
                        else:
                            action_matrix[a] = -1
                    output_batch[b] = action_matrix
            for j in range(training_steps):
                summary, _ = self.sess.run([self.summaries, self.train_step],
                    feed_dict={ self.x: input_batch,
                                self.y: output_batch})
                self.train_writer.add_summary(summary, i)
        print("loss: ",self.sess.run(self.loss, feed_dict={self.x: input_batch,
                                                           self.y: output_batch}))
    def run(self,input_data):
        """
        PARAMS
            input_data a 27d representation of a single board
        RETURN
            v a 9d float array with the q values of all the actions
        """
        if self.training_mode == True:
            v = self.sess.run(self.y_, feed_dict={ self.x: [input_data]})
        else:
            # before any training the output is meaningless; return zeros
            v = np.zeros(9,dtype=int)
        return v
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,548
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/const.py
|
import os
from Games import TicTacToe
from File_storage import *
# exploration rate used by the move-selection policy
EPSILON = 0.1
SANITY_CHECK = True
# tensorboard log directory (note: Windows-style path separator hard-coded)
cwd = os.getcwd()
cwd = cwd + '\\tensorflow_logs'
def init():
    """Create the shared game and Monte Carlo tree as module globals."""
    # create game
    global game
    game = TicTacToe()
    # initialize Monte Carlo tree
    global mct
    mct = load_mct()
    if mct == []:
        # NOTE(review): Node is not imported anywhere in this module — this
        # line raises NameError when no saved tree exists; confirm where
        # Node is defined and import it
        mct.append(Node(game))
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,549
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/dd_ttt.py
|
"""
Self-learning Tic Tac Toe
Made by Lorenzo Mambretti and Hariharan Sezhiyan
"""
import random
import numpy as np
import tensorflow as tf
import time
import datetime
class State:
    """A tic-tac-toe position: 3x3 board plus a game-over flag.

    Fix: the original declared `board` and `terminal` as CLASS attributes,
    so the mutable board array was shared by every instance until it was
    explicitly reassigned. They are now per-instance attributes.
    """

    def __init__(self):
        self.board = np.zeros((3,3))
        self.terminal = False
def is_valid(action, state):
    """True iff *action* (int 0-8) targets an empty cell of state.board."""
    row, col = divmod(action, 3)
    if state.board[row][col] == 0:
        return True
    return False
def step(state, action):
    """Apply *action* for the mark-1 player on a copy of *state*.

    Returns (reward, new_state): reward is +1 on a win for mark 1, else 0;
    new_state.terminal is set on a win or a full board. The input state is
    not mutated.

    Change vs. the original: the four copy-pasted flag-loop win checks
    (rows / columns / both diagonals), whose behaviour depended on fragile
    indentation, are consolidated into one explicit line scan with the
    same semantics.
    """
    state_ = State()
    state_.board = np.copy(state.board)
    row_index, col_index = divmod(action, 3)
    state_.board[row_index][col_index] = 1

    board = state_.board
    # every line that can win: 3 rows, 3 columns, 2 diagonals
    lines = []
    for i in range(3):
        lines.append([board[i][j] for j in range(3)])
        lines.append([board[j][i] for j in range(3)])
    lines.append([board[d][d] for d in range(3)])
    lines.append([board[2 - d][d] for d in range(3)])

    if any(all(cell == 1 for cell in line) for line in lines):
        state_.terminal = True
        return +1, state_

    # full board with no winning line: game over with no reward
    if all(board[r][c] != 0 for r in range(3) for c in range(3)):
        state_.terminal = True
    return 0, state_
def save(W1, W2, B1, B2):
    """Save the four weight arrays to a timestamped 'weights *.npz' file.

    Fixes: the original timestamp format contained ':' characters, which
    are invalid in Windows filenames; also corrects the 'beeen' typo in
    the confirmation message.
    """
    ts = time.time()
    # filesystem-safe timestamp (no colons)
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H-%M-%S')
    filename = "weights "+ str(st)+".npz"
    np.savez(filename, W1, W2, B1, B2)
    print("The file has been saved successfully")
def load():
    """Load the four weight arrays from 'weights.npz'.

    Returns (W1, W2, b1, b2) with shapes (27,18), (18,9), (18,), (9,).
    Fix: the original returned undefined lowercase names `w1, w2` and
    always raised NameError.
    """
    npzfile = np.load("weights.npz")
    W1 = np.reshape(npzfile['arr_0'], (27, 18))
    W2 = np.reshape(npzfile['arr_1'], (18,9))
    b1 = np.reshape(npzfile['arr_2'], (18))
    b2 = np.reshape(npzfile['arr_3'], (9))
    return W1, W2, b1, b2
def invert_board(state):
    """Return a copy of *state* with marks 1 and 2 swapped (terminal kept)."""
    flipped = State()
    flipped.board = np.copy(state.board)
    flipped.terminal = state.terminal
    for r in range(3):
        for c in range(3):
            cell = state.board[r][c]
            if cell == 1:
                flipped.board[r][c] = 2
            elif cell == 2:
                flipped.board[r][c] = 1
    return flipped
def play_game():
    """Interactive console loop: human versus the `player1` agent, forever.

    NOTE(review): relies on a module-level `player1` object exposing
    extract_policy(state); the outer while(True) never exits.
    """
    while(True):
        start_nb = input("If you would like to move first, enter 1. Otherwise, enter 2. ")
        start = int(start_nb)
        state = State()
        state.board = np.zeros((3,3))
        while not state.terminal:
            if start == 1:
                # human turn: keep asking until the move is legal
                action = int(input("Please enter your move: "))
                while(is_valid(action, state) == False):
                    action = int(input("Please enter a correct move: "))
                start = 0
                r, state = step(state, action)
            else:
                # agent turn: the board is inverted so step() always plays
                # mark 1, then inverted back; reward negated to human's view
                state = invert_board(state)
                action = player1.extract_policy(state)
                start = 1
                r, state = step(state, action)
                r = -r
                state = invert_board(state)
        print(state.board)
        # r holds the reward of the final move, from the human's viewpoint
        if r == 0:
            print ("Tie")
        elif r == 1:
            print ("You won")
        else:
            print ("You lost")
def convert_state_representation(state):
    """One-hot encode a 3x3 board into a flat length-27 vector.

    Each cell maps to three consecutive slots (empty / mark-1 / mark-2);
    exactly one of the three is set per cell. Note: despite the name,
    callers pass the raw board array, not a State object.
    """
    encoded = np.zeros(27)
    for row in range(3):
        for col in range(3):
            offset = 9 * row + 3 * col
            cell = state[row][col]
            if cell == 0:
                encoded[offset] = 1
            elif cell == 1:
                encoded[offset + 1] = 1
            else:
                encoded[offset + 2] = 1
    return(encoded)
class DDQN(object):
def __init__(self):
self.x = tf.placeholder(tf.float32, [None, 27], name='x')
self.x_ = tf.placeholder(tf.float32, [None, 27], name='x_')
# for testing against random play
self.tie_rate_value = 0.0
self.win_rate_value = 0.0
self.loss_rate_value = 0.0
xavier = tf.contrib.layers.xavier_initializer(uniform=True,seed=None,dtype=tf.float32)
# Q learner
with tf.name_scope('Q-learner') as scope:
with tf.name_scope('hidden_layer') as scope:
self.W1 = tf.Variable(xavier([27, 18]))
self.b1 = tf.Variable(xavier([18]))
self.h1 = tf.tanh(tf.matmul(self.x, self.W1) + self.b1)
self.h1_alt = tf.tanh(tf.matmul(self.x_, self.W1) + self.b1)
with tf.name_scope('output_layer') as scope:
self.W2 = tf.Variable(xavier([18,9]))
self.b2 = tf.Variable(xavier([9]))
self.y = tf.tanh(tf.matmul(self.h1, self.W2) + self.b2)
self.y_alt = tf.stop_gradient(tf.tanh(tf.matmul(self.h1_alt, self.W2) + self.b2))
self.action_t = tf.placeholder(tf.int32, [None, 2])
self.q_learner = tf.gather_nd(self.y, self.action_t)
# Q target
with tf.name_scope('Q-target') as scope:
with tf.name_scope('hidden_layer') as scope:
self.W1_old = tf.placeholder(tf.float32, [27, 18], name = 'W1_old')
self.b1_old = tf.placeholder(tf.float32, [18], name = 'b1_old')
self.h1_old = tf.tanh(tf.matmul(self.x_, self.W1_old) + self.b1_old, name ='h1')
with tf.name_scope('output_layer') as scope:
self.W2_old =tf.placeholder(tf.float32, [18, 9], name='W2_old')
self.b2_old =tf.placeholder(tf.float32, [9], name='b2_old')
self.y_old = tf.tanh(tf.matmul(self.h1_old, self.W2_old) + self.b2_old, name='y_old')
self.l_done = tf.placeholder(tf.bool, [None], name='done')
self.reward = tf.placeholder(tf.float32, [None], name='reward')
self.gamma = tf.constant(0.99, name='gamma')
self.qt_best_action = tf.argmax(self.y_alt, axis = 1, name='qt_best_action')
self.qt_selected_action_onehot = tf.one_hot(indices = self.qt_best_action, depth = 9)
self.qt= tf.reduce_sum( tf.multiply( self.y_old, self.qt_selected_action_onehot ) , reduction_indices=[1,] )
self.q_target = tf.where(self.l_done, self.reward, self.reward + (self.gamma * self.qt), name='selected_max_qt')
self.loss = tf.losses.mean_squared_error(self.q_target, self.q_learner)
self.tie_rate = tf.placeholder(tf.float32, name='tie_rate')
self.win_rate = tf.placeholder(tf.float32, name='win_rate')
self.loss_rate = tf.placeholder(tf.float32, name='loss_rate')
self.train_step = tf.train.RMSPropOptimizer(0.00020, momentum=0.95, use_locking=False, centered=False, name='RMSProp').minimize(self.loss)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('tie_rate', self.tie_rate)
tf.summary.scalar('win_rate', self.win_rate)
tf.summary.scalar('loss_rate', self.loss_rate)
self.merged = tf.summary.merge_all()
tf.global_variables_initializer().run()
def update_old_weights(self):
self.saved_W1 = self.W1.eval()
self.saved_W2 = self.W2.eval()
self.saved_b1 = self.b1.eval()
self.saved_b2 = self.b2.eval()
def compute_Q_values(self,state):
    # computes associated Q value based on NN function approximator
    # (one Q value per board cell, length-9 vector).
    # One-hot encode the 3x3 board into the 27-dim network input.
    q_board = [np.copy(convert_state_representation(state.board))]
    # NN forward propagation through the module-level interactive session.
    q_values = sess.run(self.y, {self.x: q_board})
    q_values = np.reshape(q_values, 9)
    return (q_values)
def extract_policy(self, state):
    """Return the legal action (0-8) with the highest predicted Q-value.

    Returns None when no action is valid (board full). Ties resolve to the
    lowest action index, matching the original linear scan. Replaces the
    manual best-so-far loop (and an unidiomatic `policy == None` check)
    with a max over the valid actions.
    """
    q_values = self.compute_Q_values(state)
    valid_actions = [a for a in range(9) if is_valid(a, state)]
    if not valid_actions:
        return None
    return max(valid_actions, key=lambda a: q_values[a])
def train(self):
    # Perform 4 mini-batch updates of the Q-learner network, using the
    # previously saved (frozen) weights as the Q-target network.
    for _ in range(4):
        # take a random mini_batch (rows: s, a, r, s_, done)
        mini_batch = experience_replay[np.random.choice(experience_replay.shape[0], batch_size), :]
        # select state, state_, action, and reward from the mini batch
        state = np.concatenate(mini_batch[:,0]).reshape((batch_size, -1))
        # Pair each action with its row index, as required by tf.gather_nd.
        a = np.transpose(np.append([np.arange(batch_size)],[np.array(mini_batch[:,1])], axis = 0))
        r = mini_batch[:,2]
        state_ = np.concatenate(mini_batch[:,3]).reshape((batch_size, -1))
        done = mini_batch[:,4]
        # is the list of all rewards within the mini_batch
        summary, _= sess.run([self.merged, self.train_step], { self.x: state,
                                                               self.x_ : state_,
                                                               self.W1_old : self.saved_W1,
                                                               self.W2_old : self.saved_W2,
                                                               self.b1_old : self.saved_b1,
                                                               self.b2_old : self.saved_b2,
                                                               self.l_done : done,
                                                               self.reward : r,
                                                               self.action_t : a,
                                                               self.tie_rate : self.tie_rate_value,
                                                               self.win_rate : self.win_rate_value,
                                                               self.loss_rate : self.loss_rate_value})
        # `e` is the module-level episode counter used as the summary step.
        train_writer.add_summary(summary, e)
def random_play_test(self):
    # Evaluate the current greedy policy against a uniformly random opponent
    # for 100 games, storing the raw counts in *_rate_value attributes
    # (later fed into TensorBoard summaries).
    numTests = 100
    numWins = 0
    numLosses = 0
    numTies = 0
    state = State()
    for _ in range(numTests):
        state.board = np.zeros((3,3))
        state.terminal = False
        turn = 1
        while not state.terminal:
            if turn == 1:
                action = self.extract_policy(state) # agent action
                r, state = step(state, action)
                turn = 0
            else:
                # Opponent always plays mark 1 after flipping the board.
                state = invert_board(state)
                action = np.random.randint(9)
                while(is_valid(action, state) == False):
                    action = np.random.randint(9)
                r, state = step(state, action)
                # Reward is from the agent's perspective.
                r = -r
                state = invert_board(state)
                turn = 1
        if r == 0:
            numTies += 1
        elif r == 1:
            numWins += 1
        else:
            numLosses += 1
    self.tie_rate_value = numTies
    self.win_rate_value = numWins
    self.loss_rate_value = numLosses
# --- Top-level training script: DDQN self-play with experience replay ---
sess = tf.InteractiveSession()
train_writer = tf.summary.FileWriter('tensorflow_logs', sess.graph)
# Global variables
# NOTE(review): `global` at module level is a no-op; kept for documentation only.
global experience_replay
global batch_size
global e
# Hyperparameters
batch_size = 64
episodes = 100000
epsilon_minimum = 0.1
n0 = 100
start_size = 500            # episodes of pure exploration before training
update_target_rate = 50     # episodes between target-network refreshes
# Create experience_replay (rows of (s, a, r, s_, done))
experience_replay = np.zeros((0,5))
print("All set. Start playing")
# Create players
player1 = DDQN()
#player2 = DDQN() not used yet *** future improvements coming
for e in range(episodes):
    # print("episode ",e)
    state = State()
    # Epsilon decays hyperbolically after the warm-up period.
    if e >= start_size:
        epsilon = max(n0 / (n0 + (e - start_size)), epsilon_minimum)
    else:
        epsilon = 1
    if e % 2 == 1:
        # this is player 2's turn (odd episodes: opponent opens)
        state = invert_board(state)
        if random.random() < epsilon:
            # take random action
            action_pool = np.random.choice(9,9, replace = False)
            for a in action_pool:
                if is_valid(a, state):
                    action = a
                    break
        else:
            # take greedy action
            action = player1.extract_policy(state)
        r, state = step(state, action)
        state = invert_board(state)
        r = -r
    while not state.terminal:
        # this section is player 1's turn
        # select epsilon-greedy action
        if random.random() < epsilon:
            # take random action
            action_pool = np.random.choice(9,9, replace = False)
            for a in action_pool:
                if is_valid(a, state):
                    action = a
                    break
        else:
            # take greedy action
            action = player1.extract_policy(state)
        r, state_ = step(state, action)
        if not state_.terminal:
            # this is player 2's turn
            state_ = invert_board(state_)
            if random.random() < epsilon:
                # take random action
                action_pool = np.random.choice(9,9, replace = False)
                for a in action_pool:
                    if is_valid(a, state_):
                        action2 = a
                        break
            else:
                # take greedy action
                action2 = player1.extract_policy(state_) # in the future, it will be player2
            r, state_ = step(state_, action2)
            state_ = invert_board(state_)
            r = -r
        # Store the transition from player 1's perspective.
        s = convert_state_representation(np.copy(state.board))
        s_ = convert_state_representation(np.copy(state_.board))
        done = state_.terminal
        D = (s, action, r, s_, done)
        experience_replay = np.append(experience_replay, [D], axis = 0)
        state.board = np.copy(state_.board)
        state.terminal = state_.terminal
    if e == start_size: print("Start Training")
    if e >= start_size:
        if (e % update_target_rate == 0):
            print(e)
            # here save the W1,W2,b1,B2 (refresh target net and evaluate)
            player1.update_old_weights()
            player1.random_play_test()
        # NOTE(review): indentation reconstructed — training every episode,
        # target refresh every update_target_rate episodes (standard DQN).
        player1.train()
print("Training completed")
save(player1.W1.eval(), player1.W2.eval(), player1.b1.eval(), player1.b2.eval())
play_game()
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,550
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/main.py
|
"""
Self-learning Tic Tac Toe
Made by Lorenzo Mambretti
Last Update: 8/10/2018 11:38 AM (Lorenzo)
"""
import random
import numpy as np
import progressbar
from File_storage import *
from nn import NN
from Games import *
import const
import math
import tensorflow as tf
import argparse
class Node:
    """A Monte Carlo tree node: visit count, accumulated value, child indices
    into const.mct (None marks invalid actions), and the board it represents."""
    global nnet
    def __init__(self):
        self.N = 0              # visit count
        self.V = 0              # accumulated value
        self.Child_nodes = []   # indices into const.mct, or None per action
        self.board = const.game.board2array()
    def update(self,r):
        # Accumulate reward r and bump the visit count.
        self.V = self.V + r
        self.N = self.N + 1
    def Q(self):
        c_puct = 0.2 #hyperparameter
        # Prior from the network's evaluation of this node's board.
        P = np.max(nnet.run(self.board))
        if self.N == 0:
            # NOTE(review): sqrt(self.N) is always 0 here, so every unvisited
            # node scores 0 regardless of P — the PUCT formula normally uses
            # the PARENT's visit count. Confirm intent before changing.
            return c_puct * P * math.sqrt(self.N)/(1 + self.N)
        else:
            if self.Child_nodes == []:
                Q = self.V
            else:
                Q = ((self.V * self.N) + P)/(self.N + 1)
            return Q / self.N
def check_new_node(current_node):
    """Expand `current_node` on first visit: append one child per action to
    the Monte Carlo tree (const.mct), using None for illegal actions."""
    if current_node.N != 0:
        return
    for move in range(9):
        if const.game.is_valid(move):
            current_node.Child_nodes.append(len(const.mct))
            const.mct.append(Node())
        else:
            current_node.Child_nodes.append(None)
def random_move(current_node):
    """Expand `current_node` if it is new, then return a random legal action,
    probing forward (with wrap-around) from a uniformly chosen start."""
    check_new_node(current_node)  # check if it is a new node
    move = random.randint(0, const.game.action_space - 1)
    while not const.game.is_valid(move):
        move = (move + 1) % const.game.action_space
    return move
def choose_move(current_node):
    """Return an action for `current_node`.

    First visit: expand the node and pick a random legal action (the original
    duplicated check_new_node's expansion inline; delegate instead).
    Subsequent visits: pick the child action with the highest Q().
    Uses enumerate instead of `.index(c)` so each Q() is computed once and
    the correct action index is used even for repeated values.
    """
    if current_node.N == 0:
        return random_move(current_node)
    best_a = 0
    best_q = -2  # sentinel below any reachable Q value
    for a, c in enumerate(current_node.Child_nodes):
        if c is not None:
            q = const.mct[c].Q()
            if q > best_q:
                best_q = q
                best_a = a
    return best_a
def simulation(episodes, TRAINING = False):
    # Self-play `episodes` games, updating the Monte Carlo tree statistics
    # after each game and training the network on the tree.
    # NOTE(review): nnet.train is called every episode regardless of the
    # TRAINING flag — confirm whether the flag was meant to gate it.
    node_list = [[]]
    # progressbar
    bar = progressbar.ProgressBar(maxval=episodes, \
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    for e in range(episodes):
        if (e + 1) % (episodes/100) == 0:
            bar.update(e)
        player = e % 2 # choose player
        const.game.restart() # empty board
        node_list.clear()
        current_node = const.mct[0] # root of the tree is current node
        node_list.append([0,player])
        # while state not terminal
        while const.game.terminal == False:
            #choose move
            if player == 0:
                #if player 1 not random
                a = choose_move(current_node)
                r = const.game.step(a)
            else:
                #if player 2 epsilon-greedy
                if random.random() < const.EPSILON:
                    a = random_move(current_node)
                else:
                    a = choose_move(current_node)
                # Play from player 2's perspective by flipping the board.
                const.game.invert_board()
                r = - const.game.step(a)
                const.game.invert_board()
            current_node = const.mct[current_node.Child_nodes[a]]
            player = (player + 1) % 2
            node_list.append([const.mct.index(current_node),player])
            #save state in node list
        #update all nodes with the final reward (sign depends on the player)
        for node in node_list:
            if node[1] == 0:
                const.mct[node[0]].update(-r)
            else:
                const.mct[node[0]].update(r)
        # train neural network
        nnet.train(const.mct, 100, 2)
    bar.finish()
def play():
    # Importing gui launches the interactive game: the module runs its
    # setup at import time (deliberate side-effect import).
    import gui
def train():
    """Run MCTS self-play, train the network, then persist the TF session
    checkpoint and the Monte Carlo tree.

    Fix: the bare `except:` around saver.save also swallowed
    KeyboardInterrupt/SystemExit; narrowed to `except Exception`.
    """
    global nnet
    if const.SANITY_CHECK == True:
        if len(const.mct) > 1000:
            # sanity check: verify the network can overfit a single batch
            print("Single batch overfit.")
            nnet.train(const.mct, 1, 10000)
    # SIMULATION: playing and updating Monte Carlo Tree
    print("Simulating episodes")
    if len(const.mct) < 30000:
        # const.mct is small, make a lot of simulations
        print("Simulation without neural network")
        simulation(95000)
        # TRAINING: neural network is trained while keeping playing
        print("Neural network training")
        simulation(5000, TRAINING = True)
    else:
        # TRAINING: neural network is trained on the Monte Carlo Tree
        print("Neural network training. This will take a while")
        for _ in range(10):
            nnet.train(const.mct,10000,2)
    print("Simulation terminated.")
    # SAVE FILE
    try:
        saver.save(nnet.sess, "/tmp/model.ckpt")
        print("/tmp/model.ckpt saved correctly.")
    except Exception:
        print("ERROR: an error has occured while saving the weights. The session will not be available when closing the program")
    save_mct(const.mct)
if __name__ == "__main__":
    const.init()
    # create neural network
    nnet = NN(0.0001, 64)
    saver = tf.train.Saver()
    try:
        saver.restore(nnet.sess, "/tmp/model.ckpt")
        nnet.training_mode = True
    except Exception:
        # Narrowed from a bare `except:`; a missing checkpoint is expected
        # on first run and should not hide Ctrl-C.
        print("/tmp/model.ckpt not found. Training new session (nnet.sess)")
    parser = argparse.ArgumentParser(description='Train or play.')
    # BUG FIX: const/default were swapped — passing --play used to run
    # train() while the default ran play(), contradicting the help text.
    parser.add_argument('--play', dest='accumulate', action='store_const',
                        const=play, default=train,
                        help='play a const.game (default: train)')
    args = parser.parse_args()
    print(args.accumulate())
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,551
|
viraatdas/Deep-tic-tac-toe
|
refs/heads/master
|
/ttt.py
|
"""
Self-learning Tic Tac Toe
Made by Lorenzo Mambretti and Hariharan Sezhiyan
"""
import random
import numpy as np
import tensorflow as tf
class State:
    """Tic-tac-toe game state.

    `board` is a 3x3 numpy array (0 = empty, 1 = current player's mark,
    2 = opponent's mark); `terminal` is True once the game has ended.
    Fix: these were shared mutable CLASS attributes — every State() aliased
    the same board until a caller happened to reassign it. They are now
    per-instance, which is backward compatible with all existing uses.
    """
    def __init__(self):
        self.board = np.zeros((3, 3))
        self.terminal = False
def is_valid(action, state):
    """Return True when `action` (0-8) targets an empty cell of state.board."""
    row, col = divmod(action, 3)
    return bool(state.board[row][col] == 0)
def step(state, action):
    """Place the current player's mark (always 1) at `action` and return
    (reward, new_state): +1 on a win, otherwise 0 (tie or game continues);
    new_state.terminal is set when the game ends.
    NOTE(review): indentation reconstructed from a flattened dump — each
    win check is assumed to apply per row/column, with the diagonal and
    full-board checks after their loops. Confirm against the original file.
    """
    # insert
    state_ = State()
    state_.board = np.copy(state.board)
    row_index = int(np.floor(action / 3))
    col_index = action % 3
    state_.board[row_index][col_index] = 1
    # undecided
    terminal = 1
    # to check for 3 in a row horizontal
    for row in range(3):
        for col in range(3):
            if(state_.board[row][col] != 1):
                terminal = 0
        if(terminal == 1):
            state_.terminal = True
            return +1, state_
        else:
            terminal = 1
    # to check for 3 in a row vertical
    for col in range(3):
        for row in range(3):
            if(state_.board[row][col] != 1):
                terminal = 0
        if(terminal == 1):
            state_.terminal = True
            return +1, state_
        else:
            terminal = 1
    # diagonal top-left to bottom-right
    for diag in range(3):
        if(state_.board[diag][diag] != 1):
            terminal = 0
    if(terminal == 1):
        state_.terminal = True
        return +1, state_
    else:
        terminal = 1
    # diagonal bottom-left to top-right
    for diag in range(3):
        if(state_.board[2 - diag][diag] != 1):
            terminal = 0
    if(terminal == 1):
        state_.terminal = True
        return +1, state_
    else:
        terminal = 1
    # checks if board is filled completely (tie)
    for row in range(3):
        for col in range(3):
            if(state_.board[row][col] == 0):
                terminal = 0
                break
    if terminal == 1:
        state_.terminal = True
    return 0, state_
def save(W1, W2, B1, B2):
    """Persist the network parameters to weights.npz.

    Arrays are stored positionally as arr_0..arr_3, the names load() reads.
    Fix: the confirmation message named the wrong file (weights.txt) and
    contained a typo ("beeen").
    """
    np.savez("weights.npz", W1, W2, B1, B2)
    print("file weights.npz has been updated successfully")
def load():
    """Load network parameters previously written by save() from weights.npz.

    Returns (W1, W2, b1, b2) reshaped to the layer shapes (27x18, 18x9, 18, 9).
    Fix: the function returned undefined lowercase names `w1, w2`, which
    raised NameError on every call.
    """
    npzfile = np.load("weights.npz")
    W1 = np.reshape(npzfile['arr_0'], (27, 18))
    W2 = np.reshape(npzfile['arr_1'], (18, 9))
    b1 = np.reshape(npzfile['arr_2'], (18))
    b2 = np.reshape(npzfile['arr_3'], (9))
    return W1, W2, b1, b2
def extract_policy(state):
    """Return the legal action with the highest Q-value, or None when the
    board is full.

    Ties resolve to the lowest action index, as in the original scan.
    Replaces the manual best-so-far loop and the `policy == None` identity
    mistake with an idiomatic max over valid actions.
    """
    q_values = compute_Q_values(state)
    valid_actions = [a for a in range(9) if is_valid(a, state)]
    if not valid_actions:
        return None
    return max(valid_actions, key=lambda a: q_values[a])
def invert_board(state):
    """Return a copy of `state` with the two players' marks (1 and 2) swapped;
    empty cells and the terminal flag are preserved."""
    flipped = State()
    flipped.terminal = state.terminal
    flipped.board = np.copy(state.board)
    for r in range(3):
        for c in range(3):
            cell = state.board[r][c]
            if cell == 1:
                flipped.board[r][c] = 2
            elif cell == 2:
                flipped.board[r][c] = 1
    return flipped
def play_game():
    # Interactive loop: human vs the trained network, repeating forever.
    # The network always plays mark 1, so the board is flipped around its move.
    while(True):
        start_nb = input("If you would like to move first, enter 1. Otherwise, enter 2. ")
        start = int(start_nb)
        state = State()
        state.board = np.zeros((3,3))
        while not state.terminal:
            if start == 1:
                # Human move (0-8), re-prompt until legal.
                action = int(input("Please enter your move: "))
                while(is_valid(action, state) == False):
                    action = int(input("Please enter a correct move: "))
                start = 0
                r, state = step(state, action)
            else:
                state = invert_board(state)
                action = extract_policy(state)
                start = 1
                r, state = step(state, action)
                # Reward from the human's perspective.
                r = -r
                state = invert_board(state)
            print(state.board)
        if r == 0:
            print ("Tie")
        elif r == 1:
            print ("You won")
        else:
            print ("You lost")
def convert_state_representation(state):
    """One-hot encode a 3x3 board into a length-27 vector: each cell maps to
    three slots (empty / player mark 1 / opponent mark)."""
    encoded = np.zeros(27)
    for row in range(3):
        for col in range(3):
            base = 9 * row + 3 * col
            cell = state[row][col]
            if cell == 0:
                offset = 0
            elif cell == 1:
                offset = 1
            else:
                offset = 2
            encoded[base + offset] = 1
    return encoded
def compute_Q_values(state):
    # computes associated Q value based on NN function approximator
    # (length-9 vector, one value per board cell)
    q_board = np.zeros((1,27))   # NOTE(review): dead assignment, overwritten below
    q_board = [np.copy(convert_state_representation(state.board))]
    # NN forward propagation through the module-level graph/session.
    q_values = sess.run(y, feed_dict = {x: q_board})
    q_values = np.reshape(q_values, 9)
    return (q_values)
def train(experience_replay, saved_W1, saved_W2, saved_b1, saved_b2):
    # One mini-batch gradient step on the Q-learner; the saved (frozen)
    # weights are fed as the Q-target network's placeholders.
    # can modify batch size here
    batch_size = 32
    # take a random mini_batch (rows of (s, a, r, s_, done))
    mini_batch = experience_replay[np.random.choice(experience_replay.shape[0], batch_size), :]
    # select state, state_, action, and reward from the mini batch
    state = np.concatenate(mini_batch[:,0]).reshape((batch_size, -1))
    act = np.array(mini_batch[:,1])
    # Pair each action with its row index, as required by tf.gather_nd.
    act = np.append([np.arange(batch_size)],[act], axis = 0)
    act = np.transpose(act)
    r = mini_batch[:,2]
    state_ = np.concatenate(mini_batch[:,3]).reshape((batch_size, -1))
    done = mini_batch[:,4]
    # is the list of all rewards within the mini_batch
    summary, _= sess.run([merged, train_step], feed_dict={ x: state,
                                                           x_old : state_,
                                                           W1_old : saved_W1,
                                                           W2_old : saved_W2,
                                                           b1_old : saved_b1,
                                                           b2_old : saved_b2,
                                                           l_done : done,
                                                           reward : r,
                                                           action_t : act
                                                           })
    train_writer.add_summary(summary)
# --- Graph construction: learner network (trainable) and frozen target ---
# Q learner neural network: 27 -> 18 (tanh) -> 9 (tanh)
with tf.name_scope('Q-learner') as scope:
    x = tf.placeholder(tf.float32, [None, 27], name='x')
    with tf.name_scope('hidden_layer') as scope:
        W1 = tf.get_variable("W1", shape=[27, 18],
            initializer=tf.contrib.layers.xavier_initializer())
        b1 = tf.get_variable("b1", shape=[18],
            initializer=tf.contrib.layers.xavier_initializer())
        h1 = tf.tanh(tf.matmul(x, W1) + b1)
    with tf.name_scope('output_layer') as scope:
        W2 = tf.get_variable("W2", shape=[18, 9],
            initializer=tf.contrib.layers.xavier_initializer())
        b2 = tf.get_variable("b2", shape=[9],
            initializer=tf.contrib.layers.xavier_initializer())
        y = tf.tanh(tf.matmul(h1, W2) + b2)
    # Select Q(s, a) for the action actually taken in each replay row.
    action_t = tf.placeholder(tf.int32, [None, 2])
    q_learner = tf.gather_nd(y, action_t)
# Q target neural network: same topology, weights fed via placeholders
with tf.name_scope('Q-target') as scope:
    x_old = tf.placeholder(tf.float32, [None, 27], name='x_old')
    with tf.name_scope('hidden_layer') as scope:
        W1_old = tf.placeholder(tf.float32, [27, 18], name='W1_old')
        b1_old = tf.placeholder(tf.float32, [18], name='b1_old')
        h1_old = tf.tanh(tf.matmul(x_old, W1_old) + b1_old, name='h1')
    with tf.name_scope('output_layer') as scope:
        W2_old =tf.placeholder(tf.float32, [18, 9], name='W2_old')
        b2_old =tf.placeholder(tf.float32, [9], name='b2_old')
        y_old = tf.tanh(tf.matmul(h1_old, W2_old) + b2_old, name='y_old')
    l_done = tf.placeholder(tf.bool, [None])
    reward = tf.placeholder(tf.float32, [None])
    gamma = tf.constant(0.99, name='gamma')
    qt = tf.reduce_max(y_old, axis = 1, name='maximum_qt')
    # Bellman target: r for terminal transitions, r + gamma*max_a Q_target otherwise.
    q_target = tf.where(l_done, reward, reward + (gamma * qt), name='selected_max_qt')
with tf.name_scope('loss') as scope:
    loss = tf.losses.mean_squared_error(q_target, q_learner)
#train_step = tf.train.GradientDescentOptimizer(0.03).minimize(loss)
train_step = tf.train.RMSPropOptimizer(0.00025, momentum=0.95, use_locking=False, centered=False, name='RMSProp').minimize(loss)
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()
episodes = 100000
n0 = 100.0
start_size = 500
experience_replay = np.zeros((0,5))
print("All set. Start epoch")
for e in range(episodes):
# print("episode ",e)
state = State()
if e >= start_size:
epsilon = max(n0 / (n0 + (e- start_size)), 0.1)
else: epsilon = 1
if e % 2 == 1:
# this is player 2's turn
state = invert_board(state)
if random.random() < epsilon:
# take random action
action_pool = np.random.choice(9,9, replace = False)
for a in action_pool:
if is_valid(a, state):
action = a
break
else:
# take greedy action
action = extract_policy(state)
r, state = step(state, action)
state = invert_board(state)
r = -r
while not state.terminal:
# this section is player 1's turn
# select epsilon-greedy action
if random.random() < epsilon:
# take random action
action_pool = np.random.choice(9,9, replace = False)
for a in action_pool:
if is_valid(a, state):
action = a
break
else:
# take greedy action
action = extract_policy(state)
r, state_ = step(state, action)
if not state_.terminal:
# this is player 2's turn
state_ = invert_board(state_)
if random.random() < epsilon:
# take random action
action_pool = np.random.choice(9,9, replace = False)
for a in action_pool:
if is_valid(a, state_):
action2 = a
break
else:
# take greedy action
action2 = extract_policy(state_)
r, state_ = step(state_, action2)
state_ = invert_board(state_)
r = -r
s = convert_state_representation(np.copy(state.board))
s_ = convert_state_representation(np.copy(state_.board))
done = state_.terminal
D = (s, action, r, s_, done)
experience_replay = np.append(experience_replay, [D], axis = 0)
state.board = np.copy(state_.board)
state.terminal = state_.terminal
if e == start_size: print("Start Training")
if e >= start_size:
if((e % 50) == 0):
print("Episode:",e)
# here save the W1,W2,b1,B2
saved_W1 = W1.eval()
saved_W2 = W2.eval()
saved_b1 = b1.eval()
saved_b2 = b2.eval()
train(experience_replay, saved_W1, saved_W2, saved_b1, saved_b2)
print("Training completed")
play_game()
|
{"/gui.py": ["/main.py", "/const.py"], "/nn.py": ["/const.py"], "/const.py": ["/Games.py", "/File_storage.py"], "/main.py": ["/File_storage.py", "/nn.py", "/Games.py", "/const.py", "/gui.py"]}
|
5,563
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/state.py
|
from document import Document
from helper import *
from os import listdir
import pickle
import re
from NE_Recognizer import NE_Recognizer
from nltk import word_tokenize
from os.path import isfile, join
class State :
    """Bootstrapping state (Python 2): loads the corpus, seeds the NE
    dictionaries, trains an NE_Recognizer, scores names across the corpus,
    writes the results to "Out", and pickles the recognizer to ne_r.pickle.
    NOTE(review): indentation reconstructed from a flattened dump."""
    def __init__(self) :
        self.corpus = []
        #self.add_corpus("text_extracted")
        self.add_corpus("Raw")
        print "Load Corpus Done"
        #From all iteration
        self.ne = dict()
        self.rules = dict()
        #Find inconsistent rules
        self.candidate_rules = dict()
        self.candidate_ne = dict()
        self.init_dict("PER", "PER.txt")
        self.init_dict("ORG", "ORG.txt")
        self.init_dict("LOC", "LOC.txt")
        #Promote new things
        self.promoted = []
        self.ne_r = NE_Recognizer()
        for doc in self.corpus :
            self.ne_r.train_document(doc)
        self.name_dict = dict()
        for i in range(0, len(self.corpus)) :
            new_dict = self.ne_r.extract_names(self.corpus[i].tokens)
            self.name_dict = merge_name_dict(self.name_dict, new_dict)
        # Keep names scoring above 0.5, best first.
        by_score = sorted(self.name_dict.items(), key=lambda x:x[1], reverse=True)
        by_score = [b for b in by_score if b[1] > 0.5 ]
        # NOTE(review): this handle is never closed — output may not be flushed.
        f = open("Out", "w")
        for entry in by_score :
            f.write(str(entry[0])+'\t'+str(entry[1])+'\n')
        with open('ne_r.pickle', 'wb') as w:
            pickle.dump(self.ne_r, w)
    def get_type_and_score(self,rule) :
        # (type, best score) for a promoted rule; ("None", -1) when unknown.
        if rule not in self.rules :
            return ("None", -1)
        maj_map = self.rules[rule]
        return (maj_map.get_type(), maj_map.get_max_score())
    def init_dict(self, label, filepath) :
        # Seed self.ne with entities of `label` from a one-name-per-line file.
        f = open(filepath, "r")
        for line in f :
            name = line.strip()
            #Represent NE as a list of tokens
            name = tuple(word_tokenize(name))
            self.ne[name] = Scorekeeper()
            # Put some large weight for the label since it's seed
            self.ne[name].positive_scoring(label, 99)
    def add_corpus(self, filepath) :
        # Load every .txt file under filepath as a Document.
        onlyfiles = [ f for f in listdir(filepath) if isfile(join(filepath,f)) ]
        for f in onlyfiles :
            if f[-4:] != '.txt' :
                ''' All our training data is text files'''
                continue
            self.corpus.append(Document(filepath+"/"+f))
    def get_corpus(self) :
        return self.corpus
    def add_candidate_rules(self, ne, rules) :
        # Credit each rule with the type/score of the NE that produced it.
        for rule in rules :
            ne_type = self.ne[ne].get_type()
            ne_score = self.ne[ne].get_max_score()
            if rule not in self.candidate_rules.keys() :
                self.candidate_rules[rule] = Scorekeeper()
            self.candidate_rules[rule].positive_scoring(ne_type, ne_score)
    def promotion_filter(self, item, dictionary, threshold) :
        # If any filter is true, ignore rule
        filters = [ # Do not promote suspiciously pure items
                    #lambda r : dictionary[r].get_max_score() == 1.0,
                    # Must meet certain support threshold
                    #lambda r : dictionary[r].total < 100,
                    # Must meet purity threshold
                    lambda r : dictionary[r].get_max_score() < threshold,
                    # Not previously promoted
                    lambda r : r in self.promoted
                  ]
        if filters[0](item) or filters[1](item):
            return False
        return True
    def promote_rules(self, threshold, max_to_promote) :
        # Merge the best candidate rules into self.rules and mark them promoted.
        def promote(rule_list) :
            for rule in rule_list :
                if rule in self.rules.keys() :
                    self.rules[rule].merge(self.candidate_rules[rule])
                else :
                    self.rules[rule] = self.candidate_rules[rule]
                self.promoted.append(rule)
            return rule_list
        rule_dict = self.candidate_rules
        print "Rule Promotion Candidates :"+str(self.candidate_rules)
        rules = sort_by_score(rule_dict)
        rules = [r for r in rules if self.promotion_filter(r, rule_dict, threshold)]
        rules = [r for r in rules if r not in self.promoted]
        print "Filtered Rules :"+str(rules)
        ret = promote(rules[:max_to_promote])
        #Clear candidate rules
        self.candidate_rules = dict()
        return ret
    def promote_ne(self, threshold, max_to_promote) :
        # Merge the best candidate NEs into self.ne.
        # NOTE(review): the filtered lists `ne` are computed but `ne_list`
        # (unfiltered) is what gets promoted — possibly a bug; confirm.
        def promote(ne_list) :
            for name in ne_list :
                if name in self.ne.keys() :
                    self.ne[name].merge(self.candidate_ne[name])
                else :
                    self.ne[name] = self.candidate_ne[name]
                self.promoted.append(name)
            return ne_list
        ne_dict = self.candidate_ne
        print ne_dict.keys()
        ne_list = sort_by_score(ne_dict)
        ne = [ne for ne in ne_list if self.promotion_filter(ne, ne_dict, threshold)]
        ne = [ne for ne in ne_list if ne not in self.ne.keys()]
        ret = promote(ne_list[:max_to_promote])
        for item in ret :
            if item in self.promoted :
                print "repromote"
        self.candidate_ne = dict()
        return ret
    def find_ne(self) :
        # Scan the corpus with every promoted rule and collect candidate NEs.
        def insert_candidate_ne(ne, rule_type, rule_score) :
            if len(ne) == 0 :
                return
            if ne not in self.candidate_ne.keys() :
                self.candidate_ne[ne] = Scorekeeper()
            self.candidate_ne[ne].positive_scoring(rule_type, rule_score)
        def distance_close(text, l_bound, r_bound) :
            # True when no sentence boundary ('.') lies between the rule parts.
            if -1 == text[l_bound:r_bound].find('.') :
                return True
            return False
        def search_substring(text, query, traverse) :
            if query == "" :
                #Empty rule
                return traverse, traverse
            while traverse < len(text) :
                candidate_index = text.find(query, traverse)
                '''New traverse pointer should not point to same index
                as query to prevent infinite loop'''
                traverse = candidate_index+len(query)
                if candidate_index == -1 :
                    return -1, len(text)
                if subword_filter(text, candidate_index, query) :
                    return candidate_index, traverse
            return -1, len(text)
        self.candidate_ne = dict()
        #You only find new NE from newly promoted rules
        rule_list = self.rules
        for rule in rule_list :
            info = self.get_type_and_score(rule)
            rule_score, rule_type = info[1], info[0]
            for doc in self.corpus :
                text = doc.text
                traverse = 0
                while traverse < len(text) :
                    l_bound, traverse= search_substring(text,rule[0],traverse)
                    if l_bound == -1 :
                        break
                    r_bound, traverse = search_substring(text,rule[1],traverse)
                    if r_bound == -1 :
                        traverse = len(text)
                        break
                    ''' If rule parts are too distance or different sentences, do
                    not count '''
                    if not distance_close(text, l_bound, r_bound) :
                        break
                    ''' If the rule only specifies previous token, seek forward
                    word. Otherwise, you find the wrong NE'''
                    if len(rule[1]) == 0:
                        ne = get_next_word(text, r_bound)
                    else :
                        ne = get_prev_word(text, r_bound)
                    insert_candidate_ne(ne, rule_type, rule_score)
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,564
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/load.py
|
import pickle
# Quick sanity check: unpickle the trained recognizer and print it (Python 2).
ner = pickle.load(open("ne_r.pickle", "rb"))
print ner
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,565
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/nameentity.py
|
from helper import *
from scorable import Scorable
class NameEntity(Scorable) :
    """A scored named entity: a tuple of tokens plus per-type scores
    inherited from Scorable."""
    def __init__(self, tok_tuple, is_seed=False) :
        Scorable.__init__(self)
        self.name = tok_tuple
        self.mined_rules = None
        # Fix: the is_seed argument was previously ignored, so non-seed
        # entities lacked the attribute entirely until init_seed() ran.
        self.is_seed = is_seed
    def init_seed(self, type) :
        # Mark as a seed of `type`: full score for that type, zero elsewhere.
        self.is_seed = True
        self.total = 1
        for k in self.dictionary.keys() :
            if k == type :
                self.dictionary[k] = 1
            else :
                self.dictionary[k] = 0
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,566
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/RuleFactory.py
|
'''
Rule Factory Singleton
'''
from rule import Rule
def make_rule() :
    """Factory entry point: build and return a fresh Rule."""
    return Rule()
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,567
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/state2.py
|
'''
Implementation of State singleton
'''
import ConfigParser
from os import listdir
from helper import *
import re
import RuleFactory
from document import Document
from nameentity import NameEntity
from os.path import isfile, join
import pickle
from nltk import word_tokenize
cfg = ConfigParser.ConfigParser()
cfg.read("config.ini")
corpus = []
# all_* data structures primarity for caching results
all_rules = dict()
# promoted_* data structure for recording promoted entities
promoted_rules = []
all_ne = dict()
promoted_ne = []
# name recognizer
recognizer = None
def init() :
    # Module bootstrap: load the corpus and the pickled recognizer, then the
    # seed dictionary for every entity type from config.ini.
    init_corpus()
    init_recognizer()
    for t in get_types():
        # NOTE(review): get_types is not defined in this module — presumably
        # it comes from `from helper import *`; confirm.
        init_dict(t, cfg.get("SeedFiles", t))
def init_corpus() :
    """Load every .txt file under the configured corpus directory as a Document."""
    filepath = cfg.get('StateInit','CorpusDir')
    for fname in listdir(filepath) :
        if not isfile(join(filepath, fname)) :
            continue
        # All our training data is text files
        if fname.endswith('.txt') :
            corpus.append(Document(filepath + "/" + fname))
def init_dict(label, filepath) :
    """Load seed entities of type `label` from `filepath` (one name per line)
    into all_ne and mark them promoted.

    Fix: use `with` so the seed file is closed (the original leaked the handle).
    """
    with open(filepath, "r") as f :
        for line in f :
            name = line.strip()
            #Represent NE as a list of tokens
            name = tuple(word_tokenize(name))
            ne = NameEntity(name, is_seed=True)
            ne.init_seed(label)
            all_ne[ne.name] = ne
            promoted_ne.append(ne.name)
def init_recognizer() :
global recognizer
filepath = cfg.get('StateInit', 'ExtractorSeed')
recognizer = pickle.load(open(filepath, "rb"))
print "Recognizer:"+str(recognizer)
def get_ne_object(name) :
    """Return the NameEntity registered under `name`, or None when absent.

    Uses dict.get instead of the deprecated has_key() plus a second lookup.
    """
    return all_ne.get(name)
def new_ne_object(name) :
    """Return the NameEntity for `name`, creating and registering it on first use.

    `in` replaces the deprecated dict.has_key().
    """
    if name not in all_ne :
        all_ne[name] = NameEntity(name)
    return all_ne[name]
init()
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,568
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/ner_old.py
|
from os import listdir
#from textblob import TextBlob
import re
from os.path import isfile, join
INPUT = "Raw"
LABELS = ["PER", "LOC", "ORG"]
#PARAMETERS
prom_thresh = 0.5 #To check if a rule is promoted or not
CORPUS = []
#DICTIONARIES
PER_DICT = []
ORG_DICT = []
LOC_DICT = []
#PROMOTED RULES
RULES = []
#CANDIDATE RULES
CR_PER = []
CR_ORG = []
CR_LOC = []
#CANDIDATE NE
CNE = dict()
#CANDIDATE NE (Normalized on documents)
CNE_DOC = dict()
#SCORES
NE_SCORES = dict()
RULE_SCORES = dict()
# Entities associated with a rule
rule_entities = dict()
entity_rules = dict()
NE_TYPE = [(PER_DICT,CR_PER,"PER"), (ORG_DICT,CR_ORG,"ORG"), (LOC_DICT,CR_LOC,"LOC")]
def get_next_word(text, index) :
    # Return the run of alphanumeric characters starting just after `index`,
    # or "" at end of text / when the next char is a period (Python 2:
    # relies on the `unicode` builtin, so `text` must be unicode).
    buf = []
    #Stop if no following word
    if index+1 >= len(text) or text[index+1] == '.' :
        return ""
    index += 1
    while index < len(text) and unicode.isalnum(text[index]) :
        buf.append(unicode(text[index]))
        index += 1
    return "".join(buf)
def get_prev_word(text, index) :
    # Return the run of alphanumeric characters ending just before `index`
    # (built right-to-left), or "" when the preceding char is a period.
    # NOTE(review): index -= 2 skips the character at index-1 — presumably a
    # separator (space) is expected there; confirm callers guarantee this.
    buf = []
    #Stop if no previous words
    if text[index-1] == '.' :
        return ""
    index -= 2
    while index > 0 and unicode.isalnum(text[index]) :
        buf.insert(0, unicode(text[index]))
        index -= 1
    return "".join(buf)
def reset() :
    """Clear the per-iteration candidate structures.

    Fix: without `global` declarations the original only rebound LOCAL
    names (and assigned to the misspelled C_NE instead of CNE), so the
    module-level state was never actually cleared — the function was a no-op.
    """
    global CR_PER, CR_ORG, CR_LOC, CNE, CNE_DOC
    #refresh candidate lists for new iteration
    CR_PER = []
    CR_ORG = []
    CR_LOC = []
    CNE = dict()
    CNE_DOC = dict()
def subword_filter(text, index, word) :
    # True when the match at `index` is a standalone token: neither the char
    # before nor the char after the match is alphanumeric.
    # NOTE(review): raises IndexError when the match ends at the very last
    # character of `text` (index+len(word) out of range) — confirm callers
    # never match there.
    if unicode.isalnum(text[index-1]) or unicode.isalnum(text[index+len(word)]) :
        return False
    return True
class Document :
    """A corpus document (Python 2): raw text plus extraction helpers.
    NOTE(review): indentation reconstructed from a flattened dump; TextBlob
    is imported only in a commented-out line at module top, so extract_np
    would raise NameError as written — confirm."""
    def __init__(self, filepath) :
        f = open(filepath, "r")
        self.text = f.read()
        try :
            self.text = self.text.decode('utf-8')
        except UnicodeDecodeError :
            # Undecodable files are kept as empty documents.
            self.text=""
            print "failed"
        f.close()
    def extract_np(self, rule) :
        # Extract candidate NEs for `rule`, preferring whole noun phrases over
        # single words when a noun phrase starts at a match position.
        # NOTE(review): both branches end in `return result_set.extend(...)`;
        # list.extend returns None, so this method always returns None — bug?
        result_set = []
        #Use this class for noun phrase
        blob = TextBlob(self.text)
        if rule.prefix == "" :
            print "prefix"
            # NOTE(review): finditer searches for rule.prefix, which is ""
            # in this branch — rule.suffix was probably intended; confirm.
            indicies = [m.start() for m in re.finditer(rule.prefix, self.text)]
            '''Filter out matches which are substring matches (not true rule
            match)'''
            indicies = [index for index in indicies if subword_filter(self.text, index, rule.suffix)]
            '''Advance indicies so index = index of candidate NE'''
            indicies = [index + 1 for index in indicies]
            ''' For each noun phrase occurrence, if index corresponds with rule
            occurrence indicies, use np instead of single word in result set'''
            print list(blob.noun_phrases)
            for np in list(blob.noun_phrases) :
                traverse = 0
                for i in range(0, self.text.count(np)) :
                    np_index = self.text.find(np, traverse)
                    if np_index in indicies :
                        indicies.remove(np_index)
                        result_set.append(np)
                    traverse = np_index + len(np)
            single_ne = [get_prev_word(self.text, index) for index in indicies]
            return result_set.extend(single_ne)
        if rule.suffix == "" :
            indicies = [m.start() for m in re.finditer(rule.prefix, self.text)]
            indicies = [index for index in indicies if subword_filter(self.text, index, rule.prefix)]
            indicies = [index + len(rule.prefix)+ 1 for index in indicies]
            for np in blob.noun_phrases :
                print np
                traverse = 0
                for i in range(0, self.text.count(np)) :
                    np_index = self.text.find(np, traverse)
                    if np_index in indicies :
                        indicies.remove(np_index)
                        result_set.append(np)
                    traverse = np_index + len(np)
            print indicies
            single_ne = [get_next_word(self.text, index+len(rule.prefix)) for index in indicies]
            print single_ne
            return result_set.extend(single_ne)
    def extract(self, rule) :
        # Single-word extraction: the word before a suffix match or after a
        # prefix match, deduplicated.
        if rule.prefix == "" and rule.suffix == "" :
            return []
        if rule.prefix == "" :
            indicies = [m.start() for m in re.finditer(rule.suffix, self.text)]
            indicies = [index for index in indicies if subword_filter(self.text, index, rule.suffix)]
            return list(set([get_prev_word(self.text, index) for index in indicies]))
        if rule.suffix == "" :
            indicies = [m.start() for m in re.finditer(rule.prefix, self.text)]
            indicies = [index for index in indicies if subword_filter(self.text, index, rule.prefix)]
            return list(set([get_next_word(self.text, index+len(rule.prefix)) for index in indicies]))
    def find_rules(self,gazetteer, label) :
        # For every gazetteer entity occurrence, mine one-sided context rules
        # (previous word / next word, each longer than 3 chars).
        count = 0
        rules = []
        for word in gazetteer :
            traverse = 0
            while traverse < len(self.text) :
                index = self.text.find(word, traverse)
                if index < 0 :
                    traverse = len(self.text)
                    break
                traverse = index+len(word)
                # Skip substring matches embedded in a larger token.
                if unicode.isalnum(self.text[index-1]) or \
                    unicode.isalnum(self.text[index+len(word)-1]) :
                    continue
                #count += 1
                #print count
                next_word = get_next_word(self.text, index+len(word))
                prev_word = get_prev_word(self.text, index)
                #print "WORD:"+word
                #print prev_word
                #print next_word
                if len(prev_word) > 3 :
                    rules.append(Rule(label, prev_word, ""))
                    add_rule_entity(rules[-1], word)
                if len(next_word) > 3 :
                    rules.append(Rule(label, "", next_word))
                    add_rule_entity(rules[-1], word)
        return rules
def add_rule_entity(rule, entity):
    """Record in the global rule_entities map that *rule* extracted *entity* once."""
    per_rule = rule_entities.setdefault(rule, dict())
    per_rule[entity] = per_rule.get(entity, 0) + 1
def add_entity_rule(entity, rule):
    """Record in the global entity_rules map that *entity* was found by *rule* once."""
    per_entity = entity_rules.setdefault(entity, {})
    per_entity[rule] = per_entity.get(rule, 0) + 1
class Rule :
    """A labelled context rule: text matching prefix<entity>suffix tags the
    entity with self.label. Tracks how often the rule agreed with the
    document-majority label (correct) versus not (wrong).
    (Python 2 code: print_rule uses the print statement.)"""
    def __init__(self, label, prefix, suffix) :
        self.label = label
        # Normalise None to "" so one-sided rules can be tested with == "".
        self.prefix = prefix if prefix != None else ""
        self.suffix = suffix if suffix != None else ""
        self.application = 0  # total applications
        self.correct = 0      # applications agreeing with the majority label
        self.wrong = 0        # applications disagreeing with it
    def is_wrong(self) :
        self.wrong += 1
        self.application += 1
    def is_correct(self) :
        self.correct += 1
        self.application += 1
    def print_rule(self) :
        # Debug dump in the form prefix<LABEL>suffix.
        print self.prefix + "<"+self.label+">" + self.suffix
# Candidate rules scored using dictionary entities
def score_rule(rule, rule_label):
    """Score *rule* by how strongly its extracted entities match *rule_label*.

    The score is (sum of NE scores carrying the rule's label minus the sum
    of the rest), averaged over the distinct entities the rule extracted.
    Rules scoring at or above prom_thresh are promoted into RULES.
    """
    matching = other = 0
    for entity in rule_entities[rule]:
        if get_nelabel(entity) == rule_label:
            matching += NE_SCORES[entity]
        else:
            other += NE_SCORES[entity]
    RULE_SCORES[rule] = (matching - other) / float(len(rule_entities[rule]))
    if RULE_SCORES[rule] >= prom_thresh:
        RULES.append(rule)
def get_nelabel(ne):
    """Return the label of the NE_TYPE gazetteer containing *ne* (None if absent)."""
    for entry in NE_TYPE:
        if ne in entry[0]:
            return entry[2]
# Candidate NEs scored using promoted rules
def score_ne(ne, ne_label):
    """Score *ne* by the balance of its rules voting for *ne_label*.

    Writes the averaged vote into NE_SCORES; entities strictly above
    prom_thresh are added to the dictionary for their label.
    """
    votes_for = votes_against = 0
    for rule in entity_rules[ne]:
        if rule.label == ne_label:
            votes_for += 1
        else:
            votes_against += 1
    NE_SCORES[ne] = (votes_for - votes_against) / float(len(entity_rules[ne]))
    if NE_SCORES[ne] > prom_thresh:
        add_to_dict(ne, ne_label)
def add_to_dict(ne, label):
    """Append *ne* to every NE_TYPE gazetteer whose entry carries *label*."""
    for entry in NE_TYPE:
        if entry[2] == label:
            entry[0].append(ne)
#startalgorithm
# Bootstrapping driver (Python 2 script): seed gazetteers from PER/ORG/LOC
# files, then alternate rule promotion and NE promotion for 30 iterations.
# Initialize dictionary
for tup in [(PER_DICT,"PER.txt"),(ORG_DICT,"ORG.txt"),(LOC_DICT,"LOC.txt")] :
    dictionary, f = tup[0], open(tup[1],"r")
    for line in f :
        dictionary.append(line.strip())
        # Seed entities start with full confidence.
        NE_SCORES[line.strip()] = 1.0
#Initialize Corpus
onlyfiles = [ f for f in listdir(INPUT) if isfile(join(INPUT,f)) ]
for f in onlyfiles :
    CORPUS.append(Document(INPUT+"/"+f))
    #print 'Adding size', len(CORPUS[-1].text)
with open('rules.txt', 'w') as fp:
    with open('NEs.txt', 'w') as fp1:
        for i in range(30):
            print 'iteration #', i
            fp.write('iteration '+str(i)+'\n')
            fp1.write('iteration '+str(i)+'\n')
            #generate global set of rules
            for doc in CORPUS :
                for pair in NE_TYPE :
                    dictionary, candidate_rules, label = pair[0], pair[1], pair[2]
                    #Generate candidate rules
                    candidate_rules.extend(doc.find_rules(dictionary, label))
                    #print len(candidate_rules)
            #Rule promotion
            for pair in NE_TYPE:
                candidate_rules = pair[1]
                for rule in candidate_rules:
                    score_rule(rule, rule.label)
            print 'Promoted Rules...'
            for rule in RULES:
                s = (rule.label+'\t'+rule.prefix+'\t'+rule.suffix+'\n')#+unicode(RULE_SCORES[rule])
                fp.write(s.encode('utf-8'))
            fp.write('\n\n')
            #print 'Promoted rules: ', RULES
            #print '\n\n\n'
            #generate NE with document-level consistency
            for doc in CORPUS :
                for pair in NE_TYPE :
                    dictionary, candidate_rules, label = pair[0], pair[1], pair[2]
                    #print label
                    # TODO : Change this to use known rules, not candidate
                    #for rule in candidate_rules :
                    for rule in RULES:
                        #list of Names
                        results = doc.extract(rule)
                        results = [r for r in results if len(r) > 0]
                        for r in results :
                            if r not in CNE.keys() :
                                CNE[r] = dict()
                                # Key invariant
                                CNE[r]["PER"] = []
                                CNE[r]["LOC"] = []
                                CNE[r]["ORG"] = []
                            CNE[r][rule.label].append(rule)
                            add_entity_rule(r, rule)
                #Make NE labels consistent across document
                for name in CNE.keys() :
                    #Keep track of majority label type and count
                    curr = (None, 0)
                    for label in LABELS :
                        if len(CNE[name][label]) > curr[1] :
                            curr = (label, len(CNE[name][label]))
                    #Reward and punish rules
                    for rule in CNE[name][curr[0]] :
                        rule.is_correct()
                    for label in LABELS :
                        if label is curr[0] :
                            continue
                        for rule in CNE[name][label] :
                            rule.is_wrong()
                    CNE_DOC[name] = curr[0]
                print CNE_DOC
            #Update NE scores
            print 'Updating NE scores...'
            for ne in CNE:
                for label in CNE[ne]:
                    score_ne(ne, label)
                    s = (ne +'\t'+label+'\n')#+unicode(NE_SCORES[ne])
                    fp1.write(s.encode('utf-8'))
            fp1.write('\n\n')
            # reset() presumably clears per-iteration state (CNE etc.) --
            # defined elsewhere; confirm.
            reset()
            print '\n\n\n'
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,569
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/scorable.py
|
from helper import *
class Scorable :
    """Base class for scoreable objects (rules, named entities).

    Keeps a per-type score table in self.dictionary (one slot per type from
    get_types()) plus a lazily-maintained cache of the best-scoring type.
    """
    def __init__(self) :
        self.is_seed = False          # seeds have immutable confidence
        self.dictionary = dict()      # type -> accumulated score
        # Max : (max score, type responsible)
        self.max_type = None
        self.max_stale = True         # True when max_type needs recomputing
        self.total = 0.0
        for t in get_types() :
            self.dictionary[t] = 0
    def add_score(self, key, val=1) :
        ''' Potentially skip all call where value (confidence) is within epsilon
        of 1/#types, to get rid of noise '''
        if self.is_seed : #Seed confidence is immutable
            return
        val = float(val)
        # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator
        # is equivalent and works in Python 2 as well.
        if key not in self.dictionary :
            print("Adding score to unknown key: "+str(key) )
            return
        self.max_stale = True
        self.dictionary[key] += val
        val = self.dictionary[key]
        self.total += val
        # Lower score for negative examples.
        # NOTE(review): `val` was rebound above to the *cumulative* score for
        # `key`, so other types are decremented by the cumulative value, not
        # the increment -- confirm this is intended.
        for t in get_types() :
            if t == key :
                continue
            self.dictionary[t] -= val
    def recalc_max_type(self) :
        # Highest-scoring type wins; ties resolved by sort stability.
        ordered = sorted(self.dictionary.items(), key=lambda x:x[1], reverse=True)
        self.max_type = ordered[0][0]
        self.max_stale = False
    def merge_scores(self, other) :
        """Fold another Scorable's per-type scores and total into this one."""
        self.max_stale=True
        self.total += other.total
        for k in other.dictionary.keys() :
            self.dictionary[k] += other.dictionary[k]
    def get_score(self) :
        """Normalised score of the best type (best / running total)."""
        if self.max_stale :
            self.recalc_max_type()
        return self.dictionary[self.max_type]/self.total
    def get_type(self) :
        """Best-scoring type, recomputed lazily."""
        if self.max_stale :
            self.recalc_max_type()
        return self.max_type
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,570
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/rule.py
|
from helper import *
import state2
from scorable import *
class Rule(Scorable) :
    """A contextual extraction rule: a forward token window and a reverse
    token window; tokens between a forward match and the next reverse match
    are candidate entities. Per-type scores come from Scorable."""
    def __init__(self, fwd=None, rev=None) :
        ''' Sets all parameters to None, to distinguish between never-set and
        set-to-empty '''
        Scorable.__init__(self)
        self.found_by = set()        # NEs already credited (see incr_score)
        self.fwd_window = fwd
        self.rev_window = rev
        self.matches_with_ne = 0     # matches that yielded a candidate NE
        self.total_matches = 0       # all matches
        self.extracted_ne = None     # last non-empty extraction (set externally)
    def __str__(self) :
        return "Rule :"+str(self.fwd_window)+"/"+str(self.rev_window)
    def __eq__(self, other) :
        if other == None :
            return False
        ret = True
        ret &= self.fwd_window == other.fwd_window
        ret &= self.rev_window == other.rev_window
        return ret
    def __hash__(self) :
        ''' Hash by constant and unique identifiers : meaning not any form of
        purity score '''
        # NOTE(review): raises TypeError while either window is still None;
        # callers apparently only hash fully-specified rules.
        h = (tuple(self.fwd_window), tuple(self.rev_window))
        return h.__hash__()
    def get_id(self) :
        ''' More memory efficient than just tuple '''
        return self.__hash__()
    def __ne__(self, other) :
        return not self == other
    def blank_rule(self) :
        # True when both windows are still unset (equal to a fresh Rule).
        r = Rule()
        return self == r
    def incr_score(self, name) :
        ''' Prevent double counting from the same named entity '''
        # NOTE(review): found_by is never added to, so this guard can never
        # fire -- confirm whether names should be recorded here.
        ne_obj = state2.all_ne[name]
        if name in self.found_by :
            return
        self.add_score(ne_obj.get_type(), ne_obj.get_score())
    def match_rev(self, seq, index) :
        """Positions where the reverse window matches; empty window matches everywhere."""
        if self.rev_window == None or len(self.rev_window) == 0 :
            return range(len(seq))
        # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
        if self.rev_window[0] not in index :
            return []
        return self.match_seq(self.rev_window, seq, index)
    def match_fwd(self, seq, index) :
        """Positions just past a forward-window match; empty window matches everywhere."""
        if self.fwd_window == None or len(self.fwd_window) == 0 :
            return range(len(seq))
        if self.fwd_window[0] not in index :
            return []
        candidates = self.match_seq(self.fwd_window, seq, index)
        # advance index point past the matched sequence
        candidates = [c+len(self.fwd_window) for c in candidates]
        return candidates
    def match_rule(self, seq, index) :
        """Pair each forward match with the first reverse match after it,
        returning (start, end) token spans between the two windows."""
        fwd_matches = self.match_fwd(seq, index)
        rev_matches = self.match_rev(seq, index)
        boundary_pairs = []
        for f in fwd_matches :
            cont = False
            for r in rev_matches :
                if f < r :
                    boundary_pairs.append((f,r))
                    cont = True
                    break
            if not cont :
                break
        return boundary_pairs
    def match_seq(self, target, seq, index) :
        """Start positions in *seq* where the token sequence *target* occurs,
        using *index* (token -> list of positions)."""
        candidates = index[target[0]]
        for i in range(1, len(target)) :
            positions = index[target[i]]
            # BUG FIX: the original filtered on `c+i in target` (membership in
            # the token list itself) and never used the positions fetched
            # above; a valid continuation requires position c+i to hold
            # target[i].
            candidates = [c for c in candidates if c+i in positions]
        return candidates
    def promotion_score(self) :
        """Purity score if the rule clears the configured purity and
        extraction-ratio thresholds, else 0."""
        purity_min = float(state2.cfg.get("Promotion","PurityMin"))
        extract_min = float(state2.cfg.get("Promotion","ExtractPercentage"))
        if purity_min > self.get_score() or \
           self.extracted_ne == None or \
           extract_min > self.matches_with_ne/float(self.total_matches) :
            return 0
        return self.get_score()
    def score_candidate_ne(self) :
        """Propagate this rule's (type, score) onto every entity it extracted."""
        ne_list = self.extracted_ne
        if ne_list == None or len(ne_list) == 0 :
            return
        for ne in ne_list :
            ne_obj = state2.new_ne_object(ne)
            ne_obj.add_score(self.get_type(),self.get_score())
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,571
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/run.py
|
import state2
def mine_rules() :
    """Mine context rules around every promoted NE across the corpus.

    Mined rules are cached on each NE object; newly seen rules are registered
    in state2.all_rules and credited with the NE's score. Returns a list of
    candidate rule ids that are not yet promoted.
    """
    result_set = set()
    for doc in state2.corpus :
        for ne in state2.promoted_ne :
            ne_obj = state2.get_ne_object(ne)
            if ne_obj != None and ne_obj.mined_rules != None:
                # Calculation already been done for this NE; reuse the cache.
                precomputed_rules = [r for r in ne_obj.mined_rules \
                                     if r not in state2.promoted_rules]
                rules_hashes = [r.get_id() for r in precomputed_rules]
                # BUG FIX: set.union() returns a NEW set and the original call
                # discarded it, so cached rules were silently dropped;
                # update() mutates the accumulator in place.
                result_set.update(rules_hashes)
            else :
                found_rules = doc.find_rules(ne)
                if len(found_rules) == 0 :
                    continue
                ne_obj = state2.new_ne_object(ne)
                ne_obj.mined_rules = found_rules
                for r in found_rules :
                    # BUG FIX: dict.has_key() was removed in Python 3.
                    if r.get_id() not in state2.all_rules :
                        state2.all_rules[r.get_id()] = r
                    state2.all_rules[r.get_id()].incr_score(ne)
                    result_set.add(r.get_id())
    # Only consider new rules for promotion
    result_set = [r for r in result_set if r not in state2.promoted_rules]
    return result_set
def mine_ne() :
    """Collect the candidate entities extracted by promoted rules, excluding
    entities that are already promoted; returns them as a list."""
    candidates = set()
    for rule_id in state2.promoted_rules :
        rule_obj = state2.all_rules[rule_id]
        for name in rule_obj.extracted_ne :
            if name not in state2.promoted_ne :
                candidates.add(name)
    return list(candidates)
def score_rules(rule_ids) :
    """Match each rule against every sentence of the corpus, counting hits.

    total_matches counts all matches; matches_with_ne counts matches whose
    span yielded at least one not-yet-promoted entity. The last non-empty
    extraction is cached on the rule in extracted_ne.
    """
    for doc in state2.corpus :
        # Match rule to each sentence.
        for context in doc.context_list :
            tokens = context.tok_list
            for rule_id in rule_ids :
                rule = state2.all_rules[rule_id]
                for start, end in rule.match_rule(tokens, context.word_map) :
                    rule.total_matches += 1
                    extracted = state2.recognizer.extract_names(tokens[start:end])
                    extracted = [n for n in extracted if n not in state2.promoted_ne]
                    if not extracted :
                        continue
                    rule.extracted_ne = extracted
                    rule.matches_with_ne += 1
def promote_ne(ne_list, max_to_promote=10) :
    """Promote up to *max_to_promote* entities whose purity exceeds the
    configured minimum; returns the entities actually promoted.
    (Python 2 code: contains a print statement.)"""
    promoted_ids = []
    min_purity = float(state2.cfg.get("Promotion", "PurityMin"))
    for i in ne_list :
        # Debug trace of every candidate's score.
        print state2.get_ne_object(i).get_score()
    ne_list = [ne for ne in ne_list\
               if state2.get_ne_object(ne).get_score() > min_purity]
    # Best-scoring candidates first.
    ne_list = sorted(ne_list,\
                     key=lambda x:state2.get_ne_object(x).get_score(), reverse=True)
    for i in range(0, min(len(ne_list), max_to_promote)) :
        state2.promoted_ne.append(ne_list[i])
        promoted_ids.append(ne_list[i])
    return promoted_ids
def promote_rules(rule_ids, max_to_promote=10) :
    """Promote up to *max_to_promote* rules scoring above the configured
    threshold; each promoted rule immediately scores its candidate entities.

    Returns the list of promoted rule ids.
    """
    promoted_ids = []
    promotion_min = float(state2.cfg.get("Promotion", "PromotionThreshold"))
    candidates = [i for i in rule_ids if \
                  state2.all_rules[i].promotion_score() > promotion_min]
    # BUG FIX: the sort key referenced the comprehension variable `i` instead
    # of its own argument, so candidates were never actually ordered by score
    # (and under Python 3 this raised NameError).
    candidates = sorted(candidates, \
                        key=lambda x: state2.all_rules[x].promotion_score(), reverse=True)
    for i in range(0, min(len(candidates), max_to_promote)):
        state2.all_rules[candidates[i]].score_candidate_ne()
        state2.promoted_rules.append(candidates[i])
        promoted_ids.append(candidates[i])
    return promoted_ids
# Driver: one bootstrapping iteration -- mine and score rules, promote the
# best, then mine and promote the entities those rules extract.
# (Python 2 script: uses print statements.)
rule_ids = mine_rules()
score_rules(rule_ids)
promote_ids = promote_rules(rule_ids)
print promote_ids
ne_candidates = mine_ne()
print ne_candidates
promoted_ne = promote_ne(ne_candidates)
print promoted_ne
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,572
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/main.py
|
def print_log(promote_set,dic) :
    """Dump each promoted item's purity, winning type, and per-type scores.
    (Python 2 code: uses print statements.)"""
    for item in promote_set :
        print str(item) +" purity:" + str(dic[item].get_max_score())+ " type :" + \
              str(dic[item].get_type())
        # NOTE(review): loop variable shadows the builtin `type`.
        for type in dic[item].dictionary.keys() :
            print "type:"+str(type) +" : " +str(dic[item].dictionary[type])
from controller import Controller
NER = Controller()
'''
#Specify number of iterations
for i in range(9):
print "-------------------Iteration:"+str(i)+"-------------------"
NER.find_rules_tok()
promote_set = NER.promote_rules(0.6, 9) #Args : threshold [0,1], max promotions
print "Rules:"
print_log(promote_set, NER.state.rules)
NER.find_ne()
promote_set = NER.promote_ne(0.6, 9)
print "NE:"
print_log(promote_set, NER.state.ne)
NER.end_iteration()
'''
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,573
|
ZhouYii/NE_Recognizer
|
refs/heads/master
|
/controller.py
|
from state import State
class Controller :
    """Thin facade over State: drives rule mining, NE mining and promotion."""
    def __init__(self) :
        self.state = State()
    def find_rules_tok(self) :
        """Mine candidate rules for every known NE across the corpus.

        NOTE(review): `rules` on the extend() line below is undefined in this
        scope, so running this raises NameError -- likely a leftover from the
        removed pruning step sketched in the string literal.
        """
        for doc in self.state.corpus :
            for ne in self.state.ne.keys() :
                candidates = doc.find_rules(ne)
                '''
                prune rules
                '''
                candidates.extend(rules)
                self.state.add_candidate_rules(ne, candidates)
    def find_ne_tok(self) :
        """Extract candidate NEs with every promoted rule."""
        for doc in self.state.corpus :
            for rule in self.state.rules :
                candidates = doc.find_ne(rule)
                if len(candidates) == 0 :
                    continue
                for ne in candidates :
                    self.insert_candidate_ne(ne, rule)
    def promote_rules(self, threshold, max) :
        # NOTE(review): parameter `max` shadows the builtin.
        return self.state.promote_rules(threshold, max)
    def promote_ne(self, threshold, max) :
        return self.state.promote_ne(threshold, max)
    def find_ne(self):
        self.state.find_ne()
    def end_iteration(self) :
        # Discard per-iteration candidates; promoted items live in State.
        self.state.candidate_rules = dict()
        self.state.candidate_ne = dict()
    def insert_candidate_ne(self, ne, rule) :
        """Credit *ne* with the promoting rule's type and score.

        NOTE(review): `Scorekeeper` is not imported or defined in this module,
        so a first-seen entity raises NameError -- confirm the intended class.
        """
        candidate_dict = self.state.candidate_ne
        rule_dict = self.state.rules
        rule_type = rule_dict[rule].get_type()
        rule_score = rule_dict[rule].get_max_score()
        if ne not in candidate_dict.keys() :
            candidate_dict[ne] = Scorekeeper()
        candidate_dict[ne].positive_scoring(rule_type, rule_score)
|
{"/nameentity.py": ["/scorable.py"], "/RuleFactory.py": ["/rule.py"], "/rule.py": ["/state2.py", "/scorable.py"], "/controller.py": ["/state.py"]}
|
5,578
|
DandinPower/CryptoAI
|
refs/heads/main
|
/ann.py
|
import keras
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from price import GetData
def main():
    """Load candle data, encode the categorical column, split and scale it.

    NOTE(review): column 4 is label-encoded but the OneHotEncoder is applied
    to column 5 -- confirm which column the encoding is meant to target.
    """
    X,Y = GetData('BTCUSDT','15m', '5 day ago UTC', 89)
    labelencoder_X_4 = LabelEncoder()
    X[:, 4] = labelencoder_X_4.fit_transform(X[:, 4])
    transformer = ColumnTransformer(
        transformers=[
            ("OneHot", # Just a name
             OneHotEncoder(), # The transformer class
             [5] # The column(s) to be applied on.
             )
        ],
        remainder='passthrough' # donot apply anything to the remaining columns
    )
    X = transformer.fit_transform(X.tolist())
    X = X.astype('float64')
    # Avoid the dummy-variable trap (translated comment).
    #X = X[:,1:]
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
    sc = StandardScaler()
    X_train = sc.fit_transform(X_train)
    X_test = sc.transform(X_test)
main()
|
{"/ann.py": ["/price.py"]}
|
5,579
|
DandinPower/CryptoAI
|
refs/heads/main
|
/price.py
|
import numpy as np
import pandas as pd
from binance.client import Client
import plotly.graph_objects as go
# SECURITY NOTE(review): live Binance API credentials are hard-coded in
# source; they should be revoked and loaded from the environment or a
# secrets store instead of being committed.
api_key = "hbOfUeQs7wRpllIrZfbXgxRMNudnWybfoyE4MOhtO2nU2iuHta5A21TxHpapWRSY"
api_secret = "Zhb9OuU8g3zUmUmabxxZwdL9AGT21eOBTPR7VsUmNUrNxzzO3GvMSuZvNP6BIhxf"
client = Client(api_key, api_secret) # Log into the Binance account with our key/secret (translated comment).
def GetEma(CloseList,_len):
    """Exponential moving average of CloseList with smoothing length _len.

    Seeded with the first close; returns a list the same length as the input
    (empty input yields an empty list).
    """
    smoothing = 2/(_len + 1)
    ema_values = []
    for position, close in enumerate(CloseList):
        if position == 0:
            ema_values.append(close)
        else:
            previous = ema_values[-1]
            ema_values.append(close*smoothing + previous*(1-smoothing))
    return ema_values
def GetEVolume(VolumeList,_len):
    """Exponential moving average of VolumeList with smoothing length _len.

    BUG FIX: the recursive term now uses the previous *EMA* value
    (EVolume[i-1]) instead of the previous raw volume (VolumeList[i-1]),
    matching the EMA recurrence used by GetEma, which this mirrors.
    """
    EVolume = []
    k = 2/(_len + 1)
    for i in range(len(VolumeList)):
        if(i == 0):
            # Seed with the first raw volume.
            EVolume.append(VolumeList[i])
        else:
            ema_last = EVolume[i-1]
            EVolume.append((VolumeList[i]*k) + (ema_last*(1-k)))
    return EVolume
def GetPrice(Coin_Money,Interval,Time_Interval):
    """Download klines from Binance and return eight parallel lists:
    [open, close, high, low, volume, trade count, (open+close)/2, bar index].
    """
    opens, closes, highs, lows = [], [], [], []
    volumes, trades, averages, ticks = [], [], [], []
    # Walk the historical kline stream returned by the Binance client.
    for bar_index, kline in enumerate(client.get_historical_klines(Coin_Money, Interval, Time_Interval)):
        open_price = float(kline[1])
        high_price = float(kline[2])
        low_price = float(kline[3])
        close_price = float(kline[4])
        opens.append(open_price)
        highs.append(high_price)
        lows.append(low_price)
        closes.append(close_price)
        volumes.append(float(kline[5]))
        trades.append(float(kline[8]))
        averages.append((open_price + close_price)/2)
        ticks.append(bar_index)
    return [opens, closes, highs, lows, volumes, trades, averages, ticks]
def Show(klines,Ema):
    """Render a candlestick chart (red up / green down) with the EMA overlay.

    The original guarded the plot with a local `plttime` flag that was always
    0, so the branch always ran; the dead flag has been removed.
    NOTE(review): `go.Line` is deprecated in recent plotly releases in favour
    of `go.scatter.Line`; kept as-is to preserve behaviour.
    """
    data = [go.Candlestick(x=klines[7], open=klines[0], high=klines[2], low=klines[3],
                           close=klines[1], increasing_line_color='red', decreasing_line_color='green'),
            go.Scatter(
                x=klines[7],
                y=Ema,
                name='EMA',
                mode='lines',
                line=go.Line(
                    color='#77AAFF'
                )
            )]
    fig = go.Figure(data)
    fig.show()
def GetKlineState(Open,Close,High,Low):
    """Classify a candle as 'Green'/'Red' with optional 'Up'/'Down' wick flags.

    A candle is 'Green' when Close >= Open, else 'Red'. 'Up' is appended when
    the high extends strictly above the body's top, 'Down' when the low
    extends strictly below the body's bottom -- exactly the per-colour checks
    of the original, with the duplicated branches collapsed and a stray
    ``+ ''`` no-op concatenation removed.
    """
    if Close >= Open:
        candle = 'Green'
        body_top, body_bottom = Close, Open
    else:
        candle = 'Red'
        body_top, body_bottom = Open, Close
    if High > body_top:
        candle += 'Up'
    if Low < body_bottom:
        candle += 'Down'
    return candle
def WinOrLose(Close,High,Low,nowtime):
    """Simulate a long entry at Close[nowtime].

    Returns 1 if price reaches +1.0% before dropping to -0.7%, else 0. The
    stop level is checked before the target on every bar, and scanning stops
    two bars before the end of the series.
    """
    entry = Close[nowtime]
    take_profit = entry * 1.01
    stop_loss = entry * 0.993
    bar = nowtime + 1
    while bar < len(Close) - 2:
        if stop_loss >= Low[bar]:
            return 0
        if take_profit <= High[bar]:
            return 1
        bar += 1
    return 0
def GetData(Coin_Money,Interval,Time_Interval,klen):
    """Build (X, Y) training arrays from Binance klines.

    X rows are [open, close, high, low, volume, trade-count] per candle;
    Y[i] is 1 when price hits +1.0% before -0.7% after candle i (WinOrLose).
    The triple-quoted block below is a disabled feature-engineering variant
    kept for reference.
    """
    #Coin = "BTC" # set the coin symbol (translated comment)
    #Money = "USDT" # set the quote currency (translated comment)
    #Coin_Money = Coin + Money
    #Interval = Client.KLINE_INTERVAL_5MINUTE
    #Time_Interval = "5 day ago UTC"
    klines = GetPrice(Coin_Money, Interval, Time_Interval)
    EmaLen = klen
    Ema = GetEma(klines[1], EmaLen)
    EVolume = GetEVolume(klines[4], klen)
    #Show(klines,Ema)
    Open = klines[0]
    Close = klines[1]
    High = klines[2]
    Low = klines[3]
    Volume = klines[4]
    Trade = klines[5]
    Av = klines[6]
    Time = klines[7]
    X = [] #Ma斜率,價格斜率,價格是否大於ma,移動平均volume斜率,9跟k線狀態,k線型態
    Y = [] #是否成功
    '''
    for i in range(len(Open)):
    if i < EmaLen:
    continue
    else:
    Ema_Angle = ((Ema[i] - Ema[i-1]) / Ema[i-1])
    Price_Angle = ((Close[i] - Close[i-1]) / Close[i-1])
    if Close[i] >= Ema[i]:
    BullOrBear = 1
    else:
    BullOrBear = 0
    EVolume_angle = (EVolume[i] - EVolume[i-1]) / EVolume[i-1]
    Candle = GetKlineState(Open[i], Close[i], High[i], Low[i])
    #print(f'EMA:{Ema_Angle},PRICE:{Price_Angle},BullOrBear:{BullOrBear},VOLUME:{EVolume_angle},CANDLE:{Candle}')
    X.append([Ema_Angle,Price_Angle,BullOrBear,EVolume_angle])
    win = WinOrLose(Close, High, Low, i)
    #print(win)
    Y.append(win)'''
    for i in range(len(Open)):
        X.append([Open[i],Close[i],High[i],Low[i],Volume[i],Trade[i]])
        win = WinOrLose(Close, High, Low, i)
        Y.append(win)
    X = np.array(X)
    Y = np.array(Y)
    return X,Y
if __name__ == '__main__':
    # Smoke test: fetch 5 days of 15m BTCUSDT candles and show shapes.
    X,Y = GetData('BTCUSDT','15m', '5 day ago UTC', 89)
    print(X.shape,Y.shape)
    print(X[0],Y[0])
|
{"/ann.py": ["/price.py"]}
|
5,614
|
YongminK/gauss-blur-py
|
refs/heads/master
|
/design.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui',
# licensing of 'form.ui' applies.
#
# Created: Sun Apr 7 02:51:25 2019
# by: pyside2-uic running on PySide2 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Generated UI class (pyside2-uic output from form.ui).

    Do not edit widget layout here by hand -- regenerate from the .ui file.
    setupUi builds the widget tree; retranslateUi installs all user-visible
    strings (Russian labels are part of the UI text and must stay as-is).
    """
    def setupUi(self, Form):
        """Create and position every widget on *Form* (fixed geometries)."""
        Form.setObjectName("Form")
        Form.resize(1073, 771)
        self.loadImageButton = QtWidgets.QPushButton(Form)
        self.loadImageButton.setGeometry(QtCore.QRect(40, 300, 131, 51))
        self.loadImageButton.setObjectName("loadImageButton")
        self.oneDimButton = QtWidgets.QPushButton(Form)
        self.oneDimButton.setGeometry(QtCore.QRect(380, 300, 131, 51))
        self.oneDimButton.setObjectName("oneDimButton")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(40, 20, 281, 281))
        self.label.setFrameShape(QtWidgets.QFrame.Box)
        self.label.setObjectName("label")
        self.labelTwo = QtWidgets.QLabel(Form)
        self.labelTwo.setGeometry(QtCore.QRect(730, 20, 281, 281))
        self.labelTwo.setFrameShape(QtWidgets.QFrame.Box)
        self.labelTwo.setObjectName("labelTwo")
        self.twoDimButton = QtWidgets.QPushButton(Form)
        self.twoDimButton.setGeometry(QtCore.QRect(730, 300, 131, 51))
        self.twoDimButton.setObjectName("twoDimButton")
        self.timeTwoDim = QtWidgets.QTextBrowser(Form)
        self.timeTwoDim.setGeometry(QtCore.QRect(750, 360, 256, 31))
        self.timeTwoDim.setObjectName("timeTwoDim")
        self.labelOne = QtWidgets.QLabel(Form)
        self.labelOne.setGeometry(QtCore.QRect(380, 20, 291, 281))
        self.labelOne.setFrameShape(QtWidgets.QFrame.Box)
        self.labelOne.setObjectName("labelOne")
        self.timeOneDim = QtWidgets.QTextBrowser(Form)
        self.timeOneDim.setGeometry(QtCore.QRect(410, 360, 256, 31))
        self.timeOneDim.setObjectName("timeOneDim")
        self.tableWidget = QtWidgets.QTableWidget(Form)
        self.tableWidget.setGeometry(QtCore.QRect(340, 510, 681, 241))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        self.editSigma = QtWidgets.QTextEdit(Form)
        self.editSigma.setGeometry(QtCore.QRect(120, 420, 141, 31))
        self.editSigma.setObjectName("editSigma")
        self.editRadius = QtWidgets.QTextEdit(Form)
        self.editRadius.setGeometry(QtCore.QRect(120, 460, 141, 31))
        self.editRadius.setObjectName("editRadius")
        self.label_3 = QtWidgets.QLabel(Form)
        self.label_3.setGeometry(QtCore.QRect(40, 410, 71, 51))
        self.label_3.setWordWrap(True)
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(Form)
        self.label_4.setGeometry(QtCore.QRect(40, 450, 61, 41))
        self.label_4.setWordWrap(True)
        self.label_4.setObjectName("label_4")
        self.tableWidget_2 = QtWidgets.QTableWidget(Form)
        self.tableWidget_2.setGeometry(QtCore.QRect(340, 400, 681, 101))
        self.tableWidget_2.setObjectName("tableWidget_2")
        self.tableWidget_2.setColumnCount(0)
        self.tableWidget_2.setRowCount(0)
        self.editX = QtWidgets.QTextEdit(Form)
        self.editX.setGeometry(QtCore.QRect(60, 570, 51, 41))
        self.editX.setObjectName("editX")
        self.editY = QtWidgets.QTextEdit(Form)
        self.editY.setGeometry(QtCore.QRect(150, 570, 51, 41))
        self.editY.setObjectName("editY")
        self.label_5 = QtWidgets.QLabel(Form)
        self.label_5.setGeometry(QtCore.QRect(40, 580, 16, 31))
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(Form)
        self.label_6.setGeometry(QtCore.QRect(130, 580, 16, 21))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(Form)
        self.label_7.setGeometry(QtCore.QRect(80, 540, 111, 31))
        self.label_7.setObjectName("label_7")
        self.compareBrightButton = QtWidgets.QPushButton(Form)
        self.compareBrightButton.setGeometry(QtCore.QRect(210, 580, 93, 28))
        self.compareBrightButton.setObjectName("compareBrightButton")
        self.browseBright = QtWidgets.QTextBrowser(Form)
        self.browseBright.setGeometry(QtCore.QRect(50, 620, 256, 71))
        self.browseBright.setObjectName("browseBright")
        self.showHistButton = QtWidgets.QPushButton(Form)
        self.showHistButton.setGeometry(QtCore.QRect(170, 300, 151, 51))
        self.showHistButton.setObjectName("showHistButton")
        self.showHistButton_2 = QtWidgets.QPushButton(Form)
        self.showHistButton_2.setGeometry(QtCore.QRect(510, 300, 151, 51))
        self.showHistButton_2.setObjectName("showHistButton_2")
        self.showHistButton_3 = QtWidgets.QPushButton(Form)
        self.showHistButton_3.setGeometry(QtCore.QRect(850, 300, 151, 51))
        self.showHistButton_3.setObjectName("showHistButton_3")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install all translatable UI strings (labels, buttons, rich text)."""
        Form.setWindowTitle(QtWidgets.QApplication.translate("Form", "Form", None, -1))
        self.loadImageButton.setText(QtWidgets.QApplication.translate("Form", "Load", None, -1))
        self.oneDimButton.setText(QtWidgets.QApplication.translate("Form", "One", None, -1))
        self.label.setText(QtWidgets.QApplication.translate("Form", "Image 1", None, -1))
        self.labelTwo.setText(QtWidgets.QApplication.translate("Form", "Two Dim Image", None, -1))
        self.twoDimButton.setText(QtWidgets.QApplication.translate("Form", "Two", None, -1))
        self.timeTwoDim.setHtml(QtWidgets.QApplication.translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Time: </p></body></html>", None, -1))
        self.labelOne.setText(QtWidgets.QApplication.translate("Form", "One Dim Image", None, -1))
        self.timeOneDim.setHtml(QtWidgets.QApplication.translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Time: </p></body></html>", None, -1))
        self.label_3.setText(QtWidgets.QApplication.translate("Form", "Отклонение (сигма)", None, -1))
        self.label_4.setText(QtWidgets.QApplication.translate("Form", "Радиус апертуры", None, -1))
        self.editX.setHtml(QtWidgets.QApplication.translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:12pt;\">0</span></p></body></html>", None, -1))
        self.editY.setHtml(QtWidgets.QApplication.translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
        "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
        "p, li { white-space: pre-wrap; }\n"
        "</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
        "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:12pt;\">0</span></p></body></html>", None, -1))
        self.label_5.setText(QtWidgets.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-size:12pt;\">Х</span></p></body></html>", None, -1))
        self.label_6.setText(QtWidgets.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-size:12pt;\">Y</span></p></body></html>", None, -1))
        self.label_7.setText(QtWidgets.QApplication.translate("Form", "<html><head/><body><p><span style=\" font-size:11pt;\">Координаты</span></p></body></html>", None, -1))
        self.compareBrightButton.setText(QtWidgets.QApplication.translate("Form", "Сравнить", None, -1))
        self.showHistButton.setText(QtWidgets.QApplication.translate("Form", "Показать гистограмму", None, -1))
        self.showHistButton_2.setText(QtWidgets.QApplication.translate("Form", "Показать гистограмму", None, -1))
        self.showHistButton_3.setText(QtWidgets.QApplication.translate("Form", "Показать гистограмму", None, -1))
|
{"/main.py": ["/design.py"]}
|
5,615
|
YongminK/gauss-blur-py
|
refs/heads/master
|
/main.py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import sys
from PySide2 import QtWidgets
from PySide2.QtWidgets import QLabel
from PySide2.QtWidgets import QTableWidget, QTableWidgetItem
from PySide2.QtCore import QTranslator
import design
import os
import time
# Source image loaded once at import time as grayscale; the GUI methods read
# and mutate this module-level array. NOTE(review): the path is hard-coded.
img_name = 'test.bmp'
img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
class ExampleApp(QtWidgets.QMainWindow, design.Ui_Form):
def __init__(self):
super().__init__()
self.setupUi(self) # Это нужно для инициализации нашего дизайна
img_name = self.loadImageButton.clicked.connect(self.loadImage)
self.twoDimButton.clicked.connect(self.twoDimBlur)
self.oneDimButton.clicked.connect(self.oneDimBlur)
self.editRadius.setText("6")
self.editSigma.setText("2")
self.compareBrightButton.clicked.connect(self.compareBright)
self.showHistButton.clicked.connect(self.showHist)
self.showHistButton_2.clicked.connect(self.showHist_2)
self.showHistButton_3.clicked.connect(self.showHist_3)
def loadImage(self):
self.label.setPixmap(img_name)
# directory = QtWidgets.QFileDialog.getOpenFileName(self, "Выберите папку")[0]
# self.label.setPixmap(str(directory))
# return str(directory)
def twoDimBlur(self):
img_out = img.copy()
height = img.shape[0]
width = img.shape[1]
radius = int(self.editRadius.toPlainText())
sigma = float(self.editSigma.toPlainText())
size = int(2*radius+1)
gauss = np.zeros((size,size))
for i in range(size):
for j in range(size):
gauss[i][j] = (1/np.sqrt(2*np.pi*sigma**2))*np.exp(-((i-radius)**2 + (j-radius)**2)/(2*sigma**2))
# print(gauss.sum())
# print()
gauss = gauss / gauss.sum()
self.tableWidget.setRowCount(size)
self.tableWidget.setColumnCount(size)
for i in range(len(gauss)):
for j in range(len(gauss[i])):
newItem = QTableWidgetItem(str(gauss[i][j]))
self.tableWidget.setItem(i, j, newItem)
start_time = time.process_time()
for i in np.arange(radius, height-radius):
for j in np.arange(radius, width-radius):
sum = 0.0
for k in np.arange(-radius, radius+1):
for l in np.arange(-radius, radius+1):
a = img.item(i+k, j+l)
p = gauss[radius+k, radius+l]
sum = sum + (p * a)
b = sum
img_out.itemset((i,j), b)
end_time = time.process_time()
self.timeTwoDim.setText("Time: " + str(end_time - start_time))
newName = 'twoDimImgOut.bmp'
cv2.imwrite(newName, img_out)
self.labelTwo.setPixmap(newName)
plt.imshow(gauss, cmap=plt.get_cmap('jet'), interpolation='nearest')
plt.colorbar()
plt.show()
def oneDimBlur(self):
img_temp = img.copy()
img_out = img.copy()
height = img.shape[0]
width = img.shape[1]
radius = int(self.editRadius.toPlainText())
sigma = float(self.editSigma.toPlainText())
size = int(2*radius+1)
gauss2D = np.zeros((size,size))
for i in range(size):
for j in range(size):
gauss2D[i][j] = (1/np.sqrt(2*np.pi*sigma**2))*np.exp(-((i-radius)**2 + (j-radius)**2)/(2*sigma**2))
# print(gauss2D.sum())
# print()
gauss2D = gauss2D / gauss2D.sum()
gauss = np.zeros((1,size))
for j in range(size):
gauss[0][j] = gauss2D[:][j].sum()
# print(gauss[0][j])
# print(gauss.sum())
gauss = gauss / gauss.sum()
self.tableWidget_2.setRowCount(1)
self.tableWidget_2.setColumnCount(size)
for i in range(len(gauss)):
for j in range(len(gauss[i])):
newItem = QTableWidgetItem(str(gauss[i][j]))
self.tableWidget_2.setItem(i, j, newItem)
gauss_trans = np.zeros((size,1))
for i in range(size):
gauss_trans[i][0] = gauss2D[i][:].sum()
# print(gauss_trans[i][0])
# print(gauss_trans.sum())
gauss_trans = gauss_trans / gauss_trans.sum()
gauss_ish = gauss*gauss_trans #/ (gauss*gauss_trans).sum()
# for i in np.arange(size):
# for j in np.arange(size):
# print(gauss_ish[i][j] - gauss2D[i][j], end=" ")
# print()
start_time = time.process_time()
for i in np.arange(height):
for j in np.arange(radius, width-radius):
sum = 0.0
for l in np.arange(-radius, radius+1):
a = img.item(i, j+l)
p = gauss[0, radius+l]
sum = sum + (p * a)
b = sum
img_temp.itemset((i,j), b)
for i in np.arange(radius, height-radius):
for j in np.arange(width):
sum = 0.0
for k in np.arange(-radius, radius+1):
a = img_temp.item(i+k, j)
p = gauss_trans[radius+k, 0]
sum = sum + (p * a)
b = sum
img_out.itemset((i,j), b)
end_time = time.process_time()
self.timeOneDim.setText("Time: " + str(end_time - start_time))
newName = 'oneDimImgOut.bmp'
cv2.imwrite(newName, img_out)
self.labelOne.setPixmap(newName)
plt.imshow(gauss, cmap=plt.get_cmap('jet'), interpolation='nearest')
plt.colorbar()
plt.show()
def compareBright(self):
    """Show the pixel value at (x, y) in the original, 1-D-blurred and
    2-D-blurred images, and mark the probed pixel in the source image."""
    x = int(self.editX.toPlainText())
    y = int(self.editY.toPlainText())
    blur_1d = cv2.imread('oneDimImgOut.bmp', cv2.IMREAD_GRAYSCALE)
    blur_2d = cv2.imread('twoDimImgOut.bmp', cv2.IMREAD_GRAYSCALE)
    probes = [img.item(x, y), blur_1d.item(x, y), blur_2d.item(x, y)]
    # kept from the original (unused) debugging scaffold
    rows = blur_1d.shape[0]
    cols = blur_1d.shape[1]
    # for x in np.arange(rows):
    #     for y in np.arange(cols):
    #         print(blur_1d.item(x, y)-blur_2d.item(x,y), end=" ")
    #     print()
    # blacken the probed pixel so it is visible in the source image
    img.itemset((x, y), 0)
    self.browseBright.setText(" ".join(str(v) for v in probes))
def showHist(self):
    """Display and save the grey-level histogram of the source image."""
    plt.hist(img.ravel(), 256, [0, 256])
    plt.savefig("hist.png")
    plt.show()
def showHist_2(self):
    """Display and save the histogram of the separable (1-D) blur result."""
    blurred = cv2.imread('oneDimImgOut.bmp', cv2.IMREAD_GRAYSCALE)
    plt.hist(blurred.ravel(), 256, [0, 256])
    plt.savefig("hist.png")
    plt.show()
def showHist_3(self):
    """Display and save the histogram of the direct 2-D blur result."""
    blurred = cv2.imread('twoDimImgOut.bmp', cv2.IMREAD_GRAYSCALE)
    plt.hist(blurred.ravel(), 256, [0, 256])
    plt.savefig("hist.png")
    plt.show()
def main():
    """Create the Qt application, show the main window and run the event loop."""
    qt_app = QtWidgets.QApplication(sys.argv)
    window = ExampleApp()
    window.show()
    qt_app.exec_()

# print(timeit.timeit("main()", setup="from __main__ import main", number=1))

if __name__ == '__main__':  # run only when executed directly, not on import
    main()
# print(sum(sum(gauss)))
# # plt.imshow(gauss, cmap=plt.get_cmap('jet'), interpolation='nearest')
# # plt.colorbar()
# # plt.show()
|
{"/main.py": ["/design.py"]}
|
5,628
|
meteFANS/metview-python
|
refs/heads/master
|
/examples/UC-07-bufr-pandas.py
|
"""
Metview Python use case
UC-07-pandas. The Analyst compute simple differences between observations and analysis
and use pandas to perform further computations
BUFR version - BUFR is not tabular or gridded, but we can use Metview Python
framework to extract a particular parameter to a tabular format (geopoints)
--------------------------------------------------------------------------------
1. Analyst retrieves the analysis from a gridded data file
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
2. Analyst retrieves an observational parameter from a tabular or a gridded file
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
3. Analyst calculates the difference between the observational data and the
analysis
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
4. Analyst converts this data to a pandas dataframe and computes the number
of outliers based on the zscore
--------------------------------------------------------------------------------
"""
import metview as mv
import numpy as np
from scipy import stats
t2m_grib = mv.read('./t2m_grib.grib')
obs_3day = mv.read('./obs_3day.bufr')
t2m_gpt = mv.obsfilter(
parameter = '012004',
output = 'geopoints',
data = obs_3day
)
diff = t2m_grib - t2m_gpt
df = diff.to_dataframe()
print(df)
outliers = np.abs(stats.zscore(df['value'])) > 1.5
print('# of outliers:', outliers.sum())
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,629
|
meteFANS/metview-python
|
refs/heads/master
|
/examples/UC-01.py
|
"""
Metview Python use case
UC-01. The Analyst produces plots and files for the Product user
--------------------------------------------------------------------------------
1. Analyst creates plots and files thanks to his Python applications and scripts
that benefits from the underlying tools of the framework
--------------------------------------------------------------------------------
Analyst reads data from a GRIB file and derives another quantity from it. Then,
Analyst saves his data as a GRIB file and creates a plot in PNG format.
"""
import metview as mv
mydata = mv.read('../tests/test.grib')
derived = mydata * 2 + 5
mv.write('derived_data.grib', derived)
grid_shade = mv.mcont(
legend = True,
contour = False,
contour_highlight = True,
contour_shade = True,
contour_shade_technique = 'grid_shading',
contour_shade_max_level_colour = 'red',
contour_shade_min_level_colour = 'blue',
contour_shade_colour_direction = 'clockwise',
)
# Macro-like PNG creation:
png = mv.png_output(output_width = 1200, output_name = './myplot')
mv.plot(png, derived, grid_shade)
# Using a different notation:
png_output = {
'output_type': 'png',
'output_width': 1200,
'output_name': './myplot2'
}
mv.plot(derived, grid_shade, **png_output)
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,630
|
meteFANS/metview-python
|
refs/heads/master
|
/examples/UC-04-grib.py
|
"""
Metview Python use case
UC-04. The Analyst retrieves, for a given time interval, the values of
two parameters and combines their values on the same map
--------------------------------------------------------------------------------
1. Analyst retrieves, for a given time interval, the values of two chosen
parameters (e.g. temperature, and geopotential) from a given source (i.e. MARS,
files, observation databases)
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
2. Analyst customises many features of his map for each field he wants to plot
(e.g. temperature field as shaded areas and geopotenti2. al field as isolines)
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
3. Analyst plots the data
--------------------------------------------------------------------------------
Analyst plots data variable t2 with contouring definition t_shade_c, and data
variable z with contouring definition mslp_isolines.
The fields will be plotted in the order they appear in the mv.plot() command,
with the shaded temperature at the bottom, and the geopotential on top.
"""
import metview as mv
# read 2m temperature
t2 = mv.read('./t2_for_UC-04.grib')
# read geopotential
z = mv.read('./z_for_UC-04.grib')
t_shade_c = mv.mcont(
legend = True,
contour_highlight = False,
contour_level_selection_type = "interval",
contour_interval = 10,
contour_shade = True,
contour_shade_max_level = 60,
contour_shade_min_level = -60,
contour_shade_method = "area_fill",
contour_shade_max_level_colour = "red",
contour_shade_min_level_colour = "blue",
contour_shade_colour_direction = "clockwise"
)
z_isolines = mv.mcont(
legend = True,
contour_line_thickness = 2,
contour_line_colour = 'black',
contour_highlight_colour = 'black',
contour_highlight_thickness = 4,
contour_level_selection_type = 'interval',
contour_interval = 5,
contour_legend_text = 'Geopotential',
)
mv.setoutput(mv.png_output(output_width = 1000, output_name = './gribplot'))
mv.plot(t2, t_shade_c, z, z_isolines)
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,631
|
meteFANS/metview-python
|
refs/heads/master
|
/examples/UC-03-bufr.py
|
"""
Metview Python use case
The Python analyst reads some BUFR data and plots it in various ways
--------------------------------------------------------------------------------
1. Python analyst reads BUFR data and plots it using the default style
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
2. Python analyst reads BUFR data and applies a visual definition
to alter its plotting style
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
3. Python analyst reads BUFR data and filters a single parameter from it
and plots it with a colour scale
--------------------------------------------------------------------------------
"""
import metview as mv
# define a view over the area of interest and set land shading on
land_shade = mv.mcoast(
map_coastline_land_shade = True,
map_coastline_land_shade_colour = "RGB(0.98,0.95,0.82)",
map_coastline_sea_shade = False,
map_coastline_sea_shade_colour = "RGB(0.85,0.93,1)"
)
area_view = mv.geoview(
map_area_definition = 'corners',
area = [45.83,-13.87,62.03,8.92],
coastlines = land_shade
)
# Simplest plot:
# NOTE that when plotting a 'raw' BUFR file, Magics will plot synop symbols as shown in
# https://software.ecmwf.int/wiki/display/METV/Data+Part+1 "Plotting BUFR Data"
obs = mv.read('../tests/obs_3day.bufr')
mv.setoutput(mv.png_output(output_width = 1200, output_name = './obsplot1'))
mv.plot(area_view, obs)
# ALTERNATIVELY, add an Observations Plotting visual definition
obs_plotting = mv.mobs(
obs_temperature = False,
obs_cloud = False,
obs_low_cloud = False,
obs_dewpoint_colour = 'purple'
)
mv.setoutput(mv.png_output(output_width = 1200, output_name = './obsplot2'))
mv.plot(area_view, obs, obs_plotting)
# ALTERNATIVELY, if we don't want to plot the whole observations, but instead want to
# extract a specific parameter from the BUFR messages, then we use the Observation Filter
# as shown here:
# dewpoint_t is a 'geopoints' variable
dewpoint_t = mv.obsfilter(
output = "geopoints",
parameter = '012006',
data = obs
)
# add an optional Symbol Plotting definition to get nice coloured circles
# at each point
symb_visdef = mv.msymb(
legend = True,
symbol_type = 'marker',
symbol_table_mode = 'advanced',
symbol_advanced_table_max_level_colour = 'red',
symbol_advanced_table_min_level_colour = 'blue',
symbol_advanced_table_colour_direction = 'clockwise'
)
mv.setoutput(mv.png_output(output_width = 1200, output_name = './obsplot3'))
mv.plot(area_view, dewpoint_t, symb_visdef)
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,632
|
meteFANS/metview-python
|
refs/heads/master
|
/metview/__main__.py
|
#
# Copyright 2017-2019 B-Open Solutions srl.
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
def main(argv=None):
    """CLI entry point for ``python -m metview``.

    The only recognised command is 'selfcheck', which starts a Metview
    session, prints a greeting through it and reports the Metview version.
    Any other command raises RuntimeError.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('command')
    args = parser.parse_args(args=argv)

    # guard clause: bail out early on anything but 'selfcheck'
    if args.command != 'selfcheck':
        raise RuntimeError("Command not recognised %r. See usage with --help." % args.command)

    sys.argv = []
    print('Trying to connect to a Metview installation...')
    try:
        from . import bindings as _bindings
    except Exception as exp:
        print('Could not find a valid Metview installation')
        raise(exp)

    mv = dict()
    _bindings.bind_functions(mv, module_name='mv')
    del _bindings

    try:
        mv['print']('Hello world - printed from Metview!')
    except Exception as exp:
        print('Could not print a greeting from Metview')
        raise(exp)

    # assemble "major.minor.revision" from the version-info request
    version_info = mv['version_info']()
    parts = [str(int(version_info[key]))
             for key in ('metview_major', 'metview_minor', 'metview_revision')]
    print('Metview version', '.'.join(parts), 'found')
    print("Your system is ready.")


if __name__ == '__main__':
    main()
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,633
|
meteFANS/metview-python
|
refs/heads/master
|
/examples/seaIce_CO2_correlation.py
|
# ==============================================================================
# Authors: ralf mueller, stephan siemen
#
#
# Plan is to create a plot similar to the scatter plot for co2 concentration and
# september minimum of sea ice extent
#
# reference:
# https://www.mpg.de/10579957/W004_Environment_climate_062-069.pdf, p. 7
#
# ==============================================================================
import os
from ecmwfapi import ECMWFDataServer
from cdo import Cdo
from multiprocessing import Pool
from tarfile import TarFile
import matplotlib.pyplot as plt
import matplotlib.transforms as mtrans
# basic setup {{{ ===========================================================
server = ECMWFDataServer()
cdo = Cdo()
cdo.debug = True
tasks = 4
startYear = 1980
endYear = 2014
# }}} ==========================================================================
# helper methods {{{ ===========================================================
def getDataFromTarfile(tarfile):
    """Return the member names of the tar archive *tarfile*.

    Members are extracted into the current working directory unless every
    member already exists on disk.

    Bug fixed: the original called ``tf.close`` without parentheses, so the
    archive handle was never closed (resource leak). A context manager now
    guarantees closure even if extraction raises.
    """
    with TarFile(tarfile) as tf:
        members = [m.name for m in tf.getmembers()]
        # extract only when at least one member is missing from disk
        if not all(os.path.exists(x) for x in members):
            tf.extractall()
    return members
def computeTimeSeries(file, varname, useCellArea=False):
    """Reduce *varname* in *file* to a time series with CDO.

    With useCellArea=True the field is weighted by grid-cell area before the
    mean; otherwise a plain field mean is taken. Returns the output file path.
    """
    out_name = '_' + os.path.basename(file)
    if useCellArea:
        return cdo.mul(input='-fldmean -selname,%s %s -fldsum -gridarea %s' % (varname, file, file),
                       options='-b F32', output=out_name, force=False)
    return cdo.fldmean(input='-selname,%s %s' % (varname, file),
                       options='-b F32', output=out_name, force=False)
def computeTimeSeriesOfFilelist(pool, files, varname, ofile, useCellArea=False):
    """Compute per-file time series in parallel and concatenate them into a
    single yearly-mean series written to *ofile*.

    Bug fixed: the original passed a hard-coded ``False`` as the third
    argument of computeTimeSeries, silently ignoring the caller's
    *useCellArea* flag; it is now forwarded.
    """
    results = dict()
    for file in files:
        # fan each per-file reduction out to a worker process
        rfile = pool.apply_async(computeTimeSeries, (file, varname, useCellArea))
        results[file] = rfile
    pool.close()
    pool.join()
    # collect the worker results (output file paths)
    for k, v in results.items():
        results[k] = v.get()
    # concatenate in original file order, then reduce to yearly means
    cdo.yearmean(input='-cat %s' % (' '.join([results[x] for x in files])),
                 output=ofile, force=False,
                 options='-f nc')
    return ofile
# }}} ==========================================================================
# Sea Ice Cover retrival + processing {{{
iceCover_file = "ci_interim_%s-%s-NH.grb"%(startYear, endYear)
if ( not os.path.exists(iceCover_file) ):
server.retrieve({
'stream' : "oper",
'levtype' : "sfc",
'param' : "31.128",
'dataset' : "interim",
'step' : "0",
'grid' : "0.5/0.5",
'time' : "12",
'date' : "%s-01-01/to/%s-01-01"%(startYear,endYear),
'type' : "an",
'class' : "ei",
'area' : "90/-180/0/180",
'target' : iceCover_file
})
else:
print("use existing file '%s'"%(iceCover_file))
# compute the nh ice extent: minimum usually happens in September
iceExtent = 'ice_extent_%s-%s-daymean-SeptMin.nc'%(startYear,endYear)
cdo.setattribute('sea_ice_extent@unit=m2,sea_ice_extent@standard_name=sea_ice_extent',
input = '-setname,sea_ice_extent -yearmin -fldsum -mul -selmon,9 %s -gridarea %s'%(iceCover_file,iceCover_file),
output = iceExtent,force=False,
options = '-f nc')
iceExtent_ds = cdo.readXDataset(iceExtent)
# }}} ==========================================================================
# {{{ CO2 retrieval + processing ===========================================================
# cams return tarballs of netcdf files
co2_tarball = "co2_totalColumn_%s-%s.tar"%(startYear, endYear)
if ( not os.path.exists(co2_tarball) ):
server.retrieve({ #CO2
"dataset" : "cams_ghg_inversions",
"datatype" : "ra",
"date" : "%s-01-01/to/%s-01-01"%(startYear,endYear),
"frequency" : "3h",
"param" : "co2",
"quantity" : "total_column",
"version" : "v16r2",
"target" : co2_tarball
})
else:
print("use existing file '%s'"%(co2_tarball))
co2_files = getDataFromTarfile(co2_tarball)
co2_timeSeries = 'co2_timeseries_%s-%s.nc'%(startYear,endYear)
computeTimeSeriesOfFilelist(Pool(tasks),co2_files,'XCO2',co2_timeSeries,False)
co2_ds = cdo.readXDataset(co2_timeSeries)
# }}} ==========================================================================
# scatter plot {{{ =============================================================
# some debugging output
iceExtent_ds.info()
co2_ds.info()
# shaping the data for plotting it
xSelection = co2_ds.sel(time=slice('%s-01-01'%(startYear), '%s-01-01'%(endYear)))
ySelection = iceExtent_ds.sel(time=slice('%s-01-01'%(startYear), '%s-01-01'%(endYear)))
# create the final scatter plot
fig = plt.figure(figsize=(10, 7))
ax = plt.subplot(1, 1, 1)
trans_offset = mtrans.offset_copy(ax.transData, fig=fig,
x=0.05, y=-0.20, units='inches') # inches because we are in UK
x = xSelection.to_array()[1,:,0,0,0]
y = ySelection.to_array()[1,:,0,0,0]
plt.scatter( x , y)
# put years as labels
years = xSelection.time.dt.year
for _x,_y,_year in zip(x,y,years):
plt.text(_x, _y, '%d'%(_year), transform=trans_offset)
plt.grid(True)
plt.ylabel('sea ice extent [m2]')
plt.xlabel('co2 concentration [ppm]')
plt.title('Correlation of NH Sea Ice extent minimum and CO2 emissions')
plt.savefig('seaIce_CO2_correlation.png')
# }}} ==========================================================================
# vim:fdm=marker
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,634
|
meteFANS/metview-python
|
refs/heads/master
|
/metview/__init__.py
|
#
# Copyright 2017-2019 B-Open Solutions srl.
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# requires a Python 3 interpreter
import sys
if sys.version_info[0] < 3:
    raise EnvironmentError("Metview's Python interface requires Python 3. You are using Python "
                           + repr(sys.version_info))

# if the user has started via "python -m metview selfcheck"
# then we do not want to import anything yet because we want to
# catch errors differently
# NOTE(review): this relies on sys.argv[0] still being '-m' while runpy is
# importing the package during ``python -m metview selfcheck`` — confirm this
# holds on all supported Python versions.
if len(sys.argv) != 2 or sys.argv[0] != "-m" or sys.argv[1] != "selfcheck":
    from . import bindings as _bindings
    # expose every bound Macro function as a module-level attribute
    _bindings.bind_functions(globals(), module_name=__name__)

    # Remove "_bindings" from the public API.
    del _bindings
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,635
|
meteFANS/metview-python
|
refs/heads/master
|
/metview/bindings.py
|
#
# Copyright 2017-2019 B-Open Solutions srl.
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import keyword
import os
import pkgutil
import signal
import tempfile
import builtins
from enum import Enum
import cffi
import numpy as np
def string_from_ffi(s):
    """Decode a C string obtained through the FFI layer into a Python str."""
    raw_bytes = ffi.string(s)
    return raw_bytes.decode('utf-8')
# -----------------------------------------------------------------------------
# Startup
# -----------------------------------------------------------------------------
class MetviewInvoker:
    """Starts a new Metview session on construction and terminates it on program exit"""

    def __init__(self):
        """
        Constructor - starts a Metview session and reads its environment information
        Raises an exception if Metview does not respond within 5 seconds
        """
        self.debug = (os.environ.get("METVIEW_PYTHON_DEBUG", '0') == '1')

        # check whether we're in a running Metview session; if so, reuse it
        # and do not spawn (or later kill) a new process
        if 'METVIEW_TITLE_PROD' in os.environ:
            self.persistent_session = True
            self.info_section = {'METVIEW_LIB': os.environ['METVIEW_LIB']}
            return

        import atexit
        import time
        import subprocess

        if self.debug:
            print('MetviewInvoker: Invoking Metview')
        self.persistent_session = False
        self.metview_replied = False
        self.metview_startup_timeout = 5  # seconds

        # start Metview with command-line parameters that will let it communicate back to us:
        # it writes its environment into env_file and signals us with SIGUSR1 when ready
        env_file = tempfile.NamedTemporaryFile(mode='rt')
        pid = os.getpid()
        # print('PYTHON:', pid, ' ', env_file.name, ' ', repr(signal.SIGUSR1))
        signal.signal(signal.SIGUSR1, self.signal_from_metview)
        # p = subprocess.Popen(['metview', '-edbg', 'tv8 -a', '-slog', '-python-serve',
        #                      env_file.name, str(pid)], stdout=subprocess.PIPE)
        metview_startup_cmd = os.environ.get("METVIEW_PYTHON_START_CMD", 'metview')
        metview_flags = [metview_startup_cmd, '-nocreatehome', '-python-serve',
                         env_file.name, str(pid)]
        if self.debug:
            metview_flags.insert(2, '-slog')
            print('Starting Metview using these command args:')
            print(metview_flags)

        try:
            subprocess.Popen(metview_flags)
        except Exception as exp:
            print("Could not run the Metview executable ('" + metview_startup_cmd + "'); "
                  "check that the binaries for Metview (version 5 at least) are installed "
                  "and are in the PATH.")
            raise exp

        # wait for Metview to respond... busy-wait until the SIGUSR1 handler
        # flips metview_replied, or the timeout expires
        wait_start = time.time()
        while (not(self.metview_replied) and
               (time.time() - wait_start < self.metview_startup_timeout)):
            time.sleep(0.001)

        if not(self.metview_replied):
            raise Exception('Command "metview" did not respond within '
                            + str(self.metview_startup_timeout) + ' seconds. '
                            'At least Metview 5 is required, so please ensure it is in your PATH, '
                            'as earlier versions will not work with the Python interface.')

        self.read_metview_settings(env_file.name)

        # when the Python session terminates, we should destroy this object so that the Metview
        # session is properly cleaned up. We can also do this in a __del__ function, but there can
        # be problems with the order of cleanup - e.g. the 'os' module might be deleted before
        # this destructor is called.
        atexit.register(self.destroy)

    def destroy(self):
        """Kills the Metview session. Raises an exception if it could not do it."""
        if self.persistent_session:
            return

        if self.metview_replied:
            if self.debug:
                print('MetviewInvoker: Closing Metview')
            metview_pid = self.info('EVENT_PID')
            try:
                # SIGUSR1 tells the spawned Metview process to shut itself down
                os.kill(int(metview_pid), signal.SIGUSR1)
            except Exception as exp:
                print("Could not terminate the Metview process pid=" + metview_pid)
                raise exp

    def signal_from_metview(self, *args):
        """Called when Metview sends a signal back to Python to say that it's started"""
        # print ('PYTHON: GOT SIGNAL BACK FROM METVIEW!')
        self.metview_replied = True

    def read_metview_settings(self, settings_file):
        """Parses the settings file generated by Metview and sets the corresponding env vars"""
        import configparser
        cf = configparser.ConfigParser()
        cf.read(settings_file)
        env_section = cf['Environment']
        for envar in env_section:
            # print('set ', envar.upper(), ' = ', env_section[envar])
            os.environ[envar.upper()] = env_section[envar]
        # non-environment settings (e.g. EVENT_PID) are kept for info() lookups
        self.info_section = cf['Info']

    def info(self, key):
        """Returns a piece of Metview information that was not set as an env var"""
        return self.info_section[key]

    def store_signal_handlers(self):
        """Stores the set of signal handlers that Metview will override"""
        self.sigint = signal.getsignal(signal.SIGINT)
        self.sighup = signal.getsignal(signal.SIGHUP)
        self.sighquit = signal.getsignal(signal.SIGQUIT)
        self.sigterm = signal.getsignal(signal.SIGTERM)
        self.sigalarm = signal.getsignal(signal.SIGALRM)

    def restore_signal_handlers(self):
        """Restores the set of signal handlers that Metview has overridden"""
        signal.signal(signal.SIGINT, self.sigint)
        signal.signal(signal.SIGHUP, self.sighup)
        signal.signal(signal.SIGQUIT, self.sighquit)
        signal.signal(signal.SIGTERM, self.sigterm)
        signal.signal(signal.SIGALRM, self.sigalarm)
# start (or attach to) a Metview session, then load its Macro shared library
mi = MetviewInvoker()

try:
    ffi = cffi.FFI()
    # the C API declarations ship inside the package as 'metview.h'
    ffi.cdef(pkgutil.get_data('metview', 'metview.h').decode('ascii'))

    mv_lib = mi.info('METVIEW_LIB')
    # is there a more general way to add to a path to a list of paths?
    os.environ["LD_LIBRARY_PATH"] = mv_lib + ':' + os.environ.get("LD_LIBRARY_PATH", '')

    try:
        # Linux / Unix systems
        lib = ffi.dlopen(os.path.join(mv_lib, 'libMvMacro.so'))
    except OSError:
        # MacOS systems
        lib = ffi.dlopen(os.path.join(mv_lib, 'libMvMacro'))
except Exception as exp:
    print('Error loading Metview/libMvMacro. LD_LIBRARY_PATH='
          + os.environ.get("LD_LIBRARY_PATH", ''))
    raise exp

# The C/C++ code behind lib.p_init() will call marsinit(), which overrides various signal
# handlers. We don't necessarily want this when running a Python script - we should use
# the default Python behaviour for handling signals, so we save the current signals
# before calling p_init() and restore them after.
mi.store_signal_handlers()
lib.p_init()
mi.restore_signal_handlers()
# -----------------------------------------------------------------------------
# Classes to handle complex Macro types
# -----------------------------------------------------------------------------
class Value:
    """Base wrapper around a pointer to a value held on the Macro (C) side."""

    def __init__(self, val_pointer):
        # CFFI pointer to the Macro-side value, or None if we hold nothing
        self.val_pointer = val_pointer

    def push(self):
        # push this value onto Macro's argument stack (nil when empty)
        if self.val_pointer is None:
            lib.p_push_nil()
        else:
            lib.p_push_value(self.val_pointer)

    # if we steal a value pointer from a temporary Value object, we need to
    # ensure that the Metview Value is not destroyed when the temporary object
    # is destroyed by setting its pointer to None
    def steal_val_pointer(self, other):
        self.val_pointer = other.val_pointer
        other.val_pointer = None

    # enable a more object-oriented interface, e.g. a = fs.interpolate(10, 29.4)
    def __getattr__(self, fname):
        # any unknown attribute becomes a Macro function call with this
        # value prepended as the first argument
        def call_func_with_self(*args, **kwargs):
            return call(fname, self, *args, **kwargs)
        return call_func_with_self

    # on destruction, ensure that the Macro Value is also destroyed
    def __del__(self):
        try:
            # 'lib' may already be torn down during interpreter shutdown
            if self.val_pointer is not None and lib is not None:
                lib.p_destroy_value(self.val_pointer)
                self.val_pointer = None
        except Exception as exp:
            print("Could not destroy Metview variable ", self)
            raise exp
class Request(dict, Value):
verb = "UNKNOWN"
def __init__(self, req):
self.val_pointer = None
# initialise from Python object (dict/Request)
if isinstance(req, dict):
self.update(req)
self.to_metview_style()
if isinstance(req, Request):
self.verb = req.verb
self.val_pointer = req.val_pointer
# initialise from a Macro pointer
else:
Value.__init__(self, req)
self.verb = string_from_ffi(lib.p_get_req_verb(req))
n = lib.p_get_req_num_params(req)
for i in range(0, n):
param = string_from_ffi(lib.p_get_req_param(req, i))
raw_val = lib.p_get_req_value(req, param.encode('utf-8'))
if raw_val != ffi.NULL:
val = string_from_ffi(raw_val)
self[param] = val
# self['_MACRO'] = 'BLANK'
# self['_PATH'] = 'BLANK'
def __str__(self):
return "VERB: " + self.verb + super().__str__()
# translate Python classes into Metview ones where needed
def to_metview_style(self):
for k, v in self.items():
# bool -> on/off
if isinstance(v, bool):
conversion_dict = {True: 'on', False: 'off'}
self[k] = conversion_dict[v]
# class_ -> class (because 'class' is a Python keyword and cannot be
# used as a named parameter)
elif k == 'class_':
self['class'] = v
del self['class_']
def push(self):
# if we have a pointer to a Metview Value, then use that because it's more
# complete than the dict
if self.val_pointer:
Value.push(self)
else:
r = lib.p_new_request(self.verb.encode('utf-8'))
# to populate a request on the Macro side, we push each
# value onto its stack, and then tell it to create a new
# parameter with that name for the request. This allows us to
# use Macro to handle the addition of complex data types to
# a request
for k, v in self.items():
push_arg(v)
lib.p_set_request_value_from_pop(r, k.encode('utf-8'))
lib.p_push_request(r)
def __getitem__(self, index):
return subset(self, index)
def push_bytes(b):
    """Push a bytes object onto the Macro argument stack as a string."""
    lib.p_push_string(b)
def push_str(s):
    """Push a Python str onto the Macro stack, UTF-8 encoded."""
    push_bytes(s.encode('utf-8'))
def push_list(lst):
    """Push a Python list onto the Macro stack as a Metview list."""
    # ask Metview to create a new list, then add each element by
    # pushing it onto the stack and asking Metview to pop it off
    # and add it to the list
    mlist = lib.p_new_list(len(lst))
    for i, val in enumerate(lst):
        push_arg(val)
        lib.p_add_value_from_pop_to_list(mlist, i)
    lib.p_push_list(mlist)
def push_date(d):
    """Push a numpy datetime64 onto the Macro stack as an ISO date string."""
    lib.p_push_datestring(np.datetime_as_string(d).encode('utf-8'))
def push_datetime(d):
    """Push a datetime.datetime onto the Macro stack as an ISO date string."""
    lib.p_push_datestring(d.isoformat().encode('utf-8'))
def push_datetime_date(d):
    """Push a datetime.date onto the Macro stack, with time fixed to midnight."""
    s = d.isoformat() + 'T00:00:00'
    lib.p_push_datestring(s.encode('utf-8'))
def push_vector(npa):
    """Push a 1-D float32/float64 numpy array onto the Macro stack as a vector.

    Raises for any other dtype. NaN is passed as the missing-value marker.
    """
    # convert numpy array to CData (zero-copy cast of the underlying buffer)
    if npa.dtype == np.float64:
        cffi_buffer = ffi.cast('double*', npa.ctypes.data)
        lib.p_push_vector_from_double_array(cffi_buffer, len(npa), np.nan)
    elif npa.dtype == np.float32:
        cffi_buffer = ffi.cast('float*', npa.ctypes.data)
        lib.p_push_vector_from_float32_array(cffi_buffer, len(npa), np.nan)
    else:
        raise Exception('Only float32 and float64 numPy arrays can be passed to Metview, not ',
                        npa.dtype)
class FileBackedValue(Value):
    """A Metview value whose data lives in a file on disk (e.g. GRIB, BUFR)."""

    def __init__(self, val_pointer):
        Value.__init__(self, val_pointer)

    def url(self):
        # ask Metview for the file relating to this data (Metview will write it if necessary)
        return string_from_ffi(lib.p_data_path(self.val_pointer))
class FileBackedValueWithOperators(FileBackedValue):
    """File-backed value with arithmetic and comparison operators.

    Every operator delegates to the matching Metview Macro function, so
    e.g. ``a - b`` or ``a == b`` produces a new Metview result rather than a
    plain Python number/bool.
    NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    (Python sets __hash__ to None) — confirm that this is intended.
    """

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)

    def __add__(self, other):
        return add(self, other)

    def __sub__(self, other):
        return sub(self, other)

    def __mul__(self, other):
        return prod(self, other)

    def __truediv__(self, other):
        return div(self, other)

    def __pow__(self, other):
        return power(self, other)

    def __ge__(self, other):
        return greater_equal_than(self, other)

    def __gt__(self, other):
        return greater_than(self, other)

    def __le__(self, other):
        return lower_equal_than(self, other)

    def __lt__(self, other):
        return lower_than(self, other)

    def __eq__(self, other):
        return equal(self, other)

    def __ne__(self, other):
        return met_not_eq(self, other)
class ContainerValue(Value):
    """A Metview value containing indexable elements (fieldsets, geopoints...).

    NOTE(review): __iter__ returns self and shares the single self.idx cursor,
    so nested or concurrent iterations over the same object interfere —
    confirm this is acceptable for all call sites.
    """

    def __init__(self, val_pointer, macro_index_base, element_type, support_slicing):
        Value.__init__(self, val_pointer)
        self.idx = 0  # iteration cursor used by __next__
        self.macro_index_base = macro_index_base  # 0 or 1: Macro-side index origin
        self.element_type = element_type  # the type of elements that the container contains
        self.support_slicing = support_slicing

    def __len__(self):
        if self.val_pointer is None:
            return 0
        else:
            return int(count(self))

    def __getitem__(self, index):
        if isinstance(index, slice):
            if self.support_slicing:
                # materialise the slice, then merge the elements into one value
                indices = index.indices(len(self))
                fields = [self[i] for i in range(*indices)]
                if len(fields) == 0:
                    return None
                else:
                    f = fields[0]
                    for i in range(1, len(fields)):
                        f = merge(f, fields[i])
                    return f
            else:
                raise Exception('This object does not support extended slicing: ' + str(self))
        else:  # normal index
            if isinstance(index, str):  # can have a string as an index
                return subset(self, index)
            else:
                return subset(self, index + self.macro_index_base)  # numeric index: 0->1-based

    def __setitem__(self, index, value):
        # only elements of the declared element_type may be assigned
        if (isinstance(value, self.element_type)):
            lib.p_set_subvalue(self.val_pointer, index + self.macro_index_base, value.val_pointer)
        else:
            raise Exception('Cannot assign ', value, ' as element of ', self)

    def __iter__(self):
        return self

    def __next__(self):
        # reset the cursor when exhausted so the object can be iterated again
        if self.idx >= self.__len__():
            self.idx = 0
            raise StopIteration
        else:
            self.idx += 1
            return self.__getitem__(self.idx - 1)
class Fieldset(FileBackedValueWithOperators, ContainerValue):
    """A GRIB fieldset: a sliceable, 1-based container of fields."""

    def __init__(self, val_pointer=None, path=None):
        FileBackedValue.__init__(self, val_pointer)
        ContainerValue.__init__(self, val_pointer, 1, Fieldset, True)
        if path is not None:
            self.steal_val_pointer(read(path))

    def append(self, other):
        """Merge `other` into this fieldset in place."""
        self.steal_val_pointer(merge(self, other))

    def to_dataset(self):
        """Open the backing GRIB file as an xarray Dataset via cfgrib."""
        # soft dependency on cfgrib
        try:
            from cfgrib import xarray_store
        except ImportError:
            print("Package cfgrib/xarray_store not found. Try running 'pip install cfgrib'.")
            raise
        return xarray_store.open_dataset(self.url())
class Bufr(FileBackedValue):
    """BUFR observation data backed by a file."""

    def __init__(self, val_pointer):
        # single-inheritance chain, so super() resolves to FileBackedValue
        super().__init__(val_pointer)
class Geopoints(FileBackedValueWithOperators, ContainerValue):
    """Point observations: a 0-based, non-sliceable container with operators."""

    def __init__(self, val_pointer):
        FileBackedValueWithOperators.__init__(self, val_pointer)
        ContainerValue.__init__(self, val_pointer, 0, None, False)

    def to_dataframe(self):
        """Return the geopoints columns as a pandas DataFrame."""
        try:
            import pandas as pd
        except ImportError:
            print("Package pandas not found. Try running 'pip install pandas'.")
            raise
        # 'time' is excluded because it is incorporated into 'date'
        cols = self.columns()
        if 'time' in cols:
            cols.remove('time')
        return pd.DataFrame({name: self[name] for name in cols})
class NetCDF(FileBackedValueWithOperators):
    """NetCDF data backed by a file; supports Metview operators."""

    def __init__(self, val_pointer):
        FileBackedValueWithOperators.__init__(self, val_pointer)

    def to_dataset(self):
        """Open the backing file as an xarray Dataset (soft dependency)."""
        try:
            import xarray as xr
        except ImportError:
            print("Package xarray not found. Try running 'pip install xarray'.")
            raise
        return xr.open_dataset(self.url())
class Odb(FileBackedValue):
    """ODB data backed by a file."""

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)

    def to_dataframe(self):
        """Return every ODB column as a pandas DataFrame."""
        try:
            import pandas as pd
        except ImportError:
            print("Package pandas not found. Try running 'pip install pandas'.")
            raise
        return pd.DataFrame({col: self.values(col) for col in self.columns()})
class Table(FileBackedValue):
    """CSV-like tabular data backed by a file."""

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)

    def to_dataframe(self):
        """Read the backing file with pandas and return the DataFrame."""
        try:
            import pandas as pd
        except ImportError:
            print("Package pandas not found. Try running 'pip install pandas'.")
            raise
        return pd.read_csv(self.url())
class GeopointSet(FileBackedValueWithOperators, ContainerValue):
    """A 1-based container of Geopoints values (no slicing support)."""

    def __init__(self, val_pointer):
        FileBackedValueWithOperators.__init__(self, val_pointer)
        ContainerValue.__init__(self, val_pointer, 1, Geopoints, False)
# -----------------------------------------------------------------------------
# Pushing data types to Macro
# -----------------------------------------------------------------------------
def dataset_to_fieldset(val, **kwarg):
    """Convert an xarray Dataset to a Metview Fieldset.

    Writes the dataset to a temporary GRIB file with cfgrib, then reads it
    back as a Fieldset. Raises TypeError for non-Dataset input; re-raises
    any cfgrib failure after cleaning up the temporary file.
    """
    # we try to import xarray as locally as possible to reduce startup time
    import xarray as xr
    import cfgrib
    if not isinstance(val, xr.core.dataset.Dataset):
        raise TypeError('dataset_to_fieldset requires a variable of type xr.core.dataset.Dataset;'
                        ' was supplied with ', builtins.type(val))
    f, tmp = tempfile.mkstemp(".grib")
    os.close(f)
    try:
        # could add keys, e.g. grib_keys={'centre': 'ecmf'})
        cfgrib.to_grib(val, tmp, **kwarg)
    except Exception:
        # was a bare 'except:' (also caught KeyboardInterrupt); narrowed, and
        # the half-written temp file is no longer leaked on failure
        print("Error trying to write xarray dataset to GRIB for conversion to Metview Fieldset")
        os.unlink(tmp)
        raise
    # TODO: tell Metview that this is a temporary file that should be deleted when no longer needed
    fs = read(tmp)
    return fs
def push_xarray_dataset(val):
    """Convert `val` to a Fieldset and push it onto the Macro stack."""
    dataset_to_fieldset(val).push()
# try_to_push_complex_type exists as a separate function so that we don't have
# to import xarray at the top of the module - this saves some time on startup
def try_to_push_complex_type(val):
    """Push types that need heavy imports (xarray), deferred for startup speed."""
    import xarray as xr
    if not isinstance(val, xr.core.dataset.Dataset):
        raise TypeError('Cannot push this type of argument to Metview: ', builtins.type(val))
    push_xarray_dataset(val)
class ValuePusher():
    """Class to handle pushing values to the Macro library"""

    def __init__(self):
        # a set of pairs linking value types with functions to push them to Macro
        # note that Request must come before dict, because a Request inherits from dict;
        # this ordering requirement also means we should use list or tuple instead of a dict
        self.funcs = (
            (float, lambda n: lib.p_push_number(n)),
            ((int, np.number), lambda n: lib.p_push_number(float(n))),
            (str, lambda n: push_str(n)),
            (Request, lambda n: n.push()),
            (dict, lambda n: Request(n).push()),
            ((list, tuple), lambda n: push_list(n)),
            (type(None), lambda n: lib.p_push_nil()),
            (FileBackedValue, lambda n: n.push()),
            (np.datetime64, lambda n: push_date(n)),
            (datetime.datetime, lambda n: push_datetime(n)),
            (datetime.date, lambda n: push_datetime_date(n)),
            (np.ndarray, lambda n: push_vector(n)),
        )

    def push_value(self, val):
        """Push `val` via the first matching type entry; always returns 1."""
        for typekey, typefunc in self.funcs:
            if isinstance(val, typekey):
                typefunc(val)
                return 1
        # if we haven't returned yet, then try the more complex types
        try_to_push_complex_type(val)
        return 1
# the single shared pusher used for every argument sent to Macro
vp = ValuePusher()


def push_arg(n):
    """Push one Python value onto the Macro argument stack; returns 1."""
    return vp.push_value(n)
def dict_to_pushed_args(d):
    """Push each key/value pair of `d`; return the number of args pushed."""
    for key, val in d.items():
        push_str(key)
        push_arg(val)
    # two stack entries (key + value) per item
    return len(d) * 2
# -----------------------------------------------------------------------------
# Returning data types from Macro
# -----------------------------------------------------------------------------
def list_from_metview(val):
    """Convert a Macro list value into a Python list.

    If every element converts to a numpy array, the elements are stacked
    into a single 2-D numpy array instead of returned as a list.
    """
    mlist = lib.p_value_as_list(val)
    result = []
    n = lib.p_list_count(mlist)
    all_vectors = True
    for i in range(0, n):
        mval = lib.p_list_element_as_value(mlist, i)
        v = value_from_metview(mval)
        if all_vectors and not isinstance(v, np.ndarray):
            all_vectors = False
        result.append(v)
    # if this is a list of vectors, then create a 2-D numPy array
    if all_vectors and n > 0:
        result = np.stack(result, axis=0)
    return result
def datestring_from_metview(val):
    """Decode a Macro datestring value into a datetime.datetime."""
    iso = string_from_ffi(lib.p_value_as_datestring(val))
    return datetime.datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S")
def vector_from_metview(val):
    """Convert a Macro vector value into a numpy array (float32 or float64)."""
    # missing values are requested as NaN
    vec = lib.p_value_as_vector(val, np.nan)
    n = lib.p_vector_count(vec)
    s = lib.p_vector_elem_size(vec)  # bytes per element: 4 or 8
    if s == 4:
        nptype = np.float32
        b = lib.p_vector_float32_array(vec)
    elif s == 8:
        nptype = np.float64
        b = lib.p_vector_double_array(vec)
    else:
        raise Exception('Metview vector data type cannot be handled: ', s)
    bsize = n * s
    c_buffer = ffi.buffer(b, bsize)
    # frombuffer shares the C buffer rather than copying it
    # NOTE(review): the memory is owned by Metview — confirm its lifetime
    # covers the use of the returned array
    np_array = np.frombuffer(c_buffer, dtype=nptype)
    return np_array
def handle_error(val):
    """Translate a Macro error value.

    Returns None for Examiner-service messages (ignored), otherwise an
    Exception instance for the caller to raise.
    """
    msg = string_from_ffi(lib.p_error_message(val))
    if not ("Service" in msg and "Examiner" in msg):
        return Exception('Metview error: ' + msg)
    return None
def string_from_metview(val):
    """Extract a Python str from a Macro string value."""
    return string_from_ffi(lib.p_value_as_string(val))
class MvRetVal(Enum):
    """Return-type codes reported by lib.p_value_type() for Macro results."""
    tnumber = 0
    tstring = 1
    tgrib = 2
    trequest = 3
    tbufr = 4
    tgeopts = 5
    tlist = 6
    tnetcdf = 7
    tnil = 8
    terror = 9
    tdate = 10
    tvector = 11
    todb = 12
    ttable = 13
    tgptset = 14
    tunknown = 99
class ValueReturner():
    """Maps Macro return-type codes (MvRetVal) to Python conversion functions."""

    def __init__(self):
        self.funcs = {}
        self.funcs[MvRetVal.tnumber.value] = lambda val: lib.p_value_as_number(val)
        self.funcs[MvRetVal.tstring.value] = lambda val: string_from_metview(val)
        self.funcs[MvRetVal.tgrib.value] = lambda val: Fieldset(val)
        self.funcs[MvRetVal.trequest.value] = lambda val: Request(val)
        self.funcs[MvRetVal.tbufr.value] = lambda val: Bufr(val)
        self.funcs[MvRetVal.tgeopts.value] = lambda val: Geopoints(val)
        self.funcs[MvRetVal.tlist.value] = lambda val: list_from_metview(val)
        self.funcs[MvRetVal.tnetcdf.value] = lambda val: NetCDF(val)
        self.funcs[MvRetVal.tnil.value] = lambda val: None
        self.funcs[MvRetVal.terror.value] = lambda val: handle_error(val)
        self.funcs[MvRetVal.tdate.value] = lambda val: datestring_from_metview(val)
        self.funcs[MvRetVal.tvector.value] = lambda val: vector_from_metview(val)
        self.funcs[MvRetVal.todb.value] = lambda val: Odb(val)
        self.funcs[MvRetVal.ttable.value] = lambda val: Table(val)
        self.funcs[MvRetVal.tgptset.value] = lambda val: GeopointSet(val)

    def translate_return_val(self, val):
        """Convert the Macro value `val` to its Python counterpart.

        Raises Exception for an unknown return-type code.
        """
        rt = lib.p_value_type(val)
        try:
            # BUG FIX: previously 'except Exception' wrapped the handler call
            # too, so a failure inside e.g. Fieldset() was misreported as an
            # unhandled return type; only the dict lookup can mean that
            handler = self.funcs[rt]
        except KeyError:
            raise Exception('value_from_metview got an unhandled return type: ' + str(rt))
        return handler(val)
# shared translator instance
vr = ValueReturner()


def value_from_metview(val):
    """Translate a Macro value to Python; raise it if it encodes an error."""
    result = vr.translate_return_val(val)
    if isinstance(result, Exception):
        raise result
    return result
# -----------------------------------------------------------------------------
# Creating and calling Macro functions
# -----------------------------------------------------------------------------
def _call_function(mfname, *args, **kwargs):
    """Push positional and keyword arguments, then invoke Macro function `mfname`."""
    nargs = 0
    for arg in args:
        nargs += push_arg(arg)
    if kwargs:
        # keyword arguments travel as a Request (name/value pairs)
        nargs += dict_to_pushed_args(Request(dict(kwargs)))
    lib.p_call_function(mfname.encode('utf-8'), nargs)
def make(mfname):
    """Return a Python wrapper that calls the Metview Macro function `mfname`."""
    def wrapped(*args, **kwargs):
        # dead code removed: _call_function returns None, so the old
        # 'err = ...; if err: pass' branch could never trigger. Errors surface
        # via the returned Macro error value, which value_from_metview raises.
        _call_function(mfname, *args, **kwargs)
        val = lib.p_result_as_value()
        return value_from_metview(val)
    return wrapped
def bind_functions(namespace, module_name=None):
    """Add to the module globals all metview functions except operators like: +, &, etc."""
    for metview_name in make('dictionary')():
        if metview_name.isidentifier():
            python_name = metview_name
            # NOTE: we append a '_' to metview functions that clash with python reserved keywords
            # as they cannot be used as identifiers, for example: 'in' -> 'in_'
            if keyword.iskeyword(metview_name):
                python_name += '_'
            python_func = make(metview_name)
            python_func.__name__ = python_name
            python_func.__qualname__ = python_name
            if module_name:
                python_func.__module__ = module_name
            namespace[python_name] = python_func
        # else:
        #     print('metview function %r not bound to python' % metview_name)
    # add the 'mvl' functions, which are written in Macro and therefore not
    # listed by the dictionary() function
    for f in ['mvl_ml2hPa', 'mvl_create_netcdf_2d', 'mvl_flextra_etadot', 'mvl_geocircle',
              'mvl_geoline', 'mvl_geopotential_on_ml', 'mvl_mxn_subframes', 'mvl_plot_scm_data',
              'mvl_regular_layout', 'mvl_regular_layout_area', 'thermo_data_info',
              'thermo_parcel_path', 'thermo_parcel_area', 'xy_curve', 'potential_temperature',
              'temperature_from_potential_temperature', 'saturation_mixing_ratio', 'mixing_ratio',
              'vapour_pressure', 'saturation_vapour_pressure',
              'lifted_condensation_level', 'divergence', 'vorticity', 'laplacian',
              'geostrophic_wind_pl', 'geostrophic_wind_ml']:
        namespace[f] = make(f)
    # HACK: some fuctions are missing from the 'dictionary' call.
    namespace['neg'] = make('neg')
    namespace['nil'] = make('nil')
    # override some functions that need special treatment
    # FIXME: this needs to be more structured
    namespace['plot'] = plot
    namespace['setoutput'] = setoutput
    namespace['dataset_to_fieldset'] = dataset_to_fieldset
    namespace['Fieldset'] = Fieldset
# some explicit bindings are used here
# explicit bindings to Macro operators/functions used by the classes above
add = make('+')
call = make('call')
count = make('count')
div = make('/')
equal = make('=')
filter = make('filter')  # NOTE: shadows the builtin 'filter' at module level
greater_equal_than = make('>=')
greater_than = make('>')
lower_equal_than = make('<=')
lower_than = make('<')
merge = make('&')
met_not_eq = make('<>')
met_plot = make('plot')
nil = make('nil')
png_output = make('png_output')
power = make('^')
prod = make('*')
ps_output = make('ps_output')
read = make('read')
met_setoutput = make('setoutput')
sub = make('-')
subset = make('[]')
# -----------------------------------------------------------------------------
# Particular code for calling the plot() command
# -----------------------------------------------------------------------------
class Plot():
    """Callable wrapper around the Macro 'plot' command.

    When plot_to_jupyter is enabled (via setoutput('jupyter')), output is
    rendered to a temporary PNG and returned as an IPython Image; otherwise
    it goes through the configured Metview output driver.
    """

    def __init__(self):
        self.plot_to_jupyter = False

    def __call__(self, *args, **kwargs):
        if self.plot_to_jupyter:
            # render to a temp PNG, wrap in an IPython Image, then remove it
            f, tmp = tempfile.mkstemp(".png")
            os.close(f)
            base, ext = os.path.splitext(tmp)
            met_setoutput(png_output(output_name=base, output_name_first_page_number='off'))
            # NOTE(review): kwargs are ignored on this path — confirm intended
            met_plot(*args)
            image = Image(tmp)
            os.unlink(tmp)
            return image
        else:
            map_outputs = {
                'png': png_output,
                'ps': ps_output,
            }
            if 'output_type' in kwargs:
                # remaining kwargs configure the chosen output driver
                output_function = map_outputs[kwargs['output_type'].lower()]
                kwargs.pop('output_type')
                met_plot(output_function(kwargs), *args)
            else:
                met_plot(*args)
            # the Macro plot command returns an empty definition, but
            # None is better for Python
            return None


plot = Plot()
# On a test system, importing IPython took approx 0.5 seconds, so to avoid that hit
# under most circumstances, we only import it when the user asks for Jupyter
# functionality. Since this occurs within a function, we need a little trickery to
# get the IPython functions into the global namespace so that the plot object can use them
def setoutput(*args):
    """Configure Metview output; 'jupyter' switches plot() to inline images."""
    if 'jupyter' in args:
        try:
            global Image
            global get_ipython
            # deferred import: IPython is slow to load and only needed here;
            # the names are published as globals so Plot.__call__ can use them
            IPython = __import__('IPython', globals(), locals())
            Image = IPython.display.Image
            get_ipython = IPython.get_ipython
        except ImportError as imperr:
            print('Could not import IPython module - plotting to Jupyter will not work')
            raise imperr
        # test whether we're in the Jupyter environment
        if get_ipython() is not None:
            plot.plot_to_jupyter = True
        else:
            print("ERROR: setoutput('jupyter') was set, but we are not in a Jupyter environment")
            raise(Exception('Could not set output to jupyter'))
    else:
        plot.plot_to_jupyter = False
        met_setoutput(*args)
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,636
|
meteFANS/metview-python
|
refs/heads/master
|
/examples/UC-07-bufr.py
|
"""
Metview Python use case
UC-07. The Analyst compute simple differences between observations and analysis
and plot the values
BUFR version - BUFR is not tabular or gridded, but we can use Metview Python
framework to extract a particular parameter to a tabular format (geopoints)
--------------------------------------------------------------------------------
1. Analyst retrieves the analysis from a gridded data file
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
2. Analyst retrieves an observational parameter from a tabular or a gridded file
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
3. Analyst calculates the difference between the observational data and the
analysis and classified the field values according to the magnitude of the
difference
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
4. Analyst customises many features of his graph in order to create
publication-quality plots
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
5. Analyst plots the data
--------------------------------------------------------------------------------
"""
import metview as mv
# define a view over the area of interest
# map view over the area of interest (corner coordinates S/W/N/E)
area_view = mv.geoview(
    map_area_definition = 'corners',
    area = [45.83,-13.87,62.03,8.92]
)
t2m_grib = mv.read('./t2m_grib.grib')
obs_3day = mv.read('./obs_3day.bufr')
# extract 2m-temperature observations (BUFR descriptor 012004) as geopoints
t2m_gpt = mv.obsfilter(
    parameter = '012004',
    output = 'geopoints',
    data = obs_3day
)
# gridded analysis minus point observations -> geopoints of differences
diff = t2m_grib - t2m_gpt
diff_symb = mv.msymb(
    legend = True,
    symbol_type = 'marker',
    symbol_table_mode = 'advanced',
)
mv.setoutput(mv.png_output(output_width = 1000, output_name = './obsdiff1'))
mv.plot(area_view, diff, diff_symb)
# Extract geopoints that are hotter by 1 deg or more
#hotter = mv.filter(diff, diff >= 1)
hotter = diff.filter(diff >= 1)
# Extract geopoints that are colder by 1 deg or more
#colder = mv.filter(diff, diff <= -1)
colder = diff.filter(diff <= -1)
# Get geopoints that are within +/-1
#exact = mv.filter(diff, (diff > -1) * (diff < 1))
exact = diff.filter((diff > -1) * (diff < 1))
# Symbol visdefs for each classification
red = mv.msymb(
    symbol_type = 'marker',
    symbol_colour = 'red'
)
blue = mv.msymb(
    symbol_type = 'marker',
    symbol_colour = 'blue'
)
grey = mv.msymb(
    symbol_type = 'marker',
    symbol_colour = 'grey'
)
# plot the 'exact' data set with visdef 'grey', 'hotter' with 'red', etc.
mv.setoutput(mv.png_output(output_width = 1000, output_name = './obsdiff2'))
mv.plot(area_view, exact, grey, hotter, red, colder, blue)
|
{"/examples/UC-07-bufr-pandas.py": ["/metview/__init__.py"], "/examples/UC-01.py": ["/metview/__init__.py"], "/examples/UC-04-grib.py": ["/metview/__init__.py"], "/examples/UC-03-bufr.py": ["/metview/__init__.py"], "/metview/__main__.py": ["/metview/__init__.py"], "/examples/UC-07-bufr.py": ["/metview/__init__.py"]}
|
5,658
|
GregLahaye/wikipedia-game
|
refs/heads/master
|
/game.py
|
import wikipedia
def search(root, target):
    """Breadth-first search for the shortest Wikipedia-link path root -> target.

    Titles are first validated/canonicalised by wikipedia.check (which prints
    its own diagnostics for invalid pages). Prints each explored path and the
    final result; returns None.
    """
    root = wikipedia.check(root)
    target = wikipedia.check(target)
    if root and target:
        if root != target:
            visited = {root}
            queue = [[root]]
            head = 0  # index-based pop: 'queue = queue[1:]' copied the list each step
            found = False
            result = None
            print("Finding the shortest route from '{}' to '{}'...".format(root, target))
            while head < len(queue) and not found:
                try:
                    path = queue[head]
                    head += 1
                    current = path[-1]
                    print(" > ".join(path))
                    for node in wikipedia.get_links(current):
                        if node in visited:
                            continue
                        visited.add(node)
                        new_path = path + [node]
                        if node == target:
                            # BUG FIX: previously kept scanning and enqueuing
                            # after the target was found; stop immediately
                            result = new_path
                            found = True
                            break
                        queue.append(new_path)
                except KeyboardInterrupt:
                    exit("Keyboard Interrupt")
            if found:
                print("Shortest path: ")
                print(" > ".join(result))
            else:
                print("No possible route")
        else:
            print("Root and target are same")
# interactive entry point: '?' picks two random articles instead of prompting
start = input("Page to start at: ")
if start == "?":
    start, end = wikipedia.random(2)
else:
    end = input("Page to find: ")
search(start, end)
|
{"/game.py": ["/wikipedia.py"]}
|
5,659
|
GregLahaye/wikipedia-game
|
refs/heads/master
|
/wikipedia.py
|
import requests
def random(num=1):
    """Return `num` random main-namespace article titles."""
    params = {
        "format": "json",
        "action": "query",
        "generator": "random",
        "grnnamespace": "0",
        "grnlimit": num
    }
    response = s.get(API_URL, params=params).json()
    return [page["title"] for page in response["query"]["pages"].values()]
def check(title):
    """Validate a title against the API.

    Returns the redirect target if the page redirects, the title itself if it
    is a normal article, or False (after printing a message) when the page is
    a disambiguation page or does not exist.
    """
    params = {
        "action": "query",
        "titles": title,
        "prop": "categories",
        "clcategories": "Category:All disambiguation pages",
        "format": "json",
        "redirects": "true"
    }
    response = s.get(API_URL, params=params).json()
    query = response["query"]
    pageid = list(query["pages"].keys())[0]
    if "redirects" in query:
        return query["redirects"][0]["to"]
    if "categories" in query["pages"][pageid]:
        print("'{}' is a disambiguation page".format(title))
        return False
    if pageid == "-1":
        print("'{}' is not a valid article".format(title))
        return False
    return title
def get_links(title):
    """Return all main-namespace links on page `title`, following API continuation.

    On a ConnectionError the user is prompted and the request is retried;
    since `params` keeps the last 'plcontinue', the retry resumes from the
    failed page.
    """
    params = {
        "action": "query",
        "titles": title,
        "prop": "links",
        "pllimit": "max",
        "format": "json",
        "plnamespace": 0
    }
    # BUG FIX: 'links = []' used to live inside the retry loop, so an error
    # mid-pagination discarded every link gathered so far while the retry
    # still resumed from params['plcontinue'] — silently losing links.
    links = []
    done = False
    while not done:
        try:
            r = s.get(API_URL, params=params)
            response = r.json()
            pageid = list(response["query"]["pages"].keys())[0]
            if "links" in response["query"]["pages"][pageid]:
                links += [link["title"] for link in response["query"]["pages"][pageid]["links"]]
            while "continue" in response:
                params["plcontinue"] = response["continue"]["plcontinue"]
                r = s.get(API_URL, params=params)
                response = r.json()
                pageid = list(response["query"]["pages"].keys())[0]
                links += [link["title"] for link in response["query"]["pages"][pageid]["links"]]
            done = True
        except requests.exceptions.ConnectionError:
            input("Connect Error, <Enter> to try again")
    return links
# Wikipedia API endpoint and one shared session (reuses HTTP connections)
API_URL = "https://en.wikipedia.org/w/api.php"
s = requests.session()
|
{"/game.py": ["/wikipedia.py"]}
|
5,660
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/PatternFour.py
|
import re
from datetime import datetime
#"[2020-07-07T18:42:01.735] [INFO] ngui - com.unraveldata.ngui.topx.enabled :true"
class PatternFour:
    """Recognises log lines of the form
    "[2020-07-07T18:42:01.735] [INFO] ngui - message".
    """

    def __init__(self, line):
        # raw log line including surrounding brackets
        self.line = line

    def IsPatternFour(self):
        """Truthy when the line carries a valid timestamp and a known level tag."""
        try:
            time_stamp = self.line[1:24]
            message_type = self.line[27:31]       # 4-char levels: INFO / WARN
            message_type_two = self.line[27:32]   # 5-char level: ERROR
            return self.IsCorrectTimeStamp(time_stamp) and (self.IsCorrectType(message_type) or self.IsCorrectType(message_type_two))
        except Exception:
            print("Not Pattern Four")
            return False

    def IsCorrectTimeStamp(self, TimeStamp):
        """Validate 'YYYY-MM-DDTHH:MM:SS.mmm'; falsy when malformed."""
        try:
            date = TimeStamp[0:10]
            time = TimeStamp[11:23]
            character = TimeStamp[10]
            date_to_match = re.compile(r'\d\d\d\d-\d\d-\d\d')
            # BUG FIX: the '.' before the milliseconds was unescaped, so it
            # matched ANY character; escape it to require a literal dot
            time_to_match = re.compile(r'\d\d:\d\d:\d\d\.\d\d\d')
            return date_to_match.match(date) and character == "T" and time_to_match.match(time)
        except Exception:
            print("Not Pattern Four")
            return False

    def IsCorrectType(self, message_type):
        # closed set of recognised log levels
        return message_type == "ERROR" or message_type == "WARN" or message_type == "INFO"

    def ConvertTimestamp(self, TimeStamp):
        """Turn 'YYYY-MM-DDTHH:MM:SS.mmm' into 'YYYY-MM-DD HH:MM:SS.mmm'."""
        try:
            date = TimeStamp[0:10]
            time = TimeStamp[11:23]
            return date + " " + time
        except Exception:
            print("Error converting timestamp")

    def GetCurrentType(self):
        # "ERROR" is five characters, the other levels four
        if self.line[27:32] == "ERROR":
            return self.line[27:32]
        else:
            return self.line[27:31]

    def GetTimeStamp(self):
        return self.line[1:24]

    def GetMessage(self):
        # NOTE(review): [30:] starts inside the level tag for INFO/WARN lines
        # (e.g. "O] ngui - ...") — confirm the intended offset with callers
        return self.line[30:]
if __name__ == '__main__':
    # manual smoke test with a sample ERROR line
    Pattern = PatternFour("[2020-07-07T18:42:01.735] [ERROR] ngui - com.unraveldata.ngui.topx.enabled :true")
    (Pattern.IsPatternFour())
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,661
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/S3BucketParser/S3BucketParser.py
|
import json
import os
import boto3
from S3BucketParser.BucketFolderParser import BucketFolderParser
# module-level S3 resource; the credentials here are redacted placeholders —
# NOTE(review): prefer the default boto3 credential chain (env/shared config)
s3_object = boto3.resource('s3', aws_access_key_id="****",
                           aws_secret_access_key="****")
class S3BucketParser:
    """Walks every object in an S3 bucket, parses it, and writes/uploads JSON."""

    def __init__(self, bucketName):
        # name of the bucket to walk
        self.bucketName = bucketName

    def Parser(self):
        # BUG FIX: previously hard-coded "unravellogdata", silently ignoring
        # the bucket name given to the constructor
        self.bucketParser(self.bucketName)

    def bucketParser(self, bucketName):
        """Parse every object in `bucketName`; write a local JSON and upload it."""
        s3AllObj = s3_object.Bucket(bucketName).objects.all()
        for obj in s3AllObj:
            bucket_parser = BucketFolderParser(obj)
            messages = bucket_parser.parseBucketFolder()
            file_name = self.getFileName(obj.key)
            self.createJson(messages, file_name)
            self.uploadToS3(messages, obj.key)

    def getFileName(self, objectKey):
        """Base name of the key without extension ('a/b/c.log' -> 'c')."""
        (file, ext) = os.path.splitext(objectKey)
        return file.split('/')[-1]

    def uploadToS3(self, messages, objectKey):
        """Upload the local JSON next to the source key (same path, .json suffix)."""
        fileName = self.getFileName(objectKey)
        if messages:
            s3 = boto3.client('s3')
            # BUG FIX: upload target bucket now follows the configured name;
            # NOTE(review): the local path is hard-coded to a developer machine
            s3.upload_file('/Users/shrirangbagdi/Desktop/' + fileName + '.json', self.bucketName, self.getBucketPathway(objectKey) + ".json")

    def createJson(self, messages, fileName):
        """Write messages as newline-delimited JSON to the local results folder."""
        if messages:
            with open("/Users/shrirangbagdi/Desktop/" + fileName + '.json', 'w') as log_file:
                log_file.write('\n'.join(json.dumps(i) for i in messages) +
                               '\n')

    def getBucketPathway(self, objectKey):
        """Object key with its extension removed."""
        (file, ext) = os.path.splitext(objectKey)
        return file
if __name__ == '__main__':
    # manual entry point: parse the production log bucket
    parser = S3BucketParser("unravellogdata")
    parser.Parser()
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,662
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/LambdaAWSFunction/log_parser.py
|
import os
import boto3
from LambdaAWSFunction.message import message
class Log_Parser:
    """Turns an S3 put-event into parsed log messages."""

    def __init__(self):
        pass

    def parse_file(self, event):
        """Fetch the object named in `event`; if it is a non-empty .log file,
        return the message list generated for it (otherwise None)."""
        s3 = boto3.resource("s3")
        record = event["Records"][0]
        bucket_name = str(record['s3']['bucket']['name'])
        file_name = str(record['s3']['object']['key'])
        file_obj = s3.Object(bucket_name, file_name)
        if self.is_log(file_name) and file_name[-1] != "/" and (self.is_filled(file_obj)):
            case_id = self.get_id(file_name)
            return message(case_id, event).generate_messages()

    def get_id(self, fileName):
        """Case id = first path segment, or 'No ID' for bare file names."""
        return fileName.split("/")[0] if "/" in fileName else "No ID"

    def is_log(self, fileName):
        """True when the key carries a .log extension."""
        return os.path.splitext(fileName)[1] == ".log"

    def is_filled(self, fileObj):
        """True when the object body decodes to a non-empty UTF-8 string."""
        body = fileObj.get()["Body"].read().decode('utf-8')
        return body != ""
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,663
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/LogParser.py
|
import json
import sys
import os
from FileParser import FileParser
from FolderParser import FolderParser
class LogParser:
    """Drives log parsing from a .properties file plus command-line arguments."""

    def __init__(self, propertyFile):
        # path of the .properties file holding SourcePath / DestinationPath
        self.propertyFile = propertyFile
        self.parseSpecificDate = False

    def GetSource(self):
        """Return the SourcePath value from the property file.

        Raises Exception if the file cannot be read or the path does not exist.
        """
        try:
            # BUG FIX: the old loop did 'else: break', so SourcePath was only
            # found when it sat on the very first line; now every line is
            # scanned and the handle is closed via 'with'
            with open(self.propertyFile, 'r') as property_file:
                for line in property_file:
                    if "SourcePath" in line:
                        source_pathway = line.split("=")[-1].strip()
                        if not os.path.exists(source_pathway):
                            raise Exception("File does not exist")
                        return source_pathway
        except Exception:
            raise Exception("Something went wrong with your file, or destination path")

    def GetDestination(self):
        """Return the DestinationPath value from the property file."""
        try:
            with open(self.propertyFile, 'r') as property_file:
                for line in property_file:
                    if "DestinationPath" in line:
                        return line.split("=")[-1].strip()
        except Exception:
            raise Exception("Something went wrong with your file, or destination path")

    def CreateJsonFile(self, listOfWarnings):
        """Write warnings as newline-delimited JSON to <destination>AllResults.json."""
        destination = self.GetDestination()
        if listOfWarnings:
            with open(destination + "AllResults" + '.json', 'w') as log_file:
                log_file.write('\n'.join(json.dumps(i) for i in listOfWarnings) + '\n')

    def RunParser(self):
        """Dispatch on the first CLI argument; see the 'help' text for codes 0-5."""
        command_line_arguments = sys.argv[1:]
        source_path = self.GetSource()
        destination_path = self.GetDestination()
        # BUG FIX: the old '(a or b) is None' only caught the case where BOTH
        # paths were missing; each path is now checked separately
        if source_path is None or destination_path is None:
            raise Exception("Something went wrong with your file, source path, or destination path")
        elif not command_line_arguments:
            parser = FolderParser(source_path, destination_path, 0, 0, "")
            self.CreateJsonFile(parser.ParseFolder())
        elif command_line_arguments[0] == "help":
            print("You must put an input parameter along with the necessary arguments. For example: LogParser.py 5 sql")
            print("InputParam=0, Input is given by user in terminal to indicate that a folder needs to be parsed")
            print("InputParam=1, Input is given by user in terminal to indicate that a specific log file in the "
                  "folder needs to be parsed")
            print("InputParam=2, Input is given by user in terminal to indicate that two specific log files in the "
                  "folder needs to be parsed")
            print("InputParam=3, Input is given by user in terminal to indicate that three specific log files in the "
                  "folder needs to be parsed")
            print("InputParam=4, Input is given by user in terminal to indicate a specific start date & end date")
            print("InputParam=5, Input is given by user in terminal to indicate a specific pattern")
        else:
            command_input = int(command_line_arguments[0])
            # (the old 'len(command_line_arguments) < 1' check was dead code:
            # indexing [0] above would already have raised)
            if command_input < 0 or command_input > 5:
                raise Exception("Please enter a valid input paramater")
            elif command_input == 0:
                parser = FolderParser(source_path, destination_path, 0, 0, "")
                self.CreateJsonFile(parser.ParseFolder())
            elif command_input in (1, 2, 3):
                # the 1-, 2- and 3-file branches were near-duplicates; the
                # input code doubles as the number of file arguments
                all_warnings = []
                for file_arg in command_line_arguments[1:command_input + 1]:
                    file_name = source_path + "/" + file_arg.strip()
                    parser = FileParser(file_name, destination_path, 0, 0, "")
                    all_warnings += parser.ParseFile()
                self.CreateJsonFile(all_warnings)
            elif command_input == 4:
                start_date = command_line_arguments[1]
                end_date = command_line_arguments[2]
                parser = FolderParser(source_path, destination_path, start_date, end_date, "")
                self.CreateJsonFile(parser.ParseFolder())
            elif command_input == 5:
                pattern = command_line_arguments[1]
                parser = FolderParser(source_path, destination_path, 0, 0, pattern)
                self.CreateJsonFile(parser.ParseFolder())
if __name__ == '__main__':
    # NOTE(review): rebinding the class name to an instance shadows LogParser,
    # and the properties path is hard-coded to a developer machine
    LogParser = LogParser("/Users/shrirangbagdi/PycharmProjects/LogParser/LogParser.properties")
    LogParser.RunParser()
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,664
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/S3BucketParser.py
|
import json
import os
import boto3
from BucketFolderParser import BucketFolderParser
class S3BucketParser:
    """Finds an S3 bucket by name and parses each object in it, writing JSON."""

    def __init__(self, bucketName):
        # name of the bucket to locate and walk
        self.bucketName = bucketName

    def Parser(self):
        bucket = self.findBucket()
        if not bucket:
            print("Bucket not found!")
        else:
            self.bucketParser(bucket)

    def findBucket(self):
        """Return the bucket named self.bucketName, or None if absent."""
        # SECURITY FIX: a live AWS access-key/secret-key pair was hard-coded
        # here; credentials now come from the standard boto3 chain (env vars,
        # ~/.aws/credentials, instance role). The committed key pair must be
        # treated as compromised and revoked.
        s3_object = boto3.resource('s3')
        for each_bucket in s3_object.buckets.all():
            # BUG FIX: compare against the configured name instead of the
            # hard-coded "unravellogdata"
            if each_bucket.name == self.bucketName:
                return each_bucket

    def bucketParser(self, bucket):
        """Parse every object in `bucket`; write a local JSON and upload it."""
        for obj in bucket.objects.all():
            bucket_parser = BucketFolderParser(obj)
            messages = bucket_parser.parseBucketFolder()
            file_name = self.getFileName(obj.key)
            self.createJson(messages, file_name)
            self.uploadToS3(messages, obj.key)

    def getFileName(self, objectKey):
        """Base name of the key without extension ('a/b/c.log' -> 'c')."""
        (file, ext) = os.path.splitext(objectKey)
        return file.split('/')[-1]

    def uploadToS3(self, messages, objectKey):
        """Upload the local JSON next to the source key (same path, .json suffix)."""
        fileName = self.getFileName(objectKey)
        if messages:
            s3 = boto3.client('s3')
            # BUG FIX: upload bucket follows the configured name;
            # NOTE(review): the local path is hard-coded to a developer machine
            s3.upload_file('/Users/shrirangbagdi/Desktop/' + fileName + '.json', self.bucketName, self.getBucketPathway(objectKey) + ".json")

    def createJson(self, messages, fileName):
        """Write messages as newline-delimited JSON to the local results folder."""
        if messages:
            with open("/Users/shrirangbagdi/Desktop/" + fileName + '.json', 'w') as log_file:
                log_file.write('\n'.join(json.dumps(i) for i in messages) +
                               '\n')

    def getBucketPathway(self, objectKey):
        """Object key with its extension removed."""
        (file, ext) = os.path.splitext(objectKey)
        return file
if __name__ == '__main__':
    # Ad-hoc manual run against the production bucket; requires valid
    # AWS credentials in the environment.
    parser = S3BucketParser("unravellogdata")
    parser.Parser()
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,665
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/LambdaAWSFunction/LambdaFunction.py
|
import boto3
import json
import os
from LambdaAWSFunction.log_parser import Log_Parser
def lambda_handler(event, context):
    """AWS Lambda entry point: parse the S3 object referenced by *event*
    and write the extracted messages to the output bucket as
    newline-delimited JSON under the same key with a .json extension.
    """
    client = boto3.client('s3')
    # NOTE(review): spelling kept as-is — presumably the real bucket name.
    destination_bucket = 'ouput-bucket'
    messages = Log_Parser().parse_file(event)
    if not messages:
        return
    source_key = str(event["Records"][0]['s3']['object']['key'])
    stem, _extension = os.path.splitext(source_key)
    payload = ('\n'.join(json.dumps(m) for m in messages) + '\n').encode('UTF-8')
    client.put_object(Bucket=destination_bucket, Key=stem + ".json", Body=payload)
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,666
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/FolderParser.py
|
import os
import boto3
from FileParser import FileParser
class FolderParser:
    """Recursively collects WARN/ERROR entries from every ``.log`` file
    beneath a source directory by delegating each file to FileParser.
    """

    def __init__(self, sourcePathway, destinationPathway, startDate=0, endDate=0, pattern=""):
        # Directory tree to scan and directory receiving FileParser's JSON.
        self.sourcePathway = sourcePathway
        self.destinationPathway = destinationPathway
        # Optional date-range / substring filters, forwarded verbatim.
        self.startDate = startDate
        self.endDate = endDate
        self.pattern = pattern

    def ParseFolder(self):
        """Return the combined warning list for the whole directory tree."""
        collected = []
        for entry in os.scandir(self.sourcePathway):
            if entry.is_dir():
                # Recurse into subdirectories with the same settings.
                subparser = FolderParser(entry.path, self.destinationPathway,
                                         self.startDate, self.endDate, self.pattern)
                collected.extend(subparser.ParseFolder())
            elif entry.path.endswith(".log") and entry.is_file():
                file_parser = FileParser(entry, self.destinationPathway,
                                         self.startDate, self.endDate, self.pattern)
                collected.extend(file_parser.ParseFile())
        return collected
if __name__ == '__main__':
    # Manual smoke test against a local directory tree.
    Parser = FolderParser('/Users/shrirangbagdi/Desktop/checkLogs', '/Users/shrirangbagdi/Desktop/')
    Parser.ParseFolder()
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,667
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/BucketFileParser.py
|
import boto3
from PatternFour import PatternFour
from PatternOne import PatternOne
from PatternThree import PatternThree
class BucketFileParser:
    """Extracts WARN/ERROR log messages from a single S3 log object.

    Lines that do not start a new timestamped entry are treated as
    continuations (stack traces etc.) and appended to the open message.

    Refactor: the three per-pattern branches were byte-for-byte duplicates
    of each other; they are collapsed into ``_matchLine``/``_buildMessage``.
    The short-circuiting match also avoids constructing all three pattern
    objects for every line.
    """

    def __init__(self, caseID, fileObject):
        # Case identifier recorded with every extracted message.
        self.caseID = caseID
        # boto3 object whose body holds the raw log text.
        self.fileObject = fileObject

    def _matchLine(self, line):
        """Return the pattern object that recognises *line*, or None."""
        pattern_one = PatternOne(line)
        if pattern_one.IsPatternOne():
            return pattern_one
        pattern_three = PatternThree(line)
        if pattern_three.IsPatternThree():
            return pattern_three
        pattern_four = PatternFour(line)
        if pattern_four.IsPatternFour():
            return pattern_four
        return None

    def _buildMessage(self, pattern, file_name):
        """Build the output record for a WARN/ERROR line matched by *pattern*."""
        timestamp = pattern.GetTimeStamp()
        message = pattern.GetMessage()
        return {'File Name': file_name, 'Case ID': self.caseID,
                'Timestamp': pattern.ConvertTimestamp(timestamp),
                'Type': pattern.GetCurrentType(), 'Message': message.strip()}

    def generateMessages(self):
        """Parse the S3 object and return a list of WARN/ERROR dicts."""
        list_of_messages = []
        warning = {}
        file_object = self.fileObject
        file_name = file_object.key.split("/")[-1]
        previous_type = ""
        first_iteration = True
        body = file_object.get()["Body"].read().decode(encoding="utf-8", errors="ignore")
        for line in body.splitlines():
            pattern = self._matchLine(line)
            if pattern is not None:
                # A new timestamped line flushes any in-progress message.
                if (not first_iteration) and previous_type in ("WARN", "ERROR"):
                    list_of_messages.append(warning)
                first_iteration = False
                warning = {}
                current_type = pattern.GetCurrentType()
                # INFO lines reset state but are not recorded.
                if current_type in ("WARN", "ERROR"):
                    warning = self._buildMessage(pattern, file_name)
                previous_type = current_type
            elif previous_type in ("WARN", "ERROR"):
                # Continuation line (e.g. a traceback): extend the message.
                warning["Message"] += line
        # Flush the final in-progress message, if any.
        if previous_type in ("WARN", "ERROR"):
            list_of_messages.append(warning)
        return list_of_messages
if __name__ == '__main__':
    # Requires a live boto3 object; nothing runnable stand-alone.
    pass
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,668
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/PatternThree.py
|
#Pattern3="[2020-05-11T16:54:33,500][WARN ][o.e.m.j.JvmGcMonitorService] [unravel_s_1] [gc][1126797] overhead, spent [709ms] collecting in the last [1.2s]"
import re
from datetime import datetime
class PatternThree:
    """Recognises log lines shaped like
    ``[2020-05-11T16:54:33,500][WARN ][component] message``.
    """

    def __init__(self, line):
        # Raw log line under inspection.
        self.line = line

    def IsPatternThree(self):
        """Truthy when the line carries a bracketed ISO timestamp and level."""
        try:
            stamp = self.line[1:24]
            level = self.line[26:31]
            return self.IsCorrectTimeStamp(stamp) and self.IsCorrectType(level.strip())
        except:
            print("Not Pattern Three")
            return False

    def IsCorrectTimeStamp(self, TimeStamp):
        """Check the ``YYYY-MM-DDTHH:MM:SS,mmm`` layout with anchored regexes."""
        try:
            date_part = TimeStamp[0:10]
            separator = TimeStamp[10]
            time_part = TimeStamp[11:23]
            date_ok = re.compile(r'\d\d\d\d-\d\d-\d\d').match(date_part)
            time_ok = re.compile(r'\d\d:\d\d:\d\d,\d\d\d').match(time_part)
            return date_ok and separator == "T" and time_ok
        except:
            print("Not Pattern Three")
            return False

    def IsCorrectType(self, message_type):
        """Only these three severity levels are recognised."""
        return message_type in ("ERROR", "WARN", "INFO")

    def ConvertTimestamp(self, TimeStamp):
        """Rewrite ``YYYY-MM-DDTHH:MM:SS,mmm`` as ``YYYY-MM-DD HH:MM:SS.mmm``."""
        try:
            return "{} {}.{}".format(TimeStamp[0:10], TimeStamp[11:19], TimeStamp[20:23])
        except:
            print("Error converting timestamp")

    def GetCurrentType(self):
        # Severity field sits in columns 26-30; padding removed.
        return self.line[26:31].strip()

    def GetTimeStamp(self):
        # Timestamp sits between the first pair of brackets.
        return self.line[1:24]

    def GetMessage(self):
        # Everything from column 30 on.  NOTE(review): this still includes
        # the tail of the level field — kept as-is for compatibility.
        return self.line[30:]
if __name__ == '__main__':
    # Manual check of recognition and timestamp normalisation.
    Pattern = PatternThree("[2020-05-11T16:54:33,500][WARN ][o.e.m.j.JvmGcMonitorService] [unravel_s_1] [gc][1126797] overhead, spent [709ms] collecting in the last [1.2s]")
    print(Pattern.ConvertTimestamp("2020-05-11T16:54:33,500"))
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,669
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/BucketFolderParser.py
|
import bz2
import gzip
import os
import zipfile
from io import BytesIO
import boto3
from boto3 import s3
from BucketFileParser import BucketFileParser
class BucketFolderParser:
    """Decides, for one S3 object, whether it is a parseable log file and
    delegates the actual message extraction to BucketFileParser.
    """

    def __init__(self, obj):
        # boto3 object (or object summary) to examine.
        self.obj = obj

    def parseBucketFolder(self):
        """Parse the object if it is a non-empty ``.log`` file.

        Returns the extracted message list, or None for directory keys,
        compressed archives and anything else that is skipped.
        """
        directory = self.obj.key
        print(directory)
        if directory[-1] != "/" and self.isLog(directory) and (not self.isEmpty(self.obj)):
            case_id = self.getID(directory)
            bucket_parser = BucketFileParser(case_id, self.obj)
            return bucket_parser.generateMessages()
        elif directory[-1] != "/" and self.isCompressed(directory):
            # Archives are recognised but not handled yet.
            pass

    def getID(self, directory):
        """Case ID is the first path component of the key, if any."""
        if "/" in directory:
            return directory.split("/")[0]
        else:
            return "No ID"

    def isLog(self, directory):
        """True when the key's extension is exactly ``.log``."""
        (file, ext) = os.path.splitext(directory)
        return ext == ".log"

    def isEmpty(self, directory):
        """True when the object's body is zero bytes.

        NOTE(review): this downloads the entire object just to test
        emptiness; the object's size metadata would avoid the transfer.
        """
        if directory.get()['Body'].read().decode('utf-8') == "":
            return True
        return False

    def isCompressed(self, directory):
        """True for known archive suffixes.

        Fixed: ``os.path.splitext`` only yields the final extension, so the
        original ``ext == ".tar.gz"`` / ``".log.gz"`` comparisons could never
        match; a suffix test handles multi-part extensions correctly.
        """
        return directory.endswith((".tar.gz", ".log.gz", ".zip", ".gz", ".tar"))
if __name__ == '__main__':
    # Smoke test of the extension check only (no S3 access needed).
    parser = BucketFolderParser("f")
    parser.isLog("unravel.log")
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,670
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/Parser.py
|
import json
import re
class Parser:
    """First-generation parser: scans one log file for WARN/ERROR entries
    and dumps them to a fixed JSON file on the desktop.

    Superseded by FileParser; kept for reference.
    """

    # Patterns for the leading "YYYY/MM/DD HH:MM:SS.mmm" stamp, compiled
    # once instead of on every StartsCorrectly() call.
    # Fixed: the '.' before the milliseconds was unescaped and matched
    # any character.
    _DATE_RE = re.compile(r'\d\d\d\d/\d\d/\d\d')
    _TIME_RE = re.compile(r'\d\d:\d\d:\d\d\.\d\d\d')

    def __init__(self, logfile):
        # Absolute path of the log file to parse.
        self.logfile = logfile

    def ParseFile(self):
        """Collect WARN/ERROR messages (with continuation lines) and write
        them to /Users/shrirangbagdi/Desktop/Contents.json.

        Fixed: the input file is now closed via a context manager.
        """
        list_of_warnings = []
        previous_type = ""
        warning = {}
        first_iteration = True
        with open(self.logfile, 'r') as log_file:
            for line in log_file:
                if self.StartsCorrectly(line):
                    parts = line.split()
                    current_type = parts[2]
                    # A new timestamped line flushes any in-progress warning.
                    if (not first_iteration) and (previous_type in ("WARN", "ERROR")):
                        list_of_warnings.append(warning)
                        warning = {}
                    if current_type in ("WARN", "ERROR"):
                        warning = {'Date': parts[0], 'Time': parts[1],
                                   'Type': current_type,
                                   'Message': line.split(current_type)[1]}
                        first_iteration = False
                    previous_type = current_type
                elif previous_type in ("WARN", "ERROR"):
                    # Continuation line: extend the open message.
                    warning["Message"] += line
        if previous_type in ("WARN", "ERROR"):
            list_of_warnings.append(warning)
        # NOTE(review): output path is hard-coded; kept for compatibility.
        with open('/Users/shrirangbagdi/Desktop/Contents.json', 'w') as log_file:
            json.dump(list_of_warnings, log_file, indent=4)

    def StartsCorrectly(self, line):
        """True when *line* begins with a full date and time stamp."""
        # Too short to hold a stamp, or fewer than three fields.
        if (len(line) < 24) or (len(line.split()) < 3):
            return False
        date = line.split()[0]
        time = line.split()[1]
        return bool(self._DATE_RE.match(date) and self._TIME_RE.match(time))
if __name__ == '__main__':
    # Manual run against a sample log on the desktop.
    Parser = Parser('/Users/shrirangbagdi/Desktop/l.log')
    Parser.ParseFile()
# Original author's roadmap:
# make code cleaner
# add more methods
# add errors exceptions
# take in a folder, multiple log files, a log file and be able to convert into json object..
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,671
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/PatternOne.py
|
import re
from datetime import datetime
class PatternOne:
    """Recognises log lines of the form
    ``2020/06/19 21:41:49.537 WARN message text``.
    """

    def __init__(self, line):
        # Raw log line under inspection.
        self.line = line

    def IsPatternOne(self):
        """Truthy when the line opens with a slash-date stamp and a level."""
        try:
            stamp = self.line[0:23]
            level = self.line.split()[2]
            return self.IsCorrectTimeStamp(stamp) and self.IsCorrectType(level)
        except:
            print("Not Pattern One")
            return False

    def IsCorrectTimeStamp(self, TimeStamp):
        """Check the ``YYYY/MM/DD HH:MM:SS.mmm`` layout with two regexes."""
        try:
            date_part = TimeStamp.split()[0]
            time_part = TimeStamp.split()[1]
            # NOTE(review): the '.' is unescaped and matches any character —
            # confirm whether a literal dot was intended.
            date_ok = re.compile(r'\d\d\d\d/\d\d/\d\d').match(date_part)
            time_ok = re.compile(r'\d\d:\d\d:\d\d.\d\d\d').match(time_part)
            return date_ok and time_ok
        except:
            print("Not Pattern One")
            return False

    def IsCorrectType(self, message_type):
        """Only these three severity levels are recognised."""
        return message_type in ("ERROR", "WARN", "INFO")

    def ConvertTimestamp(self, timestamp):
        """Rewrite ``YYYY/MM/DD HH:MM:SS.mmm`` as ``YYYY-MM-DD HH:MM:SS.mmm``."""
        try:
            date_part = timestamp.split()[0]
            time_part = timestamp.split()[1]
            iso_date = datetime.strptime(date_part, '%Y/%m/%d').strftime('%Y-%m-%d')
            return iso_date + " " + time_part
        except:
            print("Trouble converting timestamp")

    def GetCurrentType(self):
        # Severity is the third whitespace-separated field.
        return self.line.split()[2]

    def GetTimeStamp(self):
        # Date + time occupy the first 23 characters.
        return self.line[0:23]

    def GetMessage(self):
        # Everything after the severity keyword.
        return self.line.split(self.GetCurrentType())[-1]
if __name__ == '__main__':
    # Manual recognition check.
    Pattern = PatternOne("2020/06/19 21:41:49.537 WARN this message type is pattern one")
    Pattern.IsPatternOne()
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,672
|
shrirangbagdi/LogParser
|
refs/heads/master
|
/FileParser.py
|
import json
import os
import re
from datetime import datetime
from PatternFour import PatternFour
from PatternOne import PatternOne
from PatternThree import PatternThree
class FileParser:
    """Parses one log file for WARN/ERROR entries.

    Each entry becomes a dict with keys 'File Name', 'Timestamp', 'Type'
    and 'Message'; continuation lines (stack traces) are appended to the
    preceding message.  Results can optionally be filtered by a date range
    or a case-insensitive substring, and are written as newline-delimited
    JSON into ``destinationPathway``.
    """

    def __init__(self, logfile, destinationPathway, dateStart=0, dateEnd=0, pattern=""):
        # Path (or os.DirEntry) of the log file to read.
        self.logfile = logfile
        # Directory that receives the generated .json file.
        self.destinationPathway = destinationPathway
        # Optional 'YYYY/MM/DD' range filter; 0 disables it.
        self.dateStart = dateStart
        self.dateEnd = dateEnd
        # Optional case-insensitive substring filter; "" disables it.
        self.pattern = pattern

    def ParseFile(self):
        """Parse, filter, persist and return the warning list."""
        list_of_warnings = self.GenerateWarnings()
        updated_list = self.UpdateWarningList(list_of_warnings)
        return updated_list

    def _MatchLine(self, line):
        """Return the pattern object that recognises *line*, or None."""
        pattern_one = PatternOne(line)
        if pattern_one.IsPatternOne():
            return pattern_one
        pattern_three = PatternThree(line)
        if pattern_three.IsPatternThree():
            return pattern_three
        pattern_four = PatternFour(line)
        if pattern_four.IsPatternFour():
            return pattern_four
        return None

    def _BuildWarning(self, pattern):
        """Build the record for a WARN/ERROR line matched by *pattern*.

        Fixed: the original pattern-four branch accidentally called
        GetCurrentType/ConvertTimestamp on the pattern-THREE object.
        """
        timestamp = pattern.GetTimeStamp()
        message = pattern.GetMessage()
        return {'File Name': self.GetLogFileName(),
                'Timestamp': pattern.ConvertTimestamp(timestamp),
                'Type': pattern.GetCurrentType(),
                'Message': message.strip()}

    def GenerateWarnings(self):
        """Scan the log file and return the raw list of WARN/ERROR dicts.

        Fixed: the file handle is now closed via a context manager, and the
        three duplicated per-pattern branches are collapsed into helpers.
        """
        list_of_warnings = []
        previous_type = ""
        warning = {}
        first_iteration = True
        with open(self.logfile, 'r') as log_file:
            for line in log_file:
                pattern = self._MatchLine(line)
                if pattern is not None:
                    # A new timestamped line flushes any in-progress warning.
                    if (not first_iteration) and previous_type in ("WARN", "ERROR"):
                        list_of_warnings.append(warning)
                    first_iteration = False
                    warning = {}
                    current_type = pattern.GetCurrentType()
                    # INFO lines reset state but are not recorded.
                    if current_type in ("WARN", "ERROR"):
                        warning = self._BuildWarning(pattern)
                    previous_type = current_type
                elif previous_type in ("WARN", "ERROR"):
                    # Continuation line: extend the open message.
                    warning["Message"] += line
        if previous_type in ("WARN", "ERROR"):
            list_of_warnings.append(warning)
        return list_of_warnings

    def UpdateWarningList(self, listOfWarnings):
        """Apply the configured filter (pattern wins over dates), write the
        JSON output, and return the (possibly filtered) list."""
        if (self.dateStart == 0 or self.dateEnd == 0) and self.pattern == "":
            self.CreateJsonFile(listOfWarnings)
            return listOfWarnings
        elif not self.pattern == "":
            updated_list = self.FindWarningsWithPattern(listOfWarnings)
            self.CreateJsonFile(updated_list)
            return updated_list
        elif not (self.dateStart == 0 or self.dateEnd == 0):
            # Fixed: the original tested dateStart twice and never dateEnd.
            updated_list = self.FindWarningsWithDate(listOfWarnings)
            self.CreateJsonFile(updated_list)
            return updated_list
        return listOfWarnings

    def FindWarningsWithPattern(self, listOfWarnings):
        """Keep only warnings whose message contains self.pattern
        (case-insensitive)."""
        pattern = self.pattern.lower()
        return [w for w in listOfWarnings if pattern in w["Message"].lower()]

    def FindWarningsWithDate(self, listOfWarnings):
        """Keep only warnings strictly inside (dateStart, dateEnd).

        Fixed: warnings carry a 'Timestamp' key ('YYYY-MM-DD HH:MM:SS.mmm'),
        not the 'Date' key the original looked up (which raised KeyError).
        """
        start_date = datetime.strptime(self.dateStart, '%Y/%m/%d')
        end_date = datetime.strptime(self.dateEnd, '%Y/%m/%d')
        updated_list = []
        for warning in listOfWarnings:
            date_to_compare = datetime.strptime(warning["Timestamp"][:10], '%Y-%m-%d')
            if start_date < date_to_compare < end_date:
                updated_list.append(warning)
        return updated_list

    def CreateJsonFile(self, listOfWarnings):
        """Write newline-delimited JSON to the destination directory; no
        file is produced for an empty list."""
        if listOfWarnings:
            with open(self.destinationPathway + self.GetJsonFileName() + '.json', 'w') as log_file:
                log_file.write('\n'.join(json.dumps(i) for i in listOfWarnings) +
                               '\n')

    def GetJsonFileName(self):
        """Base name of the log file, without directories or extension."""
        (file, ext) = os.path.splitext(self.logfile)
        return file.split('/')[-1]

    def GetLogFileName(self):
        """Base name of the log file including its extension."""
        (file, ext) = os.path.splitext(self.logfile)
        return file.split('/')[-1] + ext
if __name__ == '__main__':
    # Manual run against a sample desktop log file.
    Parser = FileParser('/Users/shrirangbagdi/Desktop/f.log', "/Users/shrirangbagdi/Desktop/")
    # Parser = FileParser('/home/ec2-user/logdata', "/Users/shrirangbagdi/Desktop/")
    Parser.ParseFile()
|
{"/LogParser.py": ["/FileParser.py", "/FolderParser.py"], "/S3BucketParser.py": ["/BucketFolderParser.py"], "/LambdaAWSFunction/LambdaFunction.py": ["/LambdaAWSFunction/log_parser.py"], "/FolderParser.py": ["/FileParser.py"], "/BucketFileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"], "/BucketFolderParser.py": ["/BucketFileParser.py"], "/FileParser.py": ["/PatternFour.py", "/PatternOne.py", "/PatternThree.py"]}
|
5,684
|
raysmith619/sudoku
|
refs/heads/master
|
/src/sudoku.py
|
# sudoko.py
"""
This is a program to solve, and someday create, Sudoku puzzles
It was adapted from the Perl program sudoku.pl
To ease the adaption process the original variable, function and file names, where possible, have been preserved.
The Trace.pm module use has been replaced by the select_trace.py module.
"""
##############
## External ##
##############
from math import *
import datetime
import traceback
import time
import os
import argparse
import re
from tkinter import *
###############
## Libraries ##
###############
from select_trace import SlTrace
from select_error import SelectError
base_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
SlTrace.setLogName(base_name)
SlTrace.lg(f"{base_name} {' '.join(sys.argv[1:])}")
from select_window import SelectWindow
from select_control import SelectControl
from variable_control_window import VariableControlWindow
from trace_control import TraceControl
from tkMath import tkMath
from sudoku_subs import *
import sudoku_globals as gb
gb.initialize_globals()
##################
## User-defined ##
##################
# Set up main board
def prime_exit():
    """Window-close handler: stop the run loop and exit the program."""
    SlTrace.lg("Prime Exit")
    gb.running = False
    pgm_exit()
# --- Main window and persistent run-control values ------------------------
mw = gb.Display_mw = Tk() # To support grid layout - MUST be done before wm
mw.title("Sudoku Playing")
mw.protocol("WM_DELETE_WINDOW", prime_exit)
tkMath.setup(mw)
# SelectControl persists values between runs under the "run_control" prefix.
cF = SelectControl(control_prefix="run_control", update_report=update_report)
cF.make_label("Puzzle Dimensions")
gb.nCol = cF.make_val("nCol", 9)
gb.nSubCol =cF.make_val("nSubCol", 3)
gb.nRow = cF.make_val("nRow", 9)
gb.nSubRow = cF.make_val("nSubRow", 3)
cF.make_label("")
cF.make_label("Puzzle Size")
gb.bSize = cF.make_val("bSize", 3) # Main Bd size inches
gb.sSize = cF.make_val("sSize", 2) # Solution Bd size
gb.nFirst = cF.make_val("nFirst", 5) # first n solutions
gb.makePuzzle = cF.make_val("makePuzzle", False) # If defined, generate puzzle with this many cells filled
# NOTE(review): persisted under key "PuzzleSym" while the variable is
# makePuzzleSym — confirm the asymmetric key name is intentional.
gb.makePuzzleSym = cF.make_val("PuzzleSym", "c") # Puzzle symetry pref for initial settings
# x, y, center, n - none
gb.traceList = cF.make_val("traceList", "")
gb.UsePuzzle = cF.make_val("UsePuzzle", False)
gb.xPer = cF.make_val("xPer", False) # experimental
cF.make_label("")
cF.make_label("Running Control")
gb.run_after_load = cF.make_val("run_after_load", False)
# Progress display variables
cF.make_label("Display Time")
gb.Display_time = cF.make_val("Display_time", .5) # Display time, None - no display
# 0 - wait for continue
# > 0 delay (sec)
gb.update_time = cF.make_val("update_time", 10.)
gb.Display_board = None
gb.Display_prev_time = 0 # Previous display time
##################
## Main program ##
##################
# --- Command-line overrides of the persisted values -----------------------
parser = argparse.ArgumentParser()
###parser.add_argument('--closed_tours', type=str2bool, dest='closed_tours', default=closed_tours)
###parser.add_argument('--display_complete', type=str2bool, dest='display_complete', default=display_complete)
###parser.add_argument('--display_path_board', type=str2bool, dest='display_path_board', default=display_path_board)
###parser.add_argument('--max_look_ahead=', type=int, dest='max_look_ahead', default=max_look_ahead)
parser.add_argument('--cols', type=int, dest='nCol', default=gb.nCol) # Number of cell col
parser.add_argument('--bSize', type=float, dest='bSize', default=gb.bSize) # Board size in inches
parser.add_argument('--displayTime', type=float, dest='Display_time', default=gb.Display_time) # Solution step display time (sec)
# 0 - till user ACK, None - none
parser.add_argument('--first', type=int, dest='nFirst', default=gb.nFirst) # first(atleast) solutions
parser.add_argument('--gcols', type=int, dest='nSubCol', default=gb.nSubCol) # Number of cell col in group
# NOTE(review): several option strings below carry Perl-style suffixes
# ('--grows=', '--sSize=f', '--traceList=s', '--xper=n'); argparse will
# register those literal names — confirm they are invocable as intended.
parser.add_argument('--grows=', type=int, dest='nSubRow', default=gb.nSubRow) # Number of cell row in group
parser.add_argument('--makePuzzle', type=int, dest='makePuzzle', default=gb.makePuzzle) # Make random puzzle with n start
parser.add_argument('--msymetric', type=str, dest='makePuzzleSym', default=gb.makePuzzleSym) # Make puzzle symetry
parser.add_argument('--rows', type=int, dest='nRow', default=gb.nRow) # Number of cell row
parser.add_argument('--run_after_load', type=str2bool, dest='run_after_load', default=gb.run_after_load) # Use preset puzzle
parser.add_argument('--sSize=f', type=float, dest='sSize', default=gb.sSize) # Solution board size
parser.add_argument('--traceList=s', type=str, dest='traceList', default=gb.traceList) # Comma separated trace list
parser.add_argument('--uPuzzle', type=str2bool, dest='UsePuzzle', default=gb.UsePuzzle) # Use preset puzzle
# NOTE(review): update_time defaults to a float (10.) but is parsed with
# str2bool — likely should be type=float.
parser.add_argument('--update_time', type=str2bool, dest='update_time', default=gb.update_time) # Use preset puzzle
parser.add_argument('--xper=n', type=int, dest='xPer', default=gb.xPer) # Experimental = 1
args = parser.parse_args() # or raise SelectError("Illegal options")
SlTrace.lg(f"args: {args}")
# Copy parsed values into the shared globals module.
gb.nCol = args.nCol
gb.bSize = args.bSize
gb.Display_time = args.Display_time
gb.nFirst = args.nFirst
gb.nSubCol = args.nSubCol
gb.makePuzzle = args.makePuzzle
gb.makePuzzleSym = args.makePuzzleSym
gb.nRow = args.nRow
gb.run_after_load = args.run_after_load
gb.sSize = args.sSize
gb.traceList = args.traceList
gb.UsePuzzle = args.UsePuzzle
gb.xPer = args.xPer
# Update persistent values
cF.set_val("bSize", gb.bSize) # Main Bd size inches
cF.set_val("sSize", gb.sSize) # Solution Bd size
cF.set_val("nFirst", gb.nFirst) # first n solutions
cF.set_val("makePuzzle", gb.makePuzzle) # If defined, generate puzzle with this many cells filled
cF.set_val("PuzzleSym", gb.makePuzzleSym) # Puzzle symetry pref for initial settings
# x, y, center, n - none
cF.set_val("nCol", gb.nCol)
cF.set_val("nSubCol", gb.nSubCol)
cF.set_val("nRow", gb.nRow)
cF.set_val("nSubRow", gb.nSubRow)
cF.set_val("traceList", gb.traceList)
cF.set_val("update_time", gb.update_time)
cF.set_val("UsePuzzle", gb.UsePuzzle)
cF.set_val("xPer", gb.xPer) # experimental
cF.set_val("Display_time", gb.Display_time)
trace = True if gb.traceList is not None and gb.traceList != "" else False
if trace:
    SlTrace.setFlags(gb.traceList)
# Derive missing dimensions (square board, sqrt-sized groups).
# NOTE(review): the next two fallbacks assign bare module globals
# (nSubCol / nSubRow) instead of gb.nSubCol / gb.nSubRow, so the computed
# defaults never reach the shared globals — likely a bug; confirm.
if gb.nSubCol is None:
    nSubCol = int(sqrt(gb.nCol))
if gb.nRow is None:
    gb.nRow = gb.nCol # Set square by default
if gb.nSubRow is None:
    nSubRow = int(sqrt(gb.nRow))
# NOTE(review): this computes a local makePuzzle and adjusts it for
# evenness, but gb.makePuzzle is what is used later — confirm intent.
makePuzzle = int(gb.nCol*gb.nRow/3)
if gb.makePuzzle % 2 == 1:
    makePuzzle -= 1 # Make even
# --- Window geometry and button bar ---------------------------------------
mw.update()
bs_in = int(tkMath.inchesToPixels(gb.bSize))
w = bs_in + 200
h = bs_in + 100
gb.nSol = None # Solution window number
gb.top_fr = Frame(mw)
gb.top_fr.pack(side = 'top')
control_fr = Frame(gb.top_fr)
control_fr.pack(side = 'top')
app = SelectWindow(gb.Display_mw,
                   title="Playing Sudoku",
                   arrange_selection=False,
                   pgmExit=prime_exit,
                   file_open = file_open,
                   )
app.add_menu_command("Puzzle", file_open) # Dedicated puzzle menu item
# NOTE(review): "Contols" is misspelled in the visible menu label.
app.add_menu_command("Contols", set_controls) # Display variable controls
mw.geometry(f"{w}x{h}")
mw.update()
solve_puzzle = Button(control_fr,
                      text = "Solve Puzzle", # Guess all remaining
                      command = solve_main_puzzle,
                      )
solve_puzzle.pack(side = 'left')
make_puzzle_b = Button(control_fr,
                       text = "Make Puzzle",
                       command = make_puzzle,
                       )
make_puzzle_b.pack(side = 'left')
reset_b = Button(control_fr,
                 text = "Reset", # Reset to initial setting
                 command = reset_board
                 )
reset_b.pack(side = 'left')
clear_b = Button(control_fr,
                 text = "Clear Board",
                 command = clear_board,
                 )
clear_b.pack(side = 'left')
# Selection-box state used while the user is entering a cell value.
sbox_fr = None # Set value frame
sbox = None # selection box
sbox_row = None # selected cell row
sbox_col = None
sbox_legal_vals = [] # legal vals in selection box
gb.o_data = None # Primary data
gb.o_board = None # Primary board
Initial_data = None # Initial data values
# setup initial position
if gb.UsePuzzle:
    use_puzzle() # Use premade puzzle
else:
    sols = make_puzzle(gb.makePuzzle)
# Display progress during puzzle solution
mw.mainloop()
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,685
|
raysmith619/sudoku
|
refs/heads/master
|
/puzzle/sudoku_subs.py
|
# sudoku_subs.py
# Top level subs for sudoku.py
# imported to workaround the lack of forward referencing subroutines
import sys
import os
from tkinter import filedialog
from tkinter import *
import time
import argparse
from math import *
from select_trace import SlTrace
from select_error import SelectError
from resource_group import ResourceEntry
from select_control import SelectControl
from variable_control import VariableControl
import sudoku_globals as g
from SudokuData import SudokuData, CellDesc
from sudoku_vals import SudokuVals
from SudokuPuzzle import SudokuPuzzle
from SudokuBoard import SudokuBoard
from SudokuPly import SudokuPly
from sudoku_puzzle_load import SudokuPuzzleLoad
from sudoku_search_stop import SudokuSearchStop
def helpstr():
    """Return the command-line usage text for sudoku.py.

    Bug fix: the original assembled ``retstr`` but fell off the end of the
    function, so every caller received None.
    """
    retstr = f"""
    --cols number_of_cols Default: {g.nCol}
    --bSize bd size in inches Default: {g.bSize}
    --dispalyTime sec between display, 0 - wait till user ACK
    --first number_of_solutions, stop when we have this many Default: {g.nFirst}
    --makePuzzle starting_cells, make puzzle with this many filled cells
    --gcols number_of_cells_in_group Default: {g.nSubCol}
    --grows number_of_cells_in_group Default: {g.nrowGroup}
    --rows number_of_rows Default: {g.nRow}
    --sSize solution bd size inches Default: {g.sSize}
    --traceList comma_separated_trace_options Default: {g.traceList}
    --uPuzzle - use preformed puzzle
    --xperimental experimental version Default: {g.xPer}
    """
    # NOTE(review): g.nrowGroup is referenced above but the rest of the file
    # uses g.nSubRow — confirm the attribute name exists in sudoku_globals.
    return retstr
def str2bool(v):
    """Parse a human-friendly boolean flag value for argparse.

    Accepts yes/no, true/false, t/f, y/n and 1/0 in any case; raises
    argparse.ArgumentTypeError for anything else.
    """
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def update():
    """ do any window updating required
    """
    # Skip the UI refresh once shutdown has begun or tracing ended the job.
    if not g.running:
        return
    if not SlTrace.runningJob:
        return
    # Refresh the Tk main window if it still exists.
    if g.Display_mw is not None:
        g.Display_mw.update()
def pgm_exit():
    """Shut down the application: log, stop loops, release windows,
    save trace state, and exit the process."""
    SlTrace.lg("Quitting Sudoku Playing")
    # Trace and Log files save by SlTrace onexit
    ###SlTrace.lg("Properties File: %s"% SlTrace.getPropPath())
    ###SlTrace.lg("Log File: %s"% SlTrace.getLogPath())
    g.running = False             # Stop update()/display callbacks first
    g.res_group.destroy_all()     # Tear down any solution/search windows
    g.Display_mw.destroy()
    g.Display_mw = None
    SlTrace.onexit() # Force saving
    sys.exit(0)
def set_controls():
    """Rebuild the VariableControl window from the SelectControl singleton,
    dropping any previous control window first."""
    cF = SelectControl() # Ref to singleton
    if g.vC is not None:
        g.vC.destroy()
        g.vC = None
    g.vC = VariableControl(var_ctl=cF)
def set_puzzle(puzzle, file_name=None):
    """ Set/Reset main puzzle
    :puzzle: Puzzle to setup
    :file_name: optional file name to record on the puzzle
    Replaces g.main_puzzle (values-only copy) and g.o_board, redraws
    the board, and pushes the board geometry into SelectControl.
    """
    if file_name is not None:
        puzzle.file_name = file_name # override if desired
    if g.main_puzzle is not None:
        g.main_puzzle.destroy()
        g.main_puzzle = None
    if g.o_board is not None:
        g.o_board.destroy()
    g.main_puzzle = SudokuData.data2vals(puzzle)  # Keep a SudokuVals copy
    g.puzzle = puzzle
    g.o_board = SudokuBoard(mw=g.Display_mw,
                            frame=new_main_bd_frame(),
                            data=puzzle,
                            bdWidth=g.bSize,
                            bdHeight=g.bSize)
    g.o_board.showData(force=True)
    # Propagate geometry into the control-variable settings
    cF = SelectControl()
    cF.set_val("nRow", g.main_puzzle.nRow)
    cF.set_val("nSubRow", g.main_puzzle.nSubRow)
    cF.set_val("nCol", g.main_puzzle.nCol)
    cF.set_val("nSubCol", g.main_puzzle.nSubCol)
    cF.update_settings()#
def use_puzzle(puzzle=None):
    """ Use Precreated puzzle
    Set reset_data to this
    # Pattern
    Default: PZ1
    n x n lines
    - or numeric contents

    Parses a textual grid (one row per line, '-' for an empty cell,
    '#' starts a comment), derives board/group geometry from the grid
    size, builds a SudokuPuzzle and installs it via set_puzzle().

    :puzzle: multi-line text pattern; None selects the built-in default
    :raises SelectError: on ragged rows or a non-square grid
    """
    import re  # Bug fix: this module used re.match below but never imported re
    # Test puzzles
    PZ1 = """
- 2 - -
1 4 - -
- - 3 2
- - 1 -
"""
    PZ2 = """
3 2 4 1
1 4 2 3
4 1 3 2
2 3 1 4
"""
    PZ3 = """
- 2 4 1
1 4 2 3
4 1 3 2
2 3 1 4
"""
    PZ4 = """
- - 4 1
1 4 2 3
4 1 3 2
2 3 1 4
"""
    if puzzle is None:
        # Debug leftovers: successive reassignments leave PZ1 in effect
        puzzle = PZ1
        puzzle = PZ2
        puzzle = PZ3
        puzzle = PZ4
        puzzle = PZ1
    nrow = 0
    ncol = 0
    rowcols = [] # array of rows of cols
    lines = puzzle.split("\n")
    lineno = 0
    for line in lines:
        lineno += 1
        m = re.match(r'^([^#]*)#', line) # Remove comments
        if m:
            line = m.group(1)
        m = re.match(r'^\s+(.*)$', line)  # Strip leading whitespace
        if m:
            line = m.group(1)
        m = re.match(r'^(.*)\s+$', line)  # Strip trailing whitespace
        if m:
            line = m.group(1)
        if re.match(r'^\s*$', line): # Ignore blank lines
            continue
        nrow += 1
        cs = line.split()
        if ncol > 0 and len(cs) < ncol:
            # Bug fix: interpolate the offending line number into the message
            raise SelectError(f"cols not identical in line: {lineno}")
        if len(cs) > ncol:
            ncol = len(cs)
        rowcols.append(cs)
    if ncol != nrow:
        raise SelectError(f"number of cols{ncol} != number of rows{nrow}")
    g.nRow = nrow
    g.nCol = ncol
    g.nSubCol = int(sqrt(g.nCol))   # Sub-square geometry from board size
    g.nSubRow = int(sqrt(g.nRow))
    puzzle = SudokuPuzzle(rows=nrow, cols=ncol, grows=g.nSubRow, gcols=g.nSubCol,
                          desc="Internal Puzzle")
    for ri in range(nrow):
        row = ri+1
        for ci in range(ncol):
            col = ci+1
            val = rowcols[ri][ci]
            if val == '-':
                continue # Empty cell
            puzzle.add_cell(row=row, col=col, val=int(val))
    set_puzzle(puzzle)
# Clear to an empty board
def clear_board():
    """Empty the working board data and redraw the display."""
    g.o_data.clear()
    g.o_board.showData(g.o_data)
# Close move display window
def search_stop():
    """Abort the running solution search and tear down search windows."""
    ###global Display_mw
    '''
    if Display_mw is not None:
    Display_mw.destroy()
    '''
    SlTrace.lg("search_stop")
    SudokuPly.stop_search()       # Signals the search loop to stop (raises SudokuSearchStop)
    g.res_group.destroy_all()
display_prev_time = None   # Last time progress was logged (None until first call)
display_no = 0             # Count of progress callbacks so far
def display_rtn(data):
    """ Progress display routing
    Callback handed to SudokuPly: throttles redraw/logging of the
    search-progress board, lazily (re)creating it when missing.
    :data: current search data to show
    """
    global display_prev_time
    global display_no
    if not g.running:
        return
    display_no += 1
    ###g.main_puzzle.display("display_rtn: main_puzzle")
    display_time = g.Display_time
    if display_time is None:
        return                       # Progress display disabled
    now = time.time()
    new_board = False
    searching_board = g.res_group.get_obj("searching_board")
    if searching_board is None:
        solution_search_display_setup()      # (Re)create the search window
        new_board = True
        searching_board = g.res_group.get_obj("searching_board")
    if display_prev_time is None:
        display_prev_time = now
        # NOTE(review): after(ms) with no callback just delays — confirm intent
        g.Display_mw.after(int(1000*display_time))
    dur = now - g.solve_start
    if now - display_prev_time > g.update_time:
        SlTrace.lg(f"display_rtn time:{dur:.3f}")
        display_prev_time = now
    if searching_board is not None:
        searching_board.showData(data, force=new_board)
        if SlTrace.trace("display_board"):
            searching_board.display(f"display:{display_no}")
# Setup move display
def solution_search_display_setup():
    """Create the 'Solution Searching' progress window, hook the
    display callback, and register the board in g.res_group under
    the name "searching_board"."""
    title = "Solution Searching"
    SudokuPly.setDisplay(display_rtn, g.Display_time)   # Hook progress callback
    searching_mw = Toplevel()
    searching_mw.protocol("WM_DELETE_WINDOW", search_stop)
    searching_mw.title(title)
    x = 400
    y = 600
    searching_mw.geometry(f"+{x}+{y}")
    top_fr = Frame(searching_mw)
    top_fr.pack(side = 'top')
    c1 = Button(top_fr,
                text = "Close", # Guess one
                command = search_stop,
                )
    c1.pack(side = 'left')
    if g.res_group.get("searching_board") is not None:
        g.res_group.destroy("searching_board")      # Drop any stale board first
    data = SudokuData.vals2data(g.main_puzzle)
    searching_board = SudokuBoard(mw=searching_mw,
                                  data = data,
                                  bdWidth=g.sSize*.8,
                                  bdHeight=g.sSize*.8,
                                  initialData=g.Initial_data,
                                  )
    searching_board.showData(force=True)
    g.res_group.add(ResourceEntry(searching_board), name="searching_board")
def file_open():
    """ Choose puzzle file
    Prompt for a .supz puzzle file, parse it, and install the result
    as the current main puzzle.
    """
    start_dir = r"./puzzle"
    filename = filedialog.askopenfilename(
        initialdir = start_dir,
        title = "Select puzzle file",
        filetypes = (("supz files","*.supz"),("all files","*.*")))
    spl = SudokuPuzzleLoad.set_input(pfile=filename)
    # Bug fix: the log line printed the literal "(unknown)" instead of
    # the file the user actually selected
    SlTrace.lg(f"Puzzle file name: {filename}")
    puzzle = spl.procCmdString()
    set_puzzle(puzzle, file_name=filename)
    puzzle.display("Puzzle Start")
# Create puzzle with number of cells filled in
# Set initial_data to this
def make_puzzle(nfilled=None):
    """ Create puzzle with number of cells filled in
    Set reset_data to this
    :nfilled: Number of cells filled in, None = ncell/3
    :returns: list of solution Plys from SudokuPly.makePuzzle
    """
    ###global o_data, o_board
    # NOTE(review): display_close is not defined in this module — confirm
    # it exists elsewhere in the package
    display_close()
    if (g.o_data is None):
        g.o_data = SudokuData(cols=g.nCol,
                              rows=g.nRow,
                              gcols=g.nSubCol,
                              grows=g.nSubRow,
                              )
    g.o_data.clear() # Clear data
    if g.o_board is None:
        # NOTE(review): other code here uses g.Display_mw — confirm g.mw exists
        g.o_board = SudokuBoard(mw=g.mw,
                                frame=new_main_bd_frame(),
                                data=g.o_data,
                                bdWidth=g.bSize,
                                bdHeight=g.bSize)
    ncell = g.nRow*g.nCol
    if (nfilled is None):
        nfilled = int(ncell/3)
    if nfilled & 1 != 0 and ncell & 1 != 0:
        nfilled -= 1 # No possible symitry
                     # with odd # and even row/col
    o_list = SudokuData(base=g.o_data)
    a_start_list = o_list.startCells(
        nFilled=nfilled,
        symRules=g.makePuzzleSym)
    if (a_start_list is None):
        SlTrace.lg(f"no such pattern for nRow=:{g.nRow}, nCol=:{g.nCol}"
                   + f" nFilled={nfilled} symRules={g.makePuzzleSym}")
        sys.exit("quitting")
    # Display start list, labeling cells A..Z then a..z in pick order
    sl = SudokuData(rows=g.nRow, grows=g.nSubRow, cols=g.nCol, gcols=g.nSubCol) # for diagnostic display
    SlTrace.lg(f"start list: ")
    n = 0
    for sc in a_start_list:
        val = n
        if (n <= ord('Z')-ord('A')):
            val = chr(ord('A')+n)
        elif (n < 2*(ord('Z')-ord('A'))):
            val_ord = ord('a')+n-((ord('Z')-ord('A')-1))
            val = chr(val_ord)
        sl.setCellVal(sc.row, sc.col,
                      val)
        SlTrace.lg(f" (r:{sc.row}, c:{sc.col})")
        n += 1
    sl.display()
    if (len(a_start_list) != nfilled):
        SlTrace.lg(f"Actual list = {len(a_start_list)}"
                   + f" Requested list = {nfilled}")
    # Set starting arrangement
    o_sol = SudokuPly(base=g.o_data)
    sols = o_sol.makePuzzle(
        startList=a_start_list)
    # Bug fix: "defined(sols[0])" is a Perl-ism that raised NameError;
    # the Python equivalent is an is-not-None test.
    if sols is not None and len(sols) == 1 and sols[0] is not None:
        sol = sols[0]
        g.o_data = sol.getData(subset=a_start_list)
        g.o_board.showData(g.o_data)
        g.Initial_data = SudokuData(base=g.o_data)
    return sols
main_bd_fr = None # Set if present
def new_main_bd_frame():
    """ Create a new main board frame
    Delete old frame if present
    :returns: the new Frame packed at the bottom of g.top_fr
    """
    global main_bd_fr
    if main_bd_fr is not None:
        main_bd_fr.destroy()
    main_bd_fr = Frame(g.top_fr)
    main_bd_fr.pack(side = 'bottom')
    return main_bd_fr
# Adjust puzzle to a unique puzzle
# Generally by adding starting filled cells
def adj_puzzle_uniq(sols, nfilled): # Returns: puzzle solution Ply
    """Adjust the working puzzle toward a unique one by copying up to
    nfilled values from the first solution into randomly chosen empty
    cells, then calling uniq_sol().

    Bug fixes: bare ``nCol``/``nRow``/``o_data`` are now qualified with
    ``g.``; the Perl-ism ``rand()`` is replaced with random.randint;
    CellDesc results are accessed by attribute, not subscript.

    :sols: list of solution Plys (first one is the value source)
    :nfilled: number of cells to seed
    :returns: unique solution Ply from uniq_sol
    """
    import random  # local import: replaces the undefined rand()
    sol = sols[0]
    val_max = g.nRow
    if g.nCol > val_max:
        val_max = g.nCol          # Bug fix: was bare nCol (NameError)
    SlTrace.lg(f"adj_puzzle_uniq\n")
    sol_data = sol.getData()
    for i in range(nfilled):
        nr = random.randint(1, g.nRow)   # Bug fix: was int(rand(nRow)+1)
        nc = random.randint(1, g.nCol)
        g.o_data.curCell(row=nr, col=nc)
        r_c = None
        ntry = 0
        # Space values randomly
        min_choice = 2 # Attempting to leave multiple choices
        tmc = val_max # Only look so much
        legalvals = [] # choices for candidate cell
        while True:
            for i1 in range(random.randint(0, val_max - 1) + 1):
                r_c = g.o_data.getNextEmpty()    # Bug fix: was bare o_data
                # NOTE(review): comment says "No more empty" but the test
                # breaks on a FOUND cell — possibly meant "is None"; kept as-is
                if r_c is not None:
                    break # No more empty
            legalvals = g.o_data.getLegalVals(r_c.row,
                                              r_c.col)   # Bug fix: CellDesc is not subscriptable
            tmc -= 1
            if tmc <= 0 or len(legalvals) >= min_choice:
                break
        if (r_c is None):
            SlTrace.lg(f"Can't find room for puzzle")
            break
        nr = r_c.row
        nc = r_c.col
        g.o_data.setCellVal(nr, nc, sol_data.getCellVal(nr, nc))
    sol = uniq_sol(g.o_data) # Make unique
    return sol
# Reset to initial board
def reset_board():
    """Restore the working data/display from the saved initial data."""
    g.o_data = SudokuData(base=g.Initial_data)
    g.o_board.showData(g.o_data)
# Setup move display
def set_move_display(display_time):
    """Set the delay (seconds) between search-progress displays."""
    ###global Display_time
    g.Display_time = display_time
# OK to selection
def set_selected_ok():
    """Accept the value currently chosen in the selection listbox and
    write it into the cell the listbox was opened for."""
    selecteds = g.sbox.curselection()
    # NOTE(review): curselection() returns an empty tuple when nothing is
    # selected, so [0] would raise IndexError before the None check — verify
    si = selecteds[0]
    if si is None:
        set_selected_delete()
        return
    selected_val = g.sbox_legal_vals[si]
    g.o_data.setCellVal(g.sbox_row, g.sbox_col, selected_val)
    g.o_board.showData()
    set_selected_delete()
# CANCEL to selection
def set_selected_cancel():
    """Dismiss the selection listbox without changing the cell."""
    set_selected_delete()
# Remove set_selected
def set_selected_delete():
    """Tear down the selection listbox widgets.
    NOTE(review): 'sbox_fr' and 'exists' are unqualified/undefined here
    (likely g.sbox_fr plus a missing existence check), and the destroy
    happens before the check — confirm the intended order.
    """
    sbox_fr.destroy()
    if exists(sbox_fr):
        sbox = None
def clear_solve_main_puzzle():
    """Remove all solution-search displays and detach the progress callback."""
    g.res_group.destroy_all()
    SudokuPly.setDisplay(None)
def update_report(ctl=None):
    """ Report control variable (cF) update
    ctl: control reference for convenience (unused here)
    """
    g.update_control_variables()
# Solve Puzzle
def solve_main_puzzle():
    """Solve the current main puzzle, logging each solution (bounded by
    g.nFirst) and opening a display window per solution; a user stop
    surfaces as SudokuSearchStop and is handled here."""
    g.solve_start = time.time() # Puzzle start time
    g.main_puzzle.display("solve_main_puzzle before destroy_all: main_puzzle")
    g.res_group.destroy_all() # Clearout result displays
    solutions = [] # Puzzle solution(s)
    g.main_puzzle.display("solve_main_puzzle: main_puzzle")
    solution_search_display_setup()
    # NOTE(review): local assignment only — likely meant g.Initial_data
    Initial_data = g.main_puzzle # Record initial data
    SudokuPly.clear_search_stop()
    try:
        data = SudokuData.vals2data(g.main_puzzle)
        solutions = solve_puzzle(data=data)
        puzzle_file_name = g.puzzle.file_name
        dur = time.time() - g.solve_start
        sol_time = f"in {dur:.2f} sec"
        if puzzle_file_name is None:
            puzzle_file_name = ""
        else:
            puzzle_file_name = os.path.basename(puzzle_file_name)
        if len(solutions) == 0:
            SlTrace.lg(f"No solution to puzzle {sol_time} {puzzle_file_name}")
        else:
            nsol = len(solutions)
            SlTrace.lg(f"Puzzle solved - {nsol} solution{'' if nsol == 1 else 's'}"
                       + f" {sol_time} {puzzle_file_name}"
                       )
            nth = 0
            for r_solution in solutions:
                nth += 1
                r_solution.display(f"Solution {nth} of {nsol} {puzzle_file_name}")
                solve_main_puzzle_display(r_solution,
                                          f"Solution {nth} of {nsol}",
                                          nth,
                                          nsol)
    except SudokuSearchStop:
        SlTrace.lg("SudokuSearchStop")
        clear_solve_main_puzzle()
    g.res_group.destroy_all()
    SudokuPly.setDisplay(None)
#
def solve_main_puzzle_display(r_solution, title=None, nth=None, nsol=None):
    """ Add solution display
    :r_solution: solution (Ply/data) to display
    :title: window title; defaults to "Solution"
    :nth: ordinal position of solution (staggers window placement)
    :nsol: total number of solutions (defaults to 1)
    """
    # Bug fix: the default tests were inverted ("is not None"), which
    # clobbered caller-supplied values and left None defaults untouched.
    if title is None:
        title = "Solution"
    if nsol is None:
        nsol = 1
    if nth is None:
        nth = 1            # Robustness: nth is used for placement below
    mw = Toplevel()
    mw.protocol("WM_DELETE_WINDOW", search_stop)
    mw.title(title)
    x = 400
    y = 200
    x += 100*nth
    y += 100*nth
    mw.geometry(f"+{x}+{y}")
    # Find first empty slot, extending if necessary
    top_fr = Frame(mw)
    top_fr.pack(side = 'top')
    c1 = Button(top_fr,
                text = "Close", # Guess one
                # Bug fix: a list is not callable as a Tk command; bind the
                # ordinal with a default-argument lambda instead
                command = lambda si=nth: solve_puzzle_close(si),
                )
    c1.pack(side = 'left')
    if nsol > 1:
        c2 = Button(top_fr,
                    text = "Close All", # Close all
                    command = solve_puzzle_close_all,
                    )
        c2.pack(side = 'left')
    board = SudokuBoard(mw=mw,
                        data=r_solution,
                        bdWidth=g.sSize,
                        bdHeight=g.sSize,
                        initialData=g.Initial_data,
                        )
    g.res_group.add(ResourceEntry(mw), number=nth)
    board.showData(force=True)
# Close solution window
def solve_puzzle_close(si):
    """Close one solution window.
    :si: index into g.mws of the window to close
    """
    mw = g.mws[si]
    if mw is not None:
        # Bug fix: the original called an undefined exists() and destroyed
        # the window BEFORE checking it; destroy, then clear the slot.
        mw.destroy()
        g.mws[si] = None
# Close all solution windows
def solve_puzzle_close_all():
    """Destroy every tracked solution/search window."""
    g.res_group.destroy_all()
def solve_puzzle(data=None): # Returns: ref to solution, else None
    """Solve the given data, returning up to g.nFirst solutions.
    :data: SudokuData to solve (required)
    :raises SelectError: if data is missing
    """
    r_data = data
    if r_data is None:
        # Bug fix: typo in the message ("solve_uzzle")
        raise SelectError("solve_puzzle: data missing")
    solve_puzzle_close_all()
    s_ply = SudokuPly(base=r_data)
    return s_ply.solveChoice(first=g.nFirst)
#
def uniq_sol(r_data): #
    """ Return a puzzle with a unique solution
    :returns: SudokuPly with one solution, else None
    Repeatedly pins down one ambiguous cell (one that takes different
    values across the current solutions) until a single solution remains.

    Bug fixes: ``r_nc-['col']`` was a syntax error and CellDesc is not
    subscriptable (attribute access used instead); ``isEmpty`` takes a
    value, not (row, col); bare ``nRow`` qualified with ``g.``; CellDesc
    has no ``nval`` field, so the sort keys on len(vals).
    """
    ### return r_sols[0] #### STUB
    s_ply = SudokuPly(base=g.o_data)
    sols = s_ply.solveChoice(first=g.nRow)
    while (len(sols) > 1):
        squares = []
        for ri in range(g.nRow):
            row = ri + 1
            for ci in range(g.nCol):
                col = ci + 1
                if not r_data.isEmptyCell(row, col):
                    continue
                valh = {}
                for r_sol in sols:
                    val = r_sol.getCellVal(row, col)
                    if r_data.isEmpty(val):    # Bug fix: was isEmpty(row, col)
                        SlTrace.lg(f"Empty sol row={row}, col={col}")
                        continue
                    valh[val] = 1
                vals = list(valh.keys())       # Bug fix: need an indexable list
                nval = len(vals) # Number of different values
                if nval > 1:
                    squares.append(CellDesc(row=row, col=col, vals=vals))
        squares.sort(key=lambda cell: len(cell.vals))
        r_nc = squares[0] # Fewest alternative values first
        r_data.setCellVal(r_nc.row, r_nc.col, r_nc.vals[0])
        s_ply = SudokuPly(base=g.o_data)
        sols = s_ply.solveChoice(first=g.nRow) # Bug fix: was bare nRow
    return sols[0] # stub - just return first if any
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,686
|
raysmith619/sudoku
|
refs/heads/master
|
/puzzle/sudoku_vals.py
|
#sudoku_vals.py
# Adapted from SudokuData.pm with just values
#################
## sudokuVals ##
#################
from select_trace import SlTrace
from select_error import SelectError
EMPTY = 0 # Empty cell def
class CellDesc:
    """ Cell description
    Lightweight record of one board cell: 1-based position, optional
    value, candidate values and display id.
    """
    # Display geometry placeholders (set by board/display code)
    x1 = x2 = y1 = y2 = None

    def __init__(self, row=None, col=None,
                 val=None, vals=None, valId=None):
        """ Cell description row,col, optionally value
        :row:/:col: 1-based cell position
        :val: cell value, if known
        :vals: candidate values; defaults to a FRESH empty list
               (bug fix: the original used a shared mutable default [])
        :valId: display id
        """
        self.row = row
        self.col = col
        self.val = val
        self.vals = [] if vals is None else vals
        self.valId = valId

    def copy(self):
        """ Copy info
        :Returns: deep copy
        """
        cp = CellDesc(row=self.row, col=self.col, val=self.val,
                      vals=self.vals[:], valId=self.valId)
        return cp
class ValueChoice:
    """ Attributes of a selection choice
    A candidate cell for the next move: its position, the legal values
    available there, and how many such values exist.
    """
    def __init__(self, vals=None, row=None, col=None, nval=None):
        self.vals = vals
        self.row = row
        self.col = col
        self.nval = nval

    def __repr__(self):
        text = "ValueChoice"
        text += f" row={self.row} col={self.col} nval={self.nval}"
        if self.vals is not None and len(self.vals) > 0:
            joined = '<'.join(str(v) for v in self.vals)
            text += f" vals=[{joined}]"
        return text
class SudokuVals:
    """Values-only Sudoku board.

    Rows and columns are 1-based in the public API; storage is an
    nRow x nCol list-of-lists with EMPTY (0) meaning unset.
    """
    EMPTY = 0

    def __init__(self, rows=None, cols=None, grows=None, gcols=None,
                 base=None):
        """
        :rows: number of rows down the whole board
        :cols: number of cols accross the whole board
        :grows: number of rows in a sub-square group
        :gcols: number of cols in a sub-square group
        :base: optional SudokuVals whose geometry and values are copied
        :raises SelectError: if neither explicit geometry nor base given
        """
        if rows is None:
            if base is None:
                raise SelectError("Neither base nor rows was specified")
            rows = base.nRow
            grows = base.nSubRow
        self.nRow = rows
        self.nSubRow = grows
        if cols is None:
            if base is None:
                raise SelectError("Neither base nor cols was specified")
            cols = base.nCol
            gcols = base.nSubCol
        self.nCol = cols
        self.nSubCol = gcols
        self.vals = [[EMPTY for ci in range(self.nCol)] for ri in range(self.nRow)]
        if base is not None:
            for ir in range(base.nRow):
                for ic in range(base.nCol):
                    self.vals[ir][ic] = base.vals[ir][ic]

    # Clear data to empty
    def clear(self):
        """Set every cell to EMPTY."""
        for ir in range(self.nRow):
            for ic in range(self.nCol):
                self.clearCell(row=ir+1, col=ic+1)

    # Clear cell
    def clearCell(self, row=None, col=None):
        """Empty a single cell.
        Bug fix: the original called self.setCellVal(self, ...), passing
        self twice and raising TypeError on every call.
        """
        self.setCellVal(row=row, col=col, val=EMPTY)

    def copy(self): # Returns: deep copy
        """Return an independent copy of this board."""
        copy = SudokuVals(base=self)
        return copy

    # Get cell info
    def getCell(self, row, col): # Returns: data cell
        """Return a CellDesc for (row, col) including its value."""
        val = self.getCellVal(row=row, col=col)
        cell = CellDesc(row=row, col=col, val=val)
        return cell

    def getCellVal(self, row=None, col=None):
        """ get cell Value
        :row: row number (1-nRow)
        :col: col number (1-nCol)
        :raises SelectError: if row/col out of bounds
        """
        if row < 1 or row > self.nRow:
            # Bug fix: message referenced nonexistent attribute self.nRos
            raise SelectError(f"bad row({row}) value should be 1-{self.nRow}")
        if col < 1 or col > self.nCol:
            # Bug fix: message said "bad row" and used self.nRos
            raise SelectError(f"bad col({col}) value should be 1-{self.nCol}")
        return self.vals[row-1][col-1]

    def setCell(self, row=None, col=None, val=None):
        """ set cell Value
        :row: row number
        :col: col number
        :val: cell value default: None
        :returns: CellDesc describing the set cell
        """
        self.setCellVal(row=row, col=col, val=val)
        return CellDesc(row=row, col=col, val=val)

    def setCellVal(self, row=None, col=None, val=None):
        """ set cell Value
        :row: row number
        :col: col number
        :val: cell value default: None
        :raises SelectError: if row/col out of bounds
        """
        if row < 1 or row > self.nRow:
            # Bug fix: message referenced nonexistent attribute self.nRos
            raise SelectError(f"bad row({row}) value should be 1-{self.nRow}")
        if col < 1 or col > self.nCol:
            raise SelectError(f"bad col({col}) value should be 1-{self.nCol}")
        if SlTrace.trace("setCell"):
            SlTrace.lg(f"setCellVal row={row} col={col} val={val}")
        self.vals[row-1][col-1] = val

    # Check if empty
    # any non-zero numeric is filled
    def isEmpty(self, val=None): # Returns: True iff empty value
        """True for None, 0 or the string "0"."""
        if val is None or val == "0" or val == 0:
            return True
        return False

    # Is cell empty
    def isEmptyCell(self, row, col): # Returns: True iff empty cell
        """True iff the cell at (row, col) holds an empty value."""
        val = self.getCellVal(row, col)
        return self.isEmpty(val)

    def destroy(self):
        """ Release any resources, cleanup display
        """
        pass # Nothing for now

    # Simple display of data area
    # For diagnostic purposes
    def display(self, msg=None):
        """Log a simple ASCII rendering of the board (diagnostics).
        NOTE(review): the group-divider test (nr % nSubRow == 1) misfires
        when nSubRow == 1 — confirm boards always have groups > 1.
        """
        display_str = ""
        if msg is None:
            msg = "Data Display"
        display_str += f"{msg}\n"
        if self.vals is None:
            raise SelectError("data gone")
        horiz_grp_divider = " " + "-" * (2*self.nCol+self.nSubCol-1) + "\n"
        for nr in range(1, self.nRow+1):
            if nr % self.nSubRow == 1:
                display_str += horiz_grp_divider
            for nc in range(1, self.nCol+1):
                if nc == 1:
                    display_str +="|"
                val = self.getCellVal(row=nr, col=nc)
                disp = " " if self.isEmpty(val) else f"{val} "
                display_str += disp
                if nc % self.nSubCol == 0:
                    display_str += "|"
            display_str += "\n"
        display_str += horiz_grp_divider
        SlTrace.lg(display_str)

    # Assemble list of next move choices
    # sorted in ascending number of values per cell
    def getChoices(self): # Returns: ref to sorted array of choices
        """Collect every empty cell and order them by candidate count."""
        cells = [] # Array of open cell cells
                   # to be ordered in increasing
                   # number of values
        for ri in range(self.nRow):
            row = ri + 1
            for ci in range(self.nCol):
                col = ci + 1
                if self.isEmptyCell(row=row, col=col):
                    cells.append(CellDesc(row=row, col=col))
        return self.orderChoices(cells)

    # Assemble list of next move choices
    # sorted in ascending number of values per cell
    @staticmethod
    def _choices_cmp_val(elm):
        """Sort key: number of legal values in the choice."""
        return elm.nval

    def orderChoices(self, cells):
        """
        :cells: List of cells
        :returns: ref to sorted array of choices
                  ordered in increasing number of values
        """
        choices = [] # Populated with ValueChoice per still-empty cell
        for cell in cells:
            col = cell.col
            row = cell.row
            if not self.isEmptyCell(row, col):
                continue
            vals = self.getLegalVals(row, col)
            nval = len(vals)
            choice = ValueChoice(vals=vals, row=row, col=col, nval=nval)
            choices.append(choice)
        choices_srt = sorted(choices, key=lambda x: x.nval)
        return choices_srt

    # Get data values in given column
    def getColVals(self, col=None, include_nones=None):
        """ Get values in given column
        :col: column number
        :include_nones: True - include empty values in list
        :Returns: values in column
        """
        if col is None or col < 1 or col > self.nCol:
            raise SelectError(f"bad col number {col}")
        vals = []
        for ri in range(self.nRow):
            row = ri + 1
            val = self.getCellVal(row=row, col=col)
            if include_nones or not self.isEmpty(val):
                vals.append(val)
        return vals

    def getLegalVals(self, row=None, col=None): # Returns: array of legal values
        """ Get all values for given cell given other cells in data
        Returns array, possibly empty of legal cell values
        returned values are sorted in ascending order
        cks for defined row,col and out of bounds
        :row: row to consider
        :col: col to consider
        :returns: sorted candidate values
        """
        if (row is None or row < 1 or row > self.nRow
                or col is None or col < 1
                or col > self.nCol): # Safety check
            return []
        usedH = {} # Add to list as found
        # Allow EMPTY
        row_vals = self.getRowVals(row)
        for row_val in row_vals:
            usedH[row_val] = 1
        col_vals = self.getColVals(col)
        for col_val in col_vals:
            usedH[col_val] = 1
        sq3_vals = self.getSq3Vals(row, col)
        for sq3_val in sq3_vals:
            usedH[sq3_val] = 1
        legal_vals = []
        for n in range(1, self.nRow+1):
            if n not in usedH:
                legal_vals.append(n)
        if SlTrace.trace("any"):
            lvstrs = list(map(str, sorted(legal_vals)))
            SlTrace.lg(f"getLegals(row={row}, col={col} = "
                       + ", ".join(lvstrs))
        return sorted(legal_vals)

    def getNonEmptyCells(self):
        """ Return array of none empty cells
        """
        nonemptys = []
        for ri in range(self.nRow):
            for ci in range(self.nCol):
                val = self.vals[ri][ci]
                if not self.isEmpty(val):
                    row = ri+1
                    col = ci+1
                    nonemptys.append(CellDesc(row=row, col=col, val=val))
        return nonemptys

    def getRowVals(self, row=None, include_nones=None): # Returns: row values
        """ Get values in given row
        :row: row number
        :include_nones: True - include empty values in list
        :Returns: values in row
        """
        if row is None or row < 1 or row > self.nRow:
            raise SelectError(f"bad row number :{row}")
        vals = []
        for ci in range(self.nCol):
            col = ci + 1
            val = self.getCellVal(row=row, col=col)
            if include_nones or not self.isEmpty(val):
                vals.append(val)
        return vals

    #
    def getSq3Vals(self, row=None, col=None):
        """ Get valuse in sub-by-sub square
        :row: current row
        :col: current col
        :Returns: list of values in current sub square
        """
        if row is None or row < 1 or row > self.nRow:
            raise SelectError(f"bad row {row}")
        if col is None or col < 1 or col > self.nCol:
            raise SelectError(f"bad col {col}")
        sq3_vals = []
        first_row = 1 + self.nSubRow*int((row-1)/self.nSubRow)
        first_col = 1 + self.nSubCol*int((col-1)/self.nSubCol)
        for ir in range(self.nSubRow):
            for ic in range(self.nSubCol):
                r = first_row + ir
                c = first_col + ic
                val = self.getCellVal(r, c)
                if not self.isEmpty(val): # Assumes no duplicates
                    sq3_vals.append(val)
        if SlTrace.trace("any"):
            sq3_vals_strs = list(map(str, sq3_vals))
            SlTrace.lg(f"getSq3Vals(row={row}, col={col}) = "
                       + ", ".join(sq3_vals_strs))
        return sq3_vals

    def isValid(self):
        """ Check for valid arrangement
        (no duplicate non-empty values in any row or column)
        """
        for ir in range(self.nRow): # Check rows for duplicates
            row = ir + 1
            vals = {}
            for ic in range(self.nCol):
                col = ic + 1
                val = self.getCellVal(row=row, col=col)
                if not self.isEmpty(val):
                    if val in vals:
                        SlTrace.lg(f"doing row {row} at col={col} val={val} vals={vals} invalid")
                        SlTrace.lg(f"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}")
                        return False
                    vals[val] = val
        for ic in range(self.nCol): # Check cols for duplicates
            col = ic + 1
            vals = {}
            for ir in range(self.nRow):
                row = ir + 1
                val = self.getCellVal(row=row, col=col)
                if not self.isEmpty(val):
                    if val in vals:
                        SlTrace.lg(f"at row={row} doing col={col} val={val} vals={vals} invalid")
                        SlTrace.lg(f"row:{row} vals: {self.getRowVals(row)} col:{col} vals: {self.getColVals(col)}")
                        return False
                    vals[val] = val
        return True
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,687
|
raysmith619/sudoku
|
refs/heads/master
|
/time_example.py
|
"""Minimal elapsed-time example.

Bug fix: timeit.timeit() with no arguments BENCHMARKS a trivial 'pass'
statement and returns its run time, so subtracting two such calls is
meaningless noise. timeit.default_timer() is the monotonic wall clock
intended for start/stop timing.
"""
import timeit

start = timeit.default_timer()
print("hello")
end = timeit.default_timer()
print(end - start)
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,688
|
raysmith619/sudoku
|
refs/heads/master
|
/src/SudokuData.py
|
#SudokuData.py
# Adapted from SudokuData.pm
#################
## sudokuData ##
#################
import re
import copy
from random import randint
from select_trace import SlTrace
from select_error import SelectError
from sudoku_vals import SudokuVals, CellDesc
base=None
"""
Marker difinition
"""
class CellMark:
    """Display marker for one board cell: position plus widget styling."""
    def __init__(self, row=None, col=None, id=None, rc=None,
                 tag=None, backColor=None, boundColor=None,
                 boundWidth=None, boundWidget=None):
        """
        :row:/:col: cell position; if row is None, both come from rc
        :id: display id
        :rc: CellDesc-like source of row/col
        :tag: canvas tag
        :backColor:/:boundColor:/:boundWidth:/:boundWidget: styling info
        """
        if row is None:
            row = rc.row
            col = rc.col
        self.row = row
        self.col = col
        self.id = id
        # Bug fix: trailing commas made self.rc and self.tag one-element tuples
        self.rc = rc
        self.tag = tag
        self.backColor = backColor
        # Bug fix: boundColor was accepted but never stored
        self.boundColor = boundColor
        self.boundWidth = boundWidth
        self.boundWidget = boundWidget
class SudokuData:
@classmethod
def data2vals(cls, data):
    """ Convert to SudokuVals from SukokuData or subclasses
    :data: SudokuData, SudokuPly, SudokuPuzzle
    :returns: SudokuVals populated with data's non-None cell values
    """
    dv = SudokuVals(rows=data.nRow, grows=data.nSubRow,
                    cols=data.nCol, gcols=data.nSubCol)
    for nr in range(1, data.nRow+1):
        for nc in range(1, data.nCol+1):
            val = data.getCellVal(row=nr, col=nc)
            if val is not None:
                dv.setCellVal(row=nr, col=nc, val=val)
    return dv
@classmethod
def vals2data(cls, sval):
    """ Convert SudokuVals to SukokuData
    :sval: SudokuVals
    :Returns: SudokuData populated with sval's non-None cell values
    """
    sd = SudokuData(rows=sval.nRow, grows=sval.nSubRow,
                    cols=sval.nCol, gcols=sval.nSubCol)
    for nr in range(1, sval.nRow+1):
        for nc in range(1, sval.nCol+1):
            val = sval.getCellVal(row=nr, col=nc)
            if val is not None:
                sd.setCellVal(row=nr, col=nc, val=val)
    return sd
def __init__(self, rows=None, grows=None, cols=None, gcols=None,
             base = None, puzzle=None):
    """
    :rows: number of rows down the whole board
    :grows: number of row cells in sub square
    :cols: number of cols accross the whole board
    :gcols: number of col cells
    :base: basis for this data
    :puzzle: Base puzzle, if known
    Geometry falls back to base's when not given explicitly; values
    are copied from base into a fresh SudokuVals store.
    """
    if puzzle is None:
        if base is not None:
            puzzle = base.puzzle
    self.puzzle = puzzle # Mark main puzzle
    if rows is None:
        if base is None:
            raise SelectError("Neither base nor rows was specified")
        rows = base.nRow
    self.nRow = rows
    if grows is None:
        if base is None:
            raise SelectError("Neither base nor grows was specified")
        grows = base.nSubRow
    self.nSubRow = grows
    if cols is None:
        if base is None:
            raise SelectError("Neither base nor cols was specified")
        cols = base.nCol
    self.nCol = cols
    if gcols is None:
        if base is None:
            raise SelectError("Neither base nor gcols was specified")
        gcols = base.nSubCol
    self.nSubCol = gcols
    # Cursor starts at the last cell so the first advanceCell() wraps to (1,1)
    self.curRow = self.nRow
    self.curCol = self.nCol # Most recently filled
    self.markH = {} # Marked cells
    self.cells = None # Array of cell data
    if base is not None:
        self.vals = self.data2vals(base)
    else:
        self.vals = SudokuVals(rows=rows, grows=grows, cols=cols, gcols=gcols)
def advanceCell(self): # Returns CellDesc array (row,col)
    """ Advance to next data cell
    Current and only pattern is row1 col1->nCol, row2 col1->nCol, ...
    wrapping at nRow,nCol to row1,col1
    curRow, curCol are updated
    :returns: CellDesc
    """
    row = self.curRow
    col = self.curCol
    col += 1
    if col > self.nCol:        # Past end of row: move to next row
        row += 1
        col = 1
    if row > self.nRow:        # Past last row: wrap to top
        row = 1
    self.curRow = row
    self.curCol = col
    return CellDesc(row=row, col=col)
# Clear data to empty
def clear(self):
    """Empty every cell (delegates to the SudokuVals store)."""
    self.vals.clear()
# Clear sell
def clearCell(self, row=None, col=None):
    """Empty one cell (delegates to the SudokuVals store)."""
    self.vals.clearCell(row=row, col=col)
def copy(self): # Returns: deep copy
    """Deep copy via the base-copy constructor."""
    copy = SudokuData(base=self)   # NOTE: shadows the imported copy module locally
    return copy
# Set / Get current cell
def curCell(self, cd=None, row=None, col=None): # Returns: r_c ref to cell structure
    """Get or set the search cursor.
    With no arguments, return the current position as a CellDesc;
    with cd OR row/col (mutually exclusive), move the cursor there.
    :raises SelectError: if both cd and row/col are given
    """
    if cd is not None and (row is not None or col is not None):
        raise SelectError("curCell: cd and row,col specified - allow only cd or row,col")
    if cd is None and row is None and col is None:
        return CellDesc(row=self.curRow, col=self.curCol)
    if cd is not None:
        self.curRow = cd.row
        self.curCol = cd.col
        return cd
    self.curRow = row
    self.curCol = col
    return CellDesc(row=self.curRow, col=self.curCol)
def destroy(self):
    """ Destroy data
    """
    pass # Nothing for now
# Simple display of data area
# For diagnostic purposes
def display(self, msg=None):
    """Log an ASCII rendering of the board (delegates to SudokuVals)."""
    if msg is None:
        msg = "SudokuData"
    self.vals.display(msg=msg)
def setCell(self, row=None, col=None, val=None):
    """ Set cell value
    :returns: updated cell (CellDesc)
    Logs full board state and raises SelectError if the arrangement
    becomes invalid after the set.
    """
    ret = self.vals.setCell(row=row, col=col, val=val)
    if not self.isValid():
        SlTrace.lg(f"setCell: row={row}, col={col}, val={val} not valid")
        self.display("Invalid arrangement")
        SlTrace.lg("by rows")
        for ri in range(self.nRow):
            nr = ri + 1
            SlTrace.lg(f"row:{nr} vals:{self.getRowVals(nr, include_nones=True)}")
        SlTrace.lg("by columns")
        for ci in range(self.nCol):
            nc = ci + 1
            SlTrace.lg(f"col:{nc} vals:{self.getColVals(nc, include_nones=True)}")
        self.display("After listing")
        raise SelectError("Invalid arrangement")
    return ret
def isValid(self):
    """ Check for valid arrangement
    (delegates to the SudokuVals row/column duplicate check)
    """
    return self.vals.isValid()
def setData(self, r_ds=None):
    """Set data
    clear if no data array
    :r_ds: 2-D array [row][col] of CellDesc objects to copy in
    """
    if self.cells is not None: # Clear
        del self.cells
    self.cells = [[CellDesc(row=ri+1, col=ci+1) for ci in range(self.nCol)]
                  for ri in range(self.nRow)]
    if r_ds is not None:
        # Bug fix: the loop bounds were swapped (the row index iterated
        # over nCol and vice versa), which breaks on non-square boards.
        for ir in range(self.nRow):
            for ic in range(self.nCol):
                clds = r_ds[ir][ic]
                self.cells[ir][ic] = clds.copy()
def copy_cells(self):
    """Return a deep copy of the cell matrix (self.cells)."""
    return copy.deepcopy(self.cells)
# Deep copy of our data
# copies not refs to all contained data
def deepCopy(self): # Returns: blessed deep copy of our data
    """Deep copy of cells/cursor/marks.
    NOTE(review): appears broken/dead — SudokuData() with no arguments
    raises SelectError in __init__; r_c_cells starts empty so the indexed
    assignment would IndexError; and r_c_markH is built but never attached
    to the copy. Prefer copy()/copy_cells().
    """
    copy = SudokuData()
    r_c_cells = copy.cells = []
    r_cells = self.cells
    for ri in range(self.nRow):
        for ci in range(self.nCol):
            r_c_cells[ri][ci] = r_cells[ri][ci]
    copy.curRow = self.curRow
    copy.curCol =self.curCol
    r_c_markH = {}
    r_markH = self.markH
    for key in r_markH:
        r_c_markH[key] = r_markH[key]
    return copy
def getLegalVals(self, row=None, col=None): # Returns: array of legal values
    """ Get all values for given cell given other cells in data
    Returns array, possibly empty of legal cell values
    returned values are sorted in ascending order
    cks for defined row,col and out of bounds
    :row: row to consider
    :col: col to consider
    :returns: sorted candidate values (delegates to SudokuVals)
    """
    return self.vals.getLegalVals(row=row, col=col)
# Get next empty cell
# continues, starting with (row,col) after most recently filled
# Returns reference to cell descriptor {row=n, col=n) if one
# None if no empty cell found
#
def getNextEmpty(self, cd=None, row=None, col=None): # Returns: cell descriptor, else None
    """Find the next empty cell at/after the cursor, wrapping around.
    :cd:/:row:,:col: optional starting position (moves the cursor)
    :returns: CellDesc of an empty cell, None when the board is full
    """
    if cd is not None or row is not None:
        self.curCell(cd=cd, row=row, col=col)
    cell = self.curCell()
    row = cell.row
    col = cell.col
    if (self.isEmptyCell(cell.row, cell.col)):
        # NOTE(review): the cursor advances but the unchanged cell is
        # re-tested below — confirm intended sequencing
        self.advanceCell()
    if self.isEmptyCell(cell.row, cell.col):
        return cell
    ncell = self.nRow*self.nCol   # Bound the scan to one full lap
    ntry = 0
    if SlTrace.trace("empty"):
        SlTrace.lg("getNextEmpty()")
    while True:
        cd = self.advanceCell()
        row,col = cd.row, cd.col
        if SlTrace.trace("empty"):
            SlTrace.lg(f" getNextEmpty check row={row}, col={col}")
        ntry += 1
        val = self.getCellVal(row, col)
        if self.isEmpty(val):
            SlTrace.lg(f"getNextEmpty - got row={row}, col={col}", "empty")
            return CellDesc(row=row, col=col) # Return empty cell descriptor
        if ntry >= ncell:
            if SlTrace.trace("empty"):
                SlTrace.lg("getNextEmpty - NONE FOUND")
            return None
def getNumEmpty(self):
    """ Return number of empty cells
    NOTE(review): delegates to self.vals.getNumEmpty, which is not
    visible in SudokuVals here — confirm it exists.
    """
    return self.vals.getNumEmpty()
# Select legal/reasonable starting values for list of cells
def pickStartValues(self, startList=None,
                    clear=True ): # Returns: True iff possible
    """Fill each cell in startList with its first legal value.
    :startList: list of CellDesc positions to fill
    :clear: True - start from an emptied board
    :returns: True iff every listed cell could be given a legal value
    :raises SelectError: on a cell with no row set
    """
    a_start_list = startList
    if clear:
        self.clear() # Start with cleared cells
    for r_c in a_start_list:
        row = r_c.row
        if row is None:
            raise SelectError("pickStartValues: bad row")
        col = r_c.col
        legals = self.getLegalVals(row, col)
        # Bug fix: "legals == 0" compared a list to an int (always False),
        # so an unfillable cell was never detected; test emptiness instead.
        if not legals:
            return False
        self.setCellVal(row, col, legals[0])
    return True
# Check if empty
# any non-zero numeric is filled
def isEmpty(self, val=None): # Returns: True iff empty value
    """True for None, 0 or the string "0"."""
    if val is None or (isinstance(val, str) and val == "0") or val == 0:
        return True
    return False
# Is cell empty
def isEmptyCell(self, row, col): # Returns: True iff empty cell
    """True if the cell at (row, col) is absent or holds an empty value.
    NOTE(review): getCell is not defined in the visible part of this
    class — presumably delegates to self.vals.getCell; confirm.
    """
    cell = self.getCell(row, col)
    val = cell if cell is None else cell.val
    SlTrace.lg(f"isEmpty(row={row} col={col}) val:{val}", "empty")
    if cell is None:
        return True # Empty
    return self.isEmpty(cell.val)
#
def getSq3Vals(self, row=None, col=None):
    """ Get valuse in sub-by-sub square
    :row:/:col: cell identifying the sub-square
    :Returns: values present in that sub-square
    """
    # Bug fix: self was passed explicitly as well as implicitly
    # (self.vals.getSq3Vals(self, ...)), shifting every argument and
    # raising TypeError at runtime.
    sq3_vals = self.vals.getSq3Vals(row=row, col=col)
    return sq3_vals
def getColVals(self, col=None, include_nones=None):
""" Get values in given row
:row: column number
:include_nones: True - include Nones in list
:Returns: values in row
"""
return self.vals.getColVals(col=col, include_nones=include_nones)
    def getRowVals(self, row=None, include_nones=None):  # Returns: row values
        """ Get values in given row
        :row: row number
        :include_nones: True - include Nones in list
        :Returns: values in the row, via the values store
        """
        return self.vals.getRowVals(row=row, include_nones=include_nones)
def setCellVal(self, row=None, col=None, val=None, quiet=False):
""" set Sudoku cell with value - may be EMPTY
:row: 1-nRow
:col: 1-nCol
:val: EMPTY, 1-9, marking value
:quiet: 1 -> no trace, no cell change
"""
self.vals.setCellVal(row=row, col=col, val=val)
if not quiet: # quiet -> move invisibly also
SlTrace.lg(f"setCellVal(row:{row}, col:{col}, val:{val})", "any")
self.curRow = row
self.curCol = col
return val
#
def startCells(self, nFilled=None, symRules=None, ):
""" Find legal list of cells
Sets data
To provide atleast 2-way symitry with an
odd lengthed board, one
adds an "odd" cell to the board center
cell
:returns: list of CellDesc
"""
if nFilled is None:
raise SelectError("startList nFilled is missing")
if symRules is None:
symRules = "c"
symRules = symRules.lower()
sym_c = True if re.search(r'c', symRules) else False
sym_x = True if re.search(r'x', symRules) else False
sym_y = True if re.search(r'y', symRules) else False
nf = 0 # Number filled
start_cells = [] # List of start cells in order
if nFilled % 2!= 0 and (self.nRow % 2 == 1 or self.nCol % 2 == 1):
crow = int((self.nRow+1)/2)
ccol = int((self.nCol+1)/2)
self.setCellVal(crow, ccol, 1)
start_cells.append(CellDesc(row=crow, col=ccol))
nf += 1
while nf < nFilled:
row = randint(1,self.nRow)
col = randint(1, self.nCol)
r_c = self.getNextEmpty(row=row, col=col)
if r_c is None:
break
row = r_c.row # Update iff necessary
col = r_c.col
if sym_c:
srow, scol = self.symCell(symRule='c',
row=row,
col=col)
if self.isEmptyCell(srow, scol):
self.setCellVal(srow, scol, 1)
start_cells.append(CellDesc(row=srow, col=scol))
nf += 1
# Add original if not there
if (self.isEmptyCell(row, row)):
self.setCellVal(row, col, 1)
start_cells.append(CellDesc(row=row,
col=col))
nf += 1
return start_cells
#
def getCell(self, row=None, col=None, quiet = False):
""" get Sudoku cell may be EMPTY
:row: # 1-nRow
:col: # 1-nCol
:quiet:, # supress trace and cell movement default: False
:returns: cell
"""
val = self.vals.getCellVal(row=row, col=col)
return CellDesc(row=row, col=col, val=val)
# Get data cell
def getCellVal(self, row=None, col=None):
return self.vals.getCellVal(row=row, col=col)
# Assemble list of next move choices
# sorted in ascending number of values per cell
def getChoices(self): # Returns: ref to sorted array of choices
return self.vals.getChoices()
def orderChoices(self, cells):
"""
:cells: List of cells
:returns: ref to sorted array of choices
# to be ordered in increasing
# number of values
"""
return self.vals.orderChoices(cells)
# Get list of non-empty cells
def getNonEmptyCells(self): # Returns: array of {row=, col=}
return self.vals.getNonEmptyCells()
# Find symetric cell
def symCell(self, row=None, col=None, symRule=None): # Returns: CellDesc
if symRule is None:
symRule = "c"
symRule = symRule.lower()
srow = row # Symetric cell coord
scol = col
if re.search(r'x', symRule):
scol = self.nCol-col+1
elif re.search(r'y', symRule):
srow = self.nRow-row+1
elif re.search(r'c', symRule):
srow = self.nRow-row+1
scol = self.nCol-col+1
return (srow, scol)
# Remove one or all
def unmarkCell(self, row, col):
r_markh = self.markH
if row is None:
r_markh = {} # Unmark all
else:
rckey = f"{row}:{col}"
if rckey in r_markh:
del r_markh[rckey] # Clear
self.markH = r_markh # Update stored settings
# Mark cell
#
def markCell(self, row, col):
r_markh = self.markH
rckey = f"{row}:{col}"
r_markh[rckey] = 1
# Check if cell is marked
def isMarked(self, row, col): # Returns: 1 if marked else 0
r_markh = self.markH
rckey = f"{row}:{col}"
marked = not rckey in r_markh
return marked
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,689
|
raysmith619/sudoku
|
refs/heads/master
|
/sudoku_globals.py
|
# sudoku_globals.py
""" Program Globals
We might restructure the program to avoid this at a later date,
but to push ahead with most of the structure intact we have these global variables.
"""
from select_trace import SlTrace
from resource_group import ResourceGroup
from select_control import SelectControl
def initialize_globals():
    """Create/reset every module-level global to its startup value.

    Must be called before other modules read these globals.
    """
    global res_group
    global Display_time, Display_prev_time, DisplayRtn, Display_mw, Display_board
    global nRow, nSubRow, nCol, nSubCol, si, sbox_fr, sbox, sbox_row, sbox_col, sbox_legal_vals
    global o_data, o_board, Initial_data, bSize, sSize, nFirst, makePuzzle, makePuzzleSym
    # NOTE(review): UsePuzzle is declared global but never assigned below - confirm
    global traceList, UsePuzzle
    global main_puzzle, puzzle
    global top_bd_fr
    global solve_start
    global running
    global cF, vC  # variable control
    global update_time
    global run_after_load
    running = True  # Process / Display is running
    update_time = None
    res_group = ResourceGroup()
    Display_time = None
    Display_prev_time = None  # Previous display time
    DisplayRtn = None
    Display_mw = None
    Display_board = None
    puzzle = main_puzzle = None
    cF = vC = None
    nRow = 9  # number of rows down the whole board
    nSubRow = 3  # number of row cells in sub square
    nCol = 9  # number of cols across the whole board
    nSubCol = 3  # number of col cells
    si = 0  # Selection index
    sbox_fr = None  # Set value frame
    sbox = None  # selection box
    sbox_row = None  # selected cell row
    sbox_col = None  # selected cell column
    sbox_legal_vals = []  # legal vals in selection box
    o_data = None  # Primary data
    o_board = None  # Primary board
    Initial_data = None  # Initial data values
    bSize = 3  # Main Bd size inches
    sSize = 2  # Solution Bd size
    nFirst = 5  # first n solutions
    makePuzzle = None  # If defined, generate puzzle with this many cells filled
    makePuzzleSym = "c"  # Puzzle symmetry pref for initial settings
    traceList = ""
    top_bd_fr = None  # top display frame
    solve_start = 0
    run_after_load = False
def update_control_variables():
    """ Update control variables
    For variables whose values we desire real-time changes
    For now, must be customized, changed as required
    """
    global Display_time
    global run_after_load
    # NOTE(review): cF here is a local that shadows the module global cF set
    # in initialize_globals(); confirm whether `global cF` was intended.
    cF = SelectControl()  # Reference to singleton
    Display_time = cF.get_val("Display_time")
    run_after_load = cF.get_val("run_after_load")
    SlTrace.lg("update_control_variables")
if __name__ == "__main__":
    # Self-test: initialize the globals and print a few of them.
    # tg selects between the two import styles demonstrated below.
    tg = True
    ###tg = False
    if tg:
        # Access globals through the module object: rebinding a global
        # elsewhere stays visible via g.<name>.
        import sudoku_globals as g
        g.initialize_globals()
        print(f"nRow={g.nRow} nCol={g.nCol}")
        print(f"Display_mw={g.Display_mw}")
        print(f"o_data={g.o_data}")
    else:
        # from-import copies the bindings at import time; later rebinding
        # inside the module is NOT reflected in these local names.
        from sudoku_globals import initialize_globals, nRow, nCol, Display_mw, o_data
        initialize_globals()
        print(f"nRow={nRow} nCol={nCol}")
        print(f"Display_mw={Display_mw}")
        print(f"o_data={o_data}")
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
5,690
|
raysmith619/sudoku
|
refs/heads/master
|
/puzzle/SudokuPuzzle.py
|
#SudokuPuzzle.py
# Adapted from SudokuData.py
#################
## sudokuPuzzle ##
#################
from select_error import SelectError
from SudokuData import SudokuData
class SudokuPuzzle(SudokuData):
    """A Sudoku puzzle: SudokuData plus a description and source file name."""

    def __init__(self, desc=None, file_name=None, **kwargs):
        """
        :desc: Description of puzzle (default: "Basic Sudoku Puzzle")
        :file_name: file name, if known
        """
        self.file_name = file_name
        if desc is None:
            desc = "Basic Sudoku Puzzle"  # BUG FIX: was a no-op bare string
        self.desc = desc                  # BUG FIX: description was never stored
        super(SudokuPuzzle, self).__init__(**kwargs)

    def add_cell(self, row=None, col=None, val=None):
        """ Add data square to puzzle
        :row: row number
        :col: column number
        :val: square number
        :raises SelectError: if any argument is missing
        """
        if row is None or col is None or val is None:
            raise SelectError(f" row, col and val must be specified row={row}, col={col}, val={val}")
        self.setCell(row=row, col=col, val=val)

    def file2puzzle(self, file=None):
        """ convert file name/object to puzzle
        :file: name if string, else open file stream
        :returns: puzzle, None if failure
        """
        if isinstance(file, str):
            self.file_name = file
            # BUG FIX: read the file's contents (and close the handle);
            # previously called .splitlines() on the file object itself,
            # which raises AttributeError.
            with open(file) as f:
                puzzle_str = f.read().splitlines()
        else:
            puzzle_str = file.read().splitlines()
        # NOTE(review): str2puzzle is expected from SudokuData - confirm
        puzzle = self.str2puzzle(puzzle_str)
        return puzzle

    def copy(self):
        """ Copy puzzle to insulate changes in data
        :Returns: copy of data with new objects for cells
        """
        rows = self.nRow
        grows = self.nSubRow
        cols = self.nCol
        gcols = self.nSubCol
        cp = SudokuPuzzle(rows=rows, grows=grows,
                          cols=cols, gcols=gcols)
        for nr in range(1, rows+1):
            for nc in range(1, cols+1):
                val = self.getCellVal(row=nr, col=nc)
                if val is not None:
                    cp.add_cell(row=nr, col=nc, val=val)
        return cp
|
{"/src/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/sudoku_subs.py": ["/sudoku_globals.py"], "/puzzle/SudokuPly.py": ["/sudoku_globals.py"], "/puzzle/sudoku.py": ["/sudoku_subs.py", "/sudoku_globals.py"], "/puzzle/SudokuBoard.py": ["/sudoku_globals.py"], "/sudoku_subs.py": ["/sudoku_globals.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.