index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
55,657 | open-sourcepad/python-testing-framework | refs/heads/master | /helpers/datetime_helper.py | from datetime import datetime as DT
def str_to_date(value, format='%Y-%m-%d %H:%M:%S.%f'):
    """Parse a timestamp string into a datetime via DT.strptime."""
    parsed = DT.strptime(value, format)
    return parsed
def get_beginning_of_week(**kwargs):
    """Return the Monday (start of week) of kwargs['datetime'].

    Accepts either a datetime or a timestamp string. Fixes two NameErrors
    in the original: `timedelta` was never imported at module level, and
    `convert_to_datetime` does not exist — the sibling `str_to_date` is
    the converter defined in this module.
    """
    from datetime import timedelta  # module header only imports datetime
    day = kwargs['datetime']
    if isinstance(day, str):
        # presumably str_to_date's default format matches callers — TODO confirm
        day = str_to_date(day)
    return day - timedelta(days=day.weekday())
def get_end_of_week(**kwargs):
    """Return the Sunday (end of week) of kwargs['datetime'].

    Accepts either a datetime or a timestamp string. Fixes the same two
    NameErrors as get_beginning_of_week: missing `timedelta` import and
    the nonexistent `convert_to_datetime` helper.
    """
    from datetime import timedelta  # module header only imports datetime
    day = kwargs['datetime']
    if isinstance(day, str):
        # presumably str_to_date's default format matches callers — TODO confirm
        day = str_to_date(day)
    return day + timedelta(days=(6 - day.weekday()))
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,658 | open-sourcepad/python-testing-framework | refs/heads/master | /procedures.py | from importlib import import_module
class Procedures:
    """Looks up and runs test modules from the modules/ package by name."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.test = kwargs.get('test')

    def run(self):
        """Run only the module named by the 'test' kwarg, if one was given."""
        if self.test:
            return self._run(self.test)

    def run_all(self):
        """Run every registered module and collect their results."""
        return [self._run(name) for name in self._module_list]

    @property
    def _module_list(self):
        # Registry of runnable module names.
        return [
            'sample'
        ]

    def _run(self, module_name):
        # snake_case module name -> CamelCase class name (e.g. my_mod -> MyMod).
        class_name = ''.join(part.capitalize() for part in module_name.split('_'))
        module = import_module(f"modules.{module_name}")
        return getattr(module, class_name)(**self.kwargs).run()
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,659 | open-sourcepad/python-testing-framework | refs/heads/master | /models/base.py | # -*- coding: utf-8 -*-
from peewee import *
from config import Database as DB
from datetime import datetime as DT
from decimal import Decimal
class Base(Model):
    """Shared peewee model helpers (serialization, lookup shortcuts)."""

    class Meta:
        database = DB().instance

    def get_all_keys(self):
        """Return the names of all mapped fields."""
        return list(self._meta.fields.keys())

    def get_data(self, **kwargs):
        """Return this row's data as a dict.

        keys: optional whitelist of field names to include.
        serializable: stringify datetime/Decimal values (e.g. for JSON).
        """
        result = {}
        whitelist = kwargs.get('keys')
        serializable = kwargs.get('serializable')
        for k, v in self.__data__.items():
            if whitelist and k not in whitelist:
                # Was a bare `next` (a no-op expression) that only skipped the
                # key by accident of the if/elif structure; `continue` is the intent.
                continue
            elif serializable and isinstance(v, (DT, Decimal)):
                result[k] = str(v)
            else:
                result[k] = v
        return result

    def get_filtered_data(self, serializable=False):
        """All fields, optionally serialized."""
        return self.get_data(serializable=serializable)

    def get_data_except(self, keys={}, serializable=False):
        """Row data minus the given keys."""
        raw_data = self.get_data(serializable=serializable)
        return {x: raw_data[x] for x in raw_data if x not in keys}

    def reload(self):
        """Fetch a fresh copy of this row from the database."""
        return type(self).get_by_id(self._pk)

    @classmethod
    def first(cls):
        """First row of the table (query order unspecified)."""
        return cls.select().first()

    @classmethod
    def last(cls):
        # `.sort` is monkey-patched onto ModelSelect by the Db middleware.
        return cls.select().sort(column='id', order='desc').first()

    @classmethod
    def find_by(cls, column, value):
        """First row where `column == value`, or None."""
        return cls.select().where(getattr(cls, column) == value).first()
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,660 | open-sourcepad/python-testing-framework | refs/heads/master | /main.py | from config import Config
from procedures import Procedures
from middleware import *
class Main:
    """Application entry point: applies middleware, then delegates to Procedures."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self._middleware()

    # The 'test' kwarg selects which single module run() executes.
    def run(self):
        """Run the single module named by the 'test' kwarg."""
        return Procedures(**self.kwargs).run()

    def run_all(self):
        """Run every registered module."""
        return Procedures(**self.kwargs).run_all()

    def _middleware(self):
        # Invoke each configured middleware callable once, in order.
        for mw in self.kwargs['middleware']:
            mw()
# Middleware applied at startup; Db monkey-patches peewee's ModelSelect.
middleware = [
    Db
]

if __name__ == "__main__":
    # default_imports() pre-imports baseline modules (e.g. pry) into configs.
    configs = Config.default_imports()
    configs['middleware'] = middleware
    Main(**configs).run_all()
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,661 | open-sourcepad/python-testing-framework | refs/heads/master | /libs/callback.py | import requests
from .logger import Logger
class Callback:
    """Thin wrapper around the requests library with error logging."""

    def __init__(self, method='post', url='', data=None, json=None, headers=None):
        # Fix: the original used mutable default arguments ({}), which are
        # shared across every call that omits them; default to None instead.
        self.req = getattr(requests, method)
        self.url = url
        self.data = {} if data is None else data
        self.json = {} if json is None else json
        self.headers = {} if headers is None else headers

    def request(self):
        """Issue the HTTP request; on failure, log and return the exception."""
        try:
            result = self.req(self.url, data=self.data, json=self.json, headers=self.headers)
            return result
        except Exception as e:
            Logger(file='callback', message=str(e), type='error').log()
            return e
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,662 | open-sourcepad/python-testing-framework | refs/heads/master | /config.py | import yaml
from pathlib import Path
from peewee import *
from playhouse.pool import PooledMySQLDatabase
from helpers.typecast_helper import to_int
class Config:
    """Loads configs/<file>.yml and exposes the section selected by
    data['default']['environment']."""

    def __init__(self, file='app'):
        pwd = str(Path(__file__).parent)
        with open(f"{ pwd }/configs/{ file }.yml") as stream:
            try:
                data = yaml.safe_load(stream)
                # 'default.environment' names which top-level section is active.
                self.data = data[data['default']['environment']]
            except Exception as e:
                # Best-effort: surface the error without crashing startup.
                print(e)

    @classmethod
    def default_imports(cls):
        """Import and return the baseline modules every run needs.

        Fix: the classmethod's first parameter was named `self`; it receives
        the class, so `cls` is the correct name.
        """
        result = {}
        for module in ['pry']:
            result[module] = __import__(module)
        return result

    @property
    def get_all(self):
        """The full mapping for the active environment."""
        return self.data

    def get(self, key):
        """Look up one key in the active config (KeyError if absent)."""
        return self.get_all[key]
class Database:
    """Builds a peewee MySQLDatabase from configs/database.yml."""

    def __init__(self):
        self.config = Config(file='database').get_all
        self.database = self.config['database']
        # Coerce numeric-looking settings (e.g. port) to int where possible.
        self.db_config = {attr: to_int(self.config[attr]) for attr in self._attributes}

    @property
    def instance(self):
        """A fresh MySQLDatabase handle for the configured database."""
        return MySQLDatabase(
            self.database,
            **self.db_config
        )

    @property
    def _attributes(self):
        # Connection settings forwarded to MySQLDatabase.
        return ['user', 'password', 'host', 'port']
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,663 | open-sourcepad/python-testing-framework | refs/heads/master | /helpers/list_helper.py | def pluck(**kwargs):
values = kwargs['values']
key = kwargs['key']
if type(values) == dict:
result = [value[key] for value in values]
else:
result = [getattr(value, key) for value in values]
return result
def compact(list_data):
    """Drop None entries, keeping order and other falsy values (0, '', [])."""
    return list(filter(lambda item: item is not None, list_data))
def uniq(list_data):
    """Return the unique elements of list_data, preserving first-seen order.

    Improvement: `list(set(...))` produced an arbitrary, nondeterministic
    order; dict.fromkeys deduplicates while keeping insertion order.
    """
    return list(dict.fromkeys(list_data))
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,664 | open-sourcepad/python-testing-framework | refs/heads/master | /modules/sample.py | from libs.csv_reader import CsvReader
from .base import CallBackBase
class Sample(CallBackBase):
    """Sample smoke test: GETs the configured test_url and records pass/fail.

    NOTE(review): the pass condition is status_code == 400, which looks like
    a deliberate negative test — confirm it is not a typo for 200.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.method = 'get'
        # Optional request customisation hooks, currently disabled:
        # self.json = self._set_json()
        # self.headers = self._set_headers()
        # self.data = self._set_data()
        self.url_endpoint = f"{self.test_url}"

    def run(self):
        # Hit the endpoint, record the verdict, then let Base.run print it.
        result = self.visit_url(self.url_endpoint)
        self.test_result = result.status_code == 400
        super().run()
        return result

    # def _set_json(self):
    #     return {}
    # def _set_headers(self):
    #     return {}
    # def _set_data(self):
    #     return {}
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,665 | open-sourcepad/python-testing-framework | refs/heads/master | /middleware/db.py | from peewee import ModelSelect
class Db(object):
    """Middleware that monkey-patches helper methods onto peewee's ModelSelect.

    Instantiating Db applies the patch; the helpers are defined inside
    __init__ so the patch happens exactly when the middleware is invoked.
    """
    def __init__(self):
        def sort(self, **kwargs):
            # e.g. query.sort(column='id', order='desc') -> ORDER BY id DESC.
            return self.order_by(getattr(getattr(self.model, kwargs['column']), kwargs['order'])())

        def iterable(self):
            # Materialise the query into a plain list.
            return list(self)

        setattr(ModelSelect, 'sort', sort)
        setattr(ModelSelect, 'iterable', iterable)
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,666 | open-sourcepad/python-testing-framework | refs/heads/master | /libs/selenium_request.py | from selenium import webdriver
from seleniumrequests import Firefox
from selenium.webdriver.firefox.options import Options
from seleniumrequests.request import headers
class SeleniumError(Exception):
    """Raised when the Firefox/seleniumrequests driver cannot be created or used."""
class Selenium:
    """Drives a headless Firefox via seleniumrequests."""

    def __init__(self, **kwargs):
        self.method = kwargs.get('method', 'get')
        self.data = kwargs.get('data')
        self.headers = kwargs.get('headers')
        self.options = Options()
        self._options()

    def browser(self, url):
        """Open headless Firefox, issue the request, and return the driver.

        Fixes: removed a leftover `import pry;pry()` debugger breakpoint
        that halted every call, and the dead local `headers = self.headers`.
        NOTE(review): self.headers is currently not forwarded to the
        request — confirm whether seleniumrequests should receive it.
        """
        try:
            driver = Firefox(options=self.options)
            driver.request(self.method, url, data=self.data)
            return driver
        except Exception as e:
            raise SeleniumError(f"Error setting instance of Firefox.\nError: {e}")

    def _options(self):
        # Run without a visible browser window.
        self.options.headless = True
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,667 | open-sourcepad/python-testing-framework | refs/heads/master | /modules/base.py | import unittest
from libs.selenium import Selenium
from config import Config
from libs.callback import Callback
class Base:
    """Common state and result reporting shared by all test modules."""

    def __init__(self, **kwargs):
        # Promote every kwarg to an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.test_url = Config().get('test_url')
        self.print_results = kwargs.get('print_results', True)
        self.url_endpoint = ''

    def run(self):
        """Print a banner with the endpoint and pass/fail, when enabled."""
        if not self.print_results:
            return
        divider = '==========================================================='
        print(divider)
        print(f"Running endpoint: {self.url_endpoint}")
        print(f"{self.__class__.__name__} Passed: {self.test_result}")
        print(divider)
class SeleniumBase(Base):
    """Base for modules that drive a real browser through the Selenium wrapper.

    NOTE(review): _set_params()/_set_headers() are not defined here or in
    Base — instantiating SeleniumBase directly would raise AttributeError;
    subclasses presumably must provide them. Confirm.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.method = 'get'
        self.browser_name = 'Firefox'
        self.data = self._set_params()
        self.headers = self._set_headers()

    def visit_url(self, url):
        """Open `url` through the Selenium wrapper; returns the driver."""
        args = {
            'method': self.method,
            'data': self.data,
            'headers': self.headers
        }
        result = Selenium(**args).browser(url)
        return result
class CallBackBase(Base):
    """Base for modules that issue plain HTTP requests via Callback."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.method = 'get'
        # Every request starts with an empty payload, JSON body, and headers.
        self.data = {}
        self.json = {}
        self.headers = {}

    def visit_url(self, url):
        """Perform the configured HTTP request and return the response."""
        request = Callback(
            url=url,
            method=self.method,
            data=self.data,
            json=self.json,
            headers=self.headers
        )
        return request.request()
class UnitTestBase(Base, unittest.TestCase):
    """Marker base combining module Base with unittest's assertion methods."""
    pass
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,668 | open-sourcepad/python-testing-framework | refs/heads/master | /libs/csv_reader.py | import csv
from pathlib import Path
from config import Config
from .logger import Logger
class CsvReader:
    """Reads assets/<name>.csv (under the configured root_url) into dicts."""

    def __init__(self, **kwargs):
        self.config = Config()
        pwd = self.config.get('root_url')
        # Fix: removed the dead local `file = f"{ pwd }"` left over from editing.
        self.file = f"{ pwd }/assets/{kwargs.get('file')}.csv"

    def read(self):
        """Return the CSV rows as a list of dicts (header row supplies keys)."""
        result = []
        with open(self.file) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                result.append(dict(row))
        # Record what was read, for later debugging.
        Logger(file='test', message=result, type='info').log()
        return result
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,669 | open-sourcepad/python-testing-framework | refs/heads/master | /libs/thread_base.py | from .logger import Logger
from threading import Thread
class ThreadBase:
    """Runs self.perform (supplied by a subclass) on a background thread,
    logging any startup failure instead of raising."""

    def __init__(self, **kwargs):
        # kwargs are forwarded to perform() when the thread starts.
        self.thread = Thread(target=self.perform, kwargs=kwargs)

    def run(self):
        """Start the worker thread; log (do not raise) startup errors."""
        try:
            self.thread.start()
        except Exception as e:
            self.log_error(message=str(e.__class__))

    def log_info(self, **kwargs):
        """Log an informational message."""
        kwargs.update({'type': 'info'})
        self._logger(**kwargs)

    def log_error(self, **kwargs):
        """Log an error, prefixing the message for searchability."""
        kwargs.update({
            'type': 'error',
            'message': "Error occurred {error}".format(error=kwargs['message'])
        })
        self._logger(**kwargs)

    def _logger(self, **kwargs):
        # Default file name keeps legacy behaviour for unlabelled logs.
        Logger(
            message=kwargs['message'],
            file=kwargs.get('file', 'thread_error'),
            type=kwargs['type']
        ).log()
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,670 | open-sourcepad/python-testing-framework | refs/heads/master | /helpers/string_helper.py | import json
def from_json(value):
    """Parse a JSON string; on any failure, return the input unchanged."""
    try:
        return json.loads(value)
    except Exception:
        return value
def to_json(value):
    """Serialize to a JSON string; on any failure, return the input unchanged."""
    try:
        return json.dumps(value)
    except Exception:
        return value
| {"/models/base.py": ["/config.py"], "/main.py": ["/config.py", "/procedures.py"], "/config.py": ["/helpers/typecast_helper.py"], "/modules/sample.py": ["/libs/csv_reader.py", "/modules/base.py"], "/modules/base.py": ["/libs/selenium.py", "/config.py", "/libs/callback.py"], "/libs/csv_reader.py": ["/config.py"]} |
55,672 | deepweaver/1MillionQueens | refs/heads/master | /testmultiprocess.py | import random
from time import sleep
def worker(i, quit, foundit):
    """Poll random numbers until one exceeds 0.7 or `quit` is set.

    i: worker index (used only in log messages).
    quit: shared Event telling workers to stop.
    foundit: shared Event this worker sets on success.

    Fix: converted Python 2 print statements to print() calls — the rest of
    this repo (multiprocessingmain.py, board.py) is Python 3.
    """
    print("%d started" % i)
    while not quit.is_set():
        x = random.random()
        if x > 0.7:
            print('%d found %g' % (i, x))
            foundit.set()
            break
        sleep(0.1)
    print("%d is done" % i)
if __name__ == "__main__":
import multiprocessing as mp
quit = mp.Event()
foundit = mp.Event()
for i in range(mp.cpu_count()):
p = mp.Process(target=worker, args=(i, quit, foundit))
p.start()
foundit.wait()
quit.set() | {"/multiprocessingmain.py": ["/board.py"], "/board.py": ["/progressbar.py"], "/main.py": ["/board.py"]} |
55,673 | deepweaver/1MillionQueens | refs/heads/master | /multiprocessingmain.py | from board import *
import time
import os
# from multiprocessing import Pool, cpu_count
import multiprocessing as mp
# np.random.seed(0)
# collision = []
bdsize = 100000
# fastest = None
# print(bd.collisions)
# def run(seed):
# np.random.seed(seed)
# bd = board(bdsize, randinit=False,threshold=10)
# bd.repair(withprogressbar=False)
def worker(i, quit, foundit, ccnt):
    """Build and repair one n-queens board in this process.

    Shows progress bars only when a single worker runs (ccnt == 1), to
    avoid interleaved terminal output from multiple processes.
    """
    if ccnt == 1:
        pb = True
    else:
        pb = False
    # global fastest
    print ("process %d started" % i)
    while not quit.is_set():
        # Seed per-worker so each process explores a different start state.
        np.random.seed(i)
        bd = board(bdsize, randinit=False,threshold=10,with_progressbar_when_initializing=pb)
        bd.repair(withprogressbar=pb)
        foundit.set()
        # print(bd.collisions)
        # fastest = bd.collisions
        break
    print ("process %d is done" % i)
tic = time.perf_counter()
# Use a single worker (with a progress bar) for small boards or few cores;
# otherwise fan out over half the cores.
if bdsize < 5000000 or mp.cpu_count() < 4:
    ccnt = 1
else:
    # Fix: was `mp.cpu_count()/2`, a float in Python 3, which makes
    # `range(ccnt)` below raise TypeError. Floor division keeps it an int.
    ccnt = mp.cpu_count() // 2
quit = mp.Event()
foundit = mp.Event()
for i in range(ccnt):
    p = mp.Process(target=worker, args=(i, quit, foundit, ccnt))
    p.start()
# Block until one worker solves its board, then stop the others.
foundit.wait()
quit.set()
toc = time.perf_counter()
print("time = {:.2f}s".format(toc - tic))
| {"/multiprocessingmain.py": ["/board.py"], "/board.py": ["/progressbar.py"], "/main.py": ["/board.py"]} |
55,674 | deepweaver/1MillionQueens | refs/heads/master | /board.py | import numpy as np
# from nqueens_bak import nQueens
from progressbar import update_progress
# np.random.seed(0)
class board:
    """Min-conflicts / swap-based solver state for the n-queens problem.

    bd[i] is the row of the queen in column i (a permutation, so rows and
    columns never collide — only diagonals can). dp/dn count queens per
    positive/negative diagonal; `collisions` is the total number of excess
    queens over all diagonals, and `attack` lists the columns whose queen
    currently shares a diagonal. C1/C2 tune the repair loop's restart
    limit and iteration budget.
    """
    def __init__(self, size=8, C1=0.45, C2=32, randinit=True, threshold=20,with_progressbar_when_initializing=False):
        self.N = size
        # self.threshold = threshold
        ### TODO: dtype reinitialize, reduce memory
        self.bd = np.arange(self.N, dtype=np.uint32) ### because len(bin(1000000)) < 32
        self.dp = np.zeros(2*self.N, dtype=np.uint32) # [2 .. 2*N-2]
        # if use greedy init, dtype can be set uint8 or even smaller
        self.dn = np.zeros(2*self.N, dtype=np.uint32) # careful with the indexing
        # self.attack = -np.ones(self.N) # row indeces of queens in array queen that are attacked by other queens
        self.attack = []
        # because we use index starting from 0, so the attack matrix is initialized with -1 instead of 0 (0 means column one)
        self.limit = 0
        self.collisions = 1
        self.number_of_attacks = 0
        self.loopcount = 0
        self.C1 = C1
        self.C2 = C2
        if randinit:
            self.randinit()
        else:
            self.threshold = threshold
            self.with_progressbar_when_initializing = with_progressbar_when_initializing
            self.greedyinit()
        self.iter_num = 0
        self._old = 0
        self._new = 0

    def printall(self,bd=True,attack=True):
        """Pretty-print dp, the board grid, dn, and (optionally) bd/attack.

        Refuses boards larger than 10 — the grid would be unreadable.
        """
        if self.N > 10:
            print("size {} is too large to be printed".format(self.N))
            return
        # print dn
        # print(' '*4, end='')
        for i in range(0,2*self.N):
            print('{: >2d}'.format(self.dp[i]),end='')
        print()
        for i in range(self.N-1,-1,-1):
            for j in range(self.N):
                if self.bd[j] == i:
                    print(' 1',end='')
                else:
                    print(' 0',end='')
            print()
        # print(' '*4, end='')
        for i in range(0,2*self.N):
            print('{: >2d}'.format(self.dn[i]),end='')
        print()
        if bd:
            print("board is {}".format(self.bd))
        if attack:
            print("attack list is {}".format(self.attack))

    def greedyinit(self,):
        """Greedy initialization: place queens column by column, preferring
        empty diagonals; after `threshold` failed random retries, fall back
        to plain placement for the remaining columns."""
        # pass
        np.random.shuffle(self.bd)
        # flags = np.zeros(self.N, dtype=np.bool) # all False
        # choices = np.arange(self.N)
        reached = False
        self.threshold = 100
        print("greedy initialization")
        for i in range(self.N):
            if self.with_progressbar_when_initializing:
                update_progress(i/self.N)
            if not reached:
                if self.dp[self.N-self.bd[i]+i] == 0 and self.dn[self.bd[i]+i+1] == 0:
                    self.dp[self.N-self.bd[i]+i] += 1
                    self.dn[self.bd[i]+i+1] += 1
                else:
                    foundit = False
                    for j in range(self.threshold):
                        # print("i = {}, self.N = {}".format(i,self.N))
                        tmp = np.random.randint(i,self.N)
                        if self.dp[self.N-tmp+i] == 0 and self.dn[tmp+i+1] == 0:
                            self.bd[i], self.bd[tmp] = self.bd[tmp], self.bd[i]
                            self.dp[self.N-self.bd[i]+i] += 1
                            self.dn[self.bd[i]+i+1] += 1
                            foundit = True
                            break
                        if j == self.threshold-1:
                            # all retries exhausted: stop being greedy from here on
                            reached = True
                            break
                    # reached = True if j == threshold-1
                    if not foundit:
                        self.dp[self.N-self.bd[i]+i] += 1
                        self.dn[self.bd[i]+i+1] += 1
            else:
                self.dp[self.N-self.bd[i]+i] += 1
                self.dn[self.bd[i]+i+1] += 1
        # self.dp[self.N-self.bd[i]+i] += 1
        # self.dn[self.bd[i]+i+1] += 1
        # Total collisions = sum of (count - 1) over every occupied diagonal.
        total = 0
        for i in self.dn:
            if i > 1:
                total += i-1
        for j in self.dp:
            if j > 1:
                total += j-1
        self.collisions = total
        self.compute_attack_matrix()

    def randinit(self,):
        """Random initialization: shuffle the permutation, then recompute
        diagonal counts and the attack list from scratch."""
        # self.bd = np.random.permutation(self.bd)
        np.random.shuffle(self.bd)
        # self.dp = np.zeros(2*self.N, dtype=np.uint32) # [2 .. 2*N-2]
        # self.dp.fill(0)
        # if use greedy init, dtype can be set uint8 or even smaller
        # self.dn = np.zeros(2*self.N, dtype=np.uint32) # careful with the indexing
        # self.dn.fill(0)
        self.compute_collisions()
        self.compute_attack_matrix()
        # print(self.collisions)

    def compute_attack_matrix(self,):
        """Rebuild `attack`: columns whose queen shares a diagonal with another."""
        self.attack = []
        for i in range(self.N):
            if self.dp[self.N-self.bd[i]+i] > 1 or self.dn[self.bd[i]+i+1] > 1:
                self.attack.append(i)
        return self.attack

    def compute_collisions(self,):
        """Recount dp/dn from the board and return the total collision count."""
        self.dp.fill(0)
        self.dn.fill(0)
        for i in range(self.N):
            self.dp[self.N-self.bd[i]+i] += 1
            self.dn[self.bd[i]+i+1] += 1
        total = 0
        for i in self.dn:
            if i > 1:
                total += i-1
        for j in self.dp:
            if j > 1:
                total += j-1
        self.collisions = total
        return total

    def get8collisions(self,i,j):
        """Count collisions on the (up to 8) diagonals touched by columns i and j."""
        old_collisions = 0
        dpset = [self.N-self.bd[i]+i, self.N-self.bd[i]+j, self.N-self.bd[j]+j,self.N-self.bd[j]+i]
        dpset = list(set(dpset))
        for k in dpset:
            old_collisions += self.dp[k]-1 if self.dp[k] > 1 else 0
        dnset = [self.bd[i]+i+1, self.bd[i]+j+1, self.bd[j]+j+1, self.bd[j]+i+1]
        dnset = list(set(dnset))
        for k in dnset:
            old_collisions += self.dn[k]-1 if self.dn[k] > 1 else 0
        return old_collisions

    def swap_ok(self,i,j):
        """Trial-swap columns i and j: True iff the swap reduces local collisions.

        Side effect: caches the before/after counts in _old/_new, which
        perform_swap relies on; the board is restored before returning.
        """
        self._old = self.get8collisions(i,j)
        self._perform_swap(i,j)
        # self.printall()
        self._new = self.get8collisions(i,j)
        self._perform_swap(j,i)
        # print("old is {}, new is {}".format(old,new))
        return self._new < self._old

    def _perform_swap(self, i,j):
        """Swap the queens in columns i and j, updating dp/dn incrementally."""
        self.dp[self.N-self.bd[i]+i] -= 1
        self.dn[self.bd[i]+i+1] -= 1
        self.dp[self.N-self.bd[i]+j] += 1
        self.dn[self.bd[i]+j+1] += 1
        self.dp[self.N-self.bd[j]+j] -= 1
        self.dn[self.bd[j]+j+1] -= 1
        self.dp[self.N-self.bd[j]+i] += 1
        self.dn[self.bd[j]+i+1] += 1
        self.bd[i], self.bd[j] = self.bd[j], self.bd[i]

    def perform_swap(self, i,j):
        """Commit the swap trialled by swap_ok and adjust the collision total.

        Must be called immediately after a swap_ok(i,j) that returned True:
        it reuses the cached _old/_new deltas from that call.
        """
        # self._perform_swap(i,j)
        # self._old = self.get8collisions(i,j)
        # print(old)
        self._perform_swap(i,j)
        # self._new = self.get8collisions(i,j)
        # print(new)
        # self._perform_swap(j,i)
        self.collisions = self.collisions - self._old + self._new
        # print(self.collisions)
        # self.compute_collisions()
        # print(self.collisions)
        # (translated) the two prints above compare the incremental vs recomputed collision counts

    def repair(self, withprogressbar=False):
        """Min-conflicts repair loop: repeatedly restart from a random board
        and greedily swap attacked queens until collisions reach zero.

        Returns None on success mid-loop, True if the outer loop exits.
        """
        self.iter_num = 0
        while self.collisions > 0:
            self.iter_num += 1
            # print("enter first loop")
            self.randinit()
            self.collisions = self.compute_collisions()
            # print(self.collisions)
            self.limit = self.collisions * self.C1
            # self.compute_attacks()
            self.loopcount = 0
            # print("iternum = {}".format(self.iter_num))
            print("\niteration {}".format(self.iter_num))
            while self.loopcount <= self.C2 * self.N:
                # print("loop count = {}, self.C2 * self.N = {}".format(self.loopcount,self.C2 * self.N))
                # print(float(self.loopcount) / (self.C2 * self.N))
                if withprogressbar:
                    update_progress(float(self.loopcount) / (self.C2 * self.N))
                # print("enter second loop")
                # for k in range(self.number_of_attacks):
                # print(self.attack)
                for i in self.attack:
                    # print("third loop's {}".format(i))
                    j = np.random.randint(0,self.N) # choose j
                    if self.swap_ok(i,j):
                        # print("swap ok@@ ")
                        # self.printall()
                        self.perform_swap(i,j) # update collision matrix and board
                        # self.collisions -= diff # update collision
                        # print("see if collision is wrong",self.collisions)
                        if self.collisions == 0:
                            return
                        if self.collisions < self.limit:
                            self.limit = self.C1 * self.collisions
                self.compute_attack_matrix()
                self.loopcount += len(self.attack)
            # break
        return True
if __name__ == "__main__":
board = board(4,randinit=True)
board.printall(bd=True, attack=True)
board.repair()
board.printall(bd=True, attack=True)
print(board.collisions)
print("iteration = {}".format(board.iter_num)) | {"/multiprocessingmain.py": ["/board.py"], "/board.py": ["/progressbar.py"], "/main.py": ["/board.py"]} |
55,675 | deepweaver/1MillionQueens | refs/heads/master | /main.py | from board import *
import time
import numpy as np
np.random.seed(0)
# collision = []
bdsize = 1000
# fastest = None
# print(bd.collisions)
# def run(seed):
# np.random.seed(seed)
# bd = board(bdsize, randinit=False,threshold=10)
# bd.repair(withprogressbar=False)
# Read one board size per line from nqueens.txt.
board_sizes = []
with open ("./nqueens.txt") as file:
    for line in file:
        board_sizes.append(int(line))
print(board_sizes)
# Solve each size and append the resulting permutation to nqueens_out.txt.
with open ("./nqueens_out.txt",'w') as file:
    for i in board_sizes:
        bd = board(i, randinit=False,threshold=10,with_progressbar_when_initializing=True)
        tic = time.perf_counter()
        bd.repair(withprogressbar=True)
        toc = time.perf_counter()
        print("time = {:.2f}s".format(toc - tic))
        print("\ntotal iteration = {}".format(bd.iter_num))
        # One solution per line, as a bracketed comma-separated row list.
        file.write('[' + ','.join(map(str,bd.bd)) + ']\n')
        # bd.printall(bd=True, attack=True)
        # print(bd.collisions)
        # print(bd.bd)
        # print(bd.collisions)
| {"/multiprocessingmain.py": ["/board.py"], "/board.py": ["/progressbar.py"], "/main.py": ["/board.py"]} |
55,676 | deepweaver/1MillionQueens | refs/heads/master | /progressbar.py | import sys,time
import random
def update_progress(progress):
    """Draw/overwrite a one-line textual progress bar on stdout.

    Expects a float in [0, 1]; ints are coerced to float, non-numbers are
    treated as 0 with an error note, and out-of-range values are clamped
    with a "Halt"/"Done" status message.
    """
    bar_width = 100  # Modify this to change the length of the progress bar
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress, status = 0, "error: progress var must be float\r\n"
    if progress < 0:
        progress, status = 0, "Halt...\r\n"
    if progress >= 1:
        progress, status = 1, "Done...\r\n"
    filled = int(round(bar_width * progress))
    bar = "#" * filled + "-" * (bar_width - filled)
    sys.stdout.write("\rPercent: [{0}] {1}% {2}".format(bar, progress * 100, status))
    sys.stdout.flush()
if __name__ == "__main__":
# for i in range(100):
# time.sleep(0.1)
# update_progress(i/100.0)
x = 0
while x < 100:
x += random.randint(0,10)
time.sleep(0.1)
update_progress(x/100) | {"/multiprocessingmain.py": ["/board.py"], "/board.py": ["/progressbar.py"], "/main.py": ["/board.py"]} |
55,679 | stefanlester/PythonPassword-STEFAN | refs/heads/master | /User.py | class User:
name = "default"
hashed_pw = "default"
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def set_password(self, pw):
self.hashed_pw = pw
def get_password(self):
return self.hashed_pw
| {"/testPassword.py": ["/Password.py", "/User.py"], "/start.py": ["/User.py", "/Password.py"]} |
55,680 | stefanlester/PythonPassword-STEFAN | refs/heads/master | /testPassword.py | import unittest
# Fix: Password.py defines `PasswordTester` (capital P); importing
# `passwordTester` raised ImportError.
from Password import PasswordTester as Password
from User import User
class TestPassword(unittest.TestCase):
    """Exercises the User holder and the bcrypt hash/check round trip."""

    def setUp(self):
        # bcrypt operates on bytes, so encode the sample password.
        self.password = '123_x&5s'.encode()

    def test_user(self):
        # NOTE(review): User's methods are called unbound with the TestCase
        # instance as `self`, and assertTrue ignores its second argument —
        # this only checks the name is truthy. Consider assertEqual.
        User.set_name(self, name = "Stefan")
        response = User.get_name(self)
        self.assertTrue(response, "Stefan")

    def test_hash_password_hash_check(self):
        # NOTE(review): assertTrue's second argument is a message, not an
        # expected value; the trailing (True) has no effect.
        hashed_pwd = Password.hash_password(self.password)
        self.assertTrue(Password.hash_check(self.password, hashed_pwd), (True))

if __name__ == '__main__':
    unittest.main()
55,681 | stefanlester/PythonPassword-STEFAN | refs/heads/master | /start.py | from User import User
# Fix: Password.py defines `PasswordTester` (capital P); importing
# `passwordTester` raised ImportError at startup.
from Password import PasswordTester
import hashlib
import os
import bcrypt

# NOTE(review): os.getenv takes an environment *variable name*; passing the
# literal password almost certainly yields None, and the value is
# overwritten below anyway — confirm intent before removing.
password=os.getenv("123_x&5s")
hash_object = bcrypt.hashpw((b'123_x32&'),bcrypt.gensalt())

# Demo: create a user, hash a password, store it, then verify it.
password = "bobo".encode()
user1 = User()
user1.set_name("Bert")
p = PasswordTester()
hashed_password = p.hash_password(password)
user1.set_password(hashed_password)
hashed_password = user1.get_password()
p.hash_check(password, hashed_password)
55,682 | stefanlester/PythonPassword-STEFAN | refs/heads/master | /Password.py | #adopted from: https://paragonie.com/blog/2016/02/how-safely-store-password-in-2016
import hmac
import hashlib
import os
import bcrypt
class PasswordTester:
    """bcrypt-based password hashing and verification helpers."""

    @staticmethod
    def hash_password(password_string):
        """Hash a bytes password with a fresh salt; returns the bcrypt hash."""
        return bcrypt.hashpw(password_string, bcrypt.gensalt())

    @staticmethod
    def hash_check(cleartext_password, hashed_password):
        """Check a bytes password against a stored hash.

        Prints "Yes"/"No" for interactive feedback and returns the verdict.
        """
        matched = bcrypt.checkpw(cleartext_password, hashed_password)
        if matched:
            print("Yes")
        else:
            print("No")
        return matched
| {"/testPassword.py": ["/Password.py", "/User.py"], "/start.py": ["/User.py", "/Password.py"]} |
55,685 | elfprince13/FreeBuild | refs/heads/master | /scripts/prefs/keymaps/default.py | # This little module maps glfw key definitions to libRocket key definitions
# Change if you have a nonstandard keyboard layout
# Or if one of those two components is replaced in the engine
def getKeyMap():
    """glfw key index -> rocket key index translation table."""
    return keymap

def getEffectiveKeyNames():
    """rocket key name -> rocket key index."""
    return rocket_by_name

def getEffectiveKeyIndices():
    """rocket key index -> rocket key name."""
    return rocket_by_index

def getBackendKeyNames():
    """glfw key name -> glfw key index."""
    return glfw_by_name

def getBackendKeyIndices():
    """glfw key index -> glfw key name."""
    return glfw_by_index

def getModifiersByName():
    """modifier name -> bit flag."""
    return modifiers_by_name

def getModifiersByIndex():
    """bit flag -> modifier name."""
    return modifiers_by_index
# Constants for check modifiers
# Bit flags describing keyboard modifier state (one bit per modifier).
modifiers_by_name = {
    'ctrl' : 1 << 0,
    'shift' : 1 << 1,
    'alt' : 1 << 2,
    'meta' : 1 << 3,
    'caps_lock' : 1 << 4,
    'num_lock' : 1 << 5,
    'scroll_lock' : 1 << 6
}
# Reverse lookup: flag value -> modifier name (Python 2 iteritems).
modifiers_by_index = { v : k for k,v in modifiers_by_name.iteritems()}
# Constants for glfw key codes
glfw_by_name = {
'"' : 34,
"'" : 39,
'+' : 43,
',' : 44,
'-' : 45,
'.' : 46,
'/' : 47,
'0' : 48,
'1' : 49,
'2' : 50,
'3' : 51,
'4' : 52,
'5' : 53,
'6' : 54,
'7' : 55,
'8' : 56,
'9' : 57,
':' : 58,
';' : 59,
'<' : 60,
'=' : 61,
'>' : 62,
'?' : 63,
'A' : 65,
'B' : 66,
'C' : 67,
'D' : 68,
'E' : 69,
'F' : 70,
'F1' : 258,
'F10' : 267,
'F11' : 268,
'F12' : 269,
'F13' : 270,
'F14' : 271,
'F15' : 272,
'F16' : 273,
'F17' : 274,
'F18' : 275,
'F19' : 276,
'F2' : 259,
'F20' : 277,
'F21' : 278,
'F22' : 279,
'F23' : 280,
'F24' : 281,
'F3' : 260,
'F4' : 261,
'F5' : 262,
'F6' : 263,
'F7' : 264,
'F8' : 265,
'F9' : 266,
'G' : 71,
'H' : 72,
'I' : 73,
'J' : 74,
'K' : 75,
'L' : 76,
'M' : 77,
'N' : 78,
'O' : 79,
'P' : 80,
'Q' : 81,
'R' : 82,
'S' : 83,
'T' : 84,
'U' : 85,
'V' : 86,
'W' : 87,
'X' : 88,
'Y' : 89,
'Z' : 90,
'[' : 91,
'\\' : 92,
']' : 93,
'_' : 95,
'`' : 96,
'backspace' : 295,
'capslock' : 320,
'delete' : 297,
'down-arrow' : 284,
'end' : 301,
'esc' : 257,
'home' : 300,
'insert' : 296,
'lalt' : 291,
'lcontrol' : 289,
'left-arrow' : 285,
'lmeta' : 323,
'lshift' : 287,
'menu' : 325,
'numlock' : 319,
'numpad*' : 313,
'numpad+' : 315,
'numpad-' : 314,
'numpad.' : 316,
'numpad/' : 312,
'numpad0' : 302,
'numpad1' : 303,
'numpad2' : 304,
'numpad3' : 305,
'numpad4' : 306,
'numpad5' : 307,
'numpad6' : 308,
'numpad7' : 309,
'numpad8' : 310,
'numpad9' : 311,
'numpad=' : 317,
'numpad_enter' : 318,
'page-down' : 299,
'page-up' : 298,
'pause' : 322,
'ralt' : 292,
'rcontrol' : 290,
'return' : 294,
'right-arrow' : 286,
'rmeta' : 324,
'rshift' : 288,
'scroll' : 321,
'space' : 32,
'tab' : 293,
'unknown' : -1,
'up-arrow' : 283,
'{' : 123,
'|' : 124,
'}' : 125,
'~' : 126
}
glfw_by_index = {v : k for k,v in glfw_by_name.iteritems()}
rocket_by_name = {
'\'"' : 48,
',<' : 40,
'-_' : 41,
'.>' : 42,
'/?' : 43,
'0' : 2,
'1' : 3,
'2' : 4,
'3' : 5,
'4' : 6,
'5' : 7,
'6' : 8,
'7' : 9,
'8' : 10,
'9' : 11,
';:' : 38,
'=+' : 39,
'A' : 12,
'B' : 13,
'C' : 14,
'D' : 15,
'E' : 16,
'F' : 17,
'F1' : 107,
'F10' : 116,
'F11' : 117,
'F12' : 118,
'F13' : 119,
'F14' : 120,
'F15' : 121,
'F16' : 122,
'F17' : 123,
'F18' : 124,
'F19' : 125,
'F2' : 108,
'F20' : 126,
'F21' : 127,
'F22' : 128,
'F23' : 129,
'F24' : 130,
'F3' : 109,
'F4' : 110,
'F5' : 111,
'F6' : 112,
'F7' : 113,
'F8' : 114,
'F9' : 115,
'G' : 18,
'H' : 19,
'I' : 20,
'J' : 21,
'K' : 22,
'L' : 23,
'M' : 24,
'N' : 25,
'O' : 26,
'P' : 27,
'Q' : 28,
'R' : 29,
'S' : 30,
'T' : 31,
'U' : 32,
'V' : 33,
'W' : 34,
'X' : 35,
'Y' : 36,
'Z' : 37,
'[{' : 45,
'\\|' : 46,
']}' : 47,
'`~' : 44,
'backspace' : 69,
'capslock' : 74,
'delete' : 99,
'down-arrow' : 93,
'end' : 88,
'esc' : 81,
'home' : 89,
'insert' : 98,
'lalt' : 175,
'lcontrol' : 140,
'left-arrow' : 90,
'lmeta' : 101,
'lshift' : 138,
'menu' : 142,
'numlock' : 131,
'numpad*' : 62,
'numpad+' : 63,
'numpad-' : 65,
'numpad.' : 66,
'numpad/' : 67,
'numpad0' : 51,
'numpad1' : 52,
'numpad2' : 53,
'numpad3' : 54,
'numpad4' : 55,
'numpad5' : 56,
'numpad6' : 57,
'numpad7' : 58,
'numpad8' : 59,
'numpad9' : 60,
'numpad=' : 68,
'numpad_enter' : 61,
'page-down' : 87,
'page-up' : 86,
'pause' : 73,
'ralt' : 176,
'rcontrol' : 141,
'return' : 72,
'right-arrow' : 92,
'rmeta' : 102,
'rshift' : 139,
'scroll' : 132,
'space' : 1,
'tab' : 70,
'unknown' : 0,
'up-arrow' : 91
}
# Reverse lookup: rocket key index -> rocket key name.
rocket_by_index = {v : k for k,v in rocket_by_name.iteritems()}
# glfw names absent from rocket's table are single characters that rocket
# fuses into two-character key labels (e.g. ';' and ':' -> ';:'); build
# that single-char -> fused-label mapping by trying both concatenations.
two_to_one_src = [k for k in glfw_by_name.keys() if k not in rocket_by_name]
two_to_one_map = {k : ((k+k2) if (k+k2) in rocket_by_name else (k2+k)) for k2 in two_to_one_src for k in two_to_one_src if ((k+k2) in rocket_by_name) or ((k2+k) in rocket_by_name)}
# Final translation table: glfw key code -> rocket key code.
keymap = { k : rocket_by_name[v if v in rocket_by_name else two_to_one_map[v]] for k,v in glfw_by_index.iteritems()}
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,686 | elfprince13/FreeBuild | refs/heads/master | /scripts/prefs/__init__.py | print "Importing %s..."%(__name__),
import os.path
from scripts.util.package_utils import determine_package
# Enumerate this package's submodules/subpackages at import time so that
# "from scripts.prefs import *" exposes them via __all__.
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
__all__=determine_package(dir_path)
print "Success!"
print " Contains these submodules: %s"%(", ".join(__all__)) | {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,687 | elfprince13/FreeBuild | refs/heads/master | /scripts/editor/shaderUI.py | from net.cemetech.sfgp.glsl.editor import GLSLEditorPane
import os, os.path
from java.awt import EventQueue, BorderLayout
from java.awt.event import ActionEvent, KeyEvent, ActionListener
from java.lang import Runnable, IllegalStateException
from javax.swing import JMenu, JMenuBar, JMenuItem, KeyStroke, JSeparator, JFrame, WindowConstants
from net.cemetech.sfgp.glsl.compile import CompilerImpl, LinkerTaskSpec, CompilerTaskSpec, TaskResult
from java.util.concurrent import LinkedBlockingQueue,FutureTask, Callable
from org.lwjgl.opengl import Display
from net.cemetech.sfgp.freebuild.gfx import GFX, Shader, GLSLProgram, ShaderManager
from net.cemetech.sfgp.freebuild.console import StringLoggingService
class NativeCompilerTask(CompilerTaskSpec):
    """Compile one shader stage natively; run on the thread that owns the GL context.

    Copies kind/src from an editor-supplied CompilerTaskSpec (see
    NativeCompiler.compileShader).
    """
    def __init__(self, copySpec):
        CompilerTaskSpec.__init__(self,copySpec.getKind(), copySpec.getSrc())
        # NOTE(review): logger is never used in call(); presumably intended to
        # capture compile diagnostics for the TaskResult message -- confirm.
        self.logger = StringLoggingService(False)
    def call(self):
        # Shader construction performs the actual GL compile; self.src and
        # self.kind are assumed to be set by CompilerTaskSpec -- confirm.
        shader = Shader(self.src, self.kind)
        return TaskResult(shader.shaderId(), "")
class NativeLinkerTask(LinkerTaskSpec):
    """Link previously compiled shader stages into a GLSLProgram; run on the GL thread."""
    def __init__(self, copySpec):
        # BUG FIX: the original delegated to CompilerTaskSpec.__init__, which
        # is the wrong superclass (it takes (kind, src), not a shader list);
        # delegate to LinkerTaskSpec, matching this class's base.
        LinkerTaskSpec.__init__(self, copySpec.getShaders())
    def call(self):
        """Attach each compiled shader to a fresh program, link it, and return a TaskResult with the program id."""
        prog = GLSLProgram()
        # Maps shader -> False; the flag's meaning is defined by
        # GLSLProgram.attach (presumably "do not delete on detach") -- confirm.
        shaders = {Shader(r.resultId) : False for r in self.shaders}
        prog.attach(shaders)
        prog.link()
        # Need to re-construct the inputs as shaders somehow.
        # This will require refactoring
        # For now, a dummy test
        return TaskResult(prog.progId(), "")
class CleanupTask(Callable):
    """Delete a native GL peer (Shader or GLSLProgram) named by a TaskResult."""
    def __init__(self, peerCon, result):
        self.peerCon = peerCon  # constructor of the peer type (Shader / GLSLProgram)
        self.result = result    # TaskResult whose resultId identifies the native object
    def call(self):
        # BUG FIX: LWJGLException was never imported in this module, so the
        # except clause raised NameError instead of handling the failure;
        # import it locally.  Also modernised "except X, e" to the "as" form
        # (valid in both Jython 2.6+ and Python 3).
        from org.lwjgl import LWJGLException
        try:
            self.peerCon(self.result.resultId).delete()
            return True
        except LWJGLException as e:
            e.printStackTrace()
            return False
class NativeCompiler(CompilerImpl):
    """CompilerImpl that marshals compile/link/cleanup work onto the GL thread.

    Work items are queued as FutureTasks on self.tasks; whoever drains the
    queue (the render loop) executes them while the requesting thread blocks
    on the future's result.
    """
    def __init__(self):
        self.tasks = LinkedBlockingQueue()

    def waitOnTask(self, task):
        """Queue *task* for the GL thread and block until it completes."""
        future = FutureTask(task)
        self.tasks.put(future)
        return future.get()

    def compileShader(self, compileTask):
        # This leaks! We need some cleanup flag of some kind
        return self.waitOnTask(NativeCompilerTask(compileTask))

    def linkProgram(self, linkTask):
        return self.waitOnTask(NativeLinkerTask(linkTask))

    def cleanResult(self, peerCon, result):
        """Delete the native object behind *result* using constructor *peerCon*."""
        return self.waitOnTask(CleanupTask(peerCon, result))

    def cleanCompileResult(self, result):
        self.cleanResult(Shader, result)

    def cleanLinkResult(self, result):
        self.cleanResult(GLSLProgram, result)
def menu_with_accelerator(menuText, accelerator_pair):
    """Build a JMenuItem labelled *menuText*; *accelerator_pair* is splatted into KeyStroke.getKeyStroke (typically (key, modifiers))."""
    item = JMenuItem(menuText)
    item.setAccelerator(KeyStroke.getKeyStroke(*accelerator_pair))
    return item
class RunnableEditor(Runnable):
    # Swing-thread bootstrap for the editor frame (suitable for EventQueue.invokeLater).
    def __init__(self,ldPath):
        self.ldPath = ldPath
        # If we go back to this route,
        # we should have some way to get the compiler reference in other threads
        # NOTE(review): this binds the CompilerImpl CLASS, not an instance; per
        # the comment above this code path appears dormant -- confirm before use.
        self.compiler = CompilerImpl
    def run(self):
        makeEditorFrame(self.ldPath, self.compiler)
def makeEditorFrame(ldPath, compiler):
    """Build and show the shader editor window: menu bar, GLSLEditorPane, and menu wiring.

    ldPath is the shader load directory; compiler is handed to the editor pane.
    """
    mb = JMenuBar()
    # NOTE: "file", "open" and (below) "compile" shadow Python builtins; they
    # are only locals of this function.
    file = JMenu("File")
    edit = JMenu("Edit")
    run = JMenu("Run")
    newMenu = menu_with_accelerator("New",(KeyEvent.VK_N,ActionEvent.META_MASK))
    file.add(newMenu)
    open = menu_with_accelerator("Open",(KeyEvent.VK_O,ActionEvent.META_MASK))
    file.add(open)
    save = menu_with_accelerator("Save",(KeyEvent.VK_S,ActionEvent.META_MASK))
    file.add(save)
    file.add(JSeparator());
    resetPipe = menu_with_accelerator("Reset Pipeline",(KeyEvent.VK_N,ActionEvent.META_MASK | ActionEvent.SHIFT_MASK))
    file.add(resetPipe)
    openPipe = menu_with_accelerator("Open Pipeline",(KeyEvent.VK_O,ActionEvent.META_MASK | ActionEvent.SHIFT_MASK))
    file.add(openPipe)
    compile = menu_with_accelerator("Compile",(KeyEvent.VK_ENTER, ActionEvent.META_MASK))
    run.add(compile)
    mb.add(file)
    mb.add(edit)
    mb.add(run)
    f = JFrame("SFGP Shader Editor")
    f.setJMenuBar(mb)
    c = f.getContentPane()
    c.setLayout(BorderLayout())
    editor = GLSLEditorPane("",ldPath,compiler)
    c.add(editor, BorderLayout.CENTER)
    c.doLayout()
    f.setSize(1000, 700);
    f.setVisible(True);
    f.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE);
    # Inner class: its body executes now, inside this function's scope, so the
    # editorActions dict captures the menu-item locals created above.
    class EditorActionListener(ActionListener):
        # Called as a plain function during class-body execution (no self);
        # builds a handler that re-targets the event at the given source object.
        def makeRelay(srcObj):
            return (lambda e: editor.actionPerformed(ActionEvent(srcObj, e.getID(), e.getActionCommand())))
        # Dispatch table: menu item -> handler.
        editorActions = {
            save : (lambda e: editor.saveCurrent()),
            compile : (lambda e: editor.compileCurrent()),
            open : makeRelay(editor.openShader),
            newMenu : makeRelay(editor.newShader),
            openPipe : makeRelay(editor.openPipeline),
            resetPipe : makeRelay(editor.resetPipeline)
        }
        def actionPerformed(self, e):
            editorActions = EditorActionListener.editorActions
            evtSrc = e.getSource()
            if evtSrc in editorActions:
                editorActions[evtSrc](e)
            else:
                raise IllegalStateException("Imaginary menu item registered an ActionEvent: " + evtSrc)
    # One shared listener instance handles every menu item via the table above.
    menuListener = EditorActionListener()
    compile.addActionListener(menuListener);
    newMenu.addActionListener(menuListener);
    open.addActionListener(menuListener);
    save.addActionListener(menuListener);
    resetPipe.addActionListener(menuListener);
    openPipe.addActionListener(menuListener);
def init_editor():
    """Create the native compiler and the editor window.

    Returns the NativeCompiler so the caller's render loop can drain its task
    queue.  Prefers ./data/shaders as the load path, falling back to the CWD.
    """
    compiler = NativeCompiler()
    shader_dir = os.path.join(os.getcwd(), "data/shaders")
    if not os.path.isdir(shader_dir):
        shader_dir = os.getcwd()
    makeEditorFrame(shader_dir, compiler)
    return compiler
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,688 | elfprince13/FreeBuild | refs/heads/master | /scripts/gfx/framebuffer_builder.py | from org.lwjgl import BufferUtils, LWJGLException
from org.lwjgl.opengl import Display
from net.cemetech.sfgp.freebuild.gfx import FBO, BufferObject, Texture, GFX
from constants import attachment_points, tex_targets, formats, internal_formats, types
import json
from scala import Option
# Each framebuffer we deserialize must have the following top-level properties:
# * width (number) - defaults to 1 for non-fixed, must be specified otherwise
# * height (number) - defaults to 1 for non-fixed, must be specified otherwise
# * attachments (dictionary)
# Optionally, they may also have fixed_dims, specifying which of width and/or
# height should be interpreter as a fixed pixel size, rather than a fraction
# of Display dimensions.
#
# Each attachment should be keyed by attachment point, and contain
# instructions for building an image to render to
# Each texture must specify the following:
# * target, defaults to GL_TEXTURE_2D
# * format, and internal_format
# * type
def texture_from_description(desc, width, height, suppress_unbind=False):
    """Allocate a Texture per *desc* (keys: target, format, internal_format, type).

    The texture is left bound when suppress_unbind is True (used by
    framebuffer_from_description so it can attach immediately).
    """
    target = tex_targets[desc.get('target', 'GL_TEXTURE_2D')]
    pixel_format = formats[desc['format']]
    internal_format = internal_formats[desc['internal_format']]
    pixel_type = types[desc['type']]
    # TODO - add support for texture parameters - clamping, filters, etc
    if target != tex_targets['GL_TEXTURE_2D']:
        raise LWJGLException("FreeBuild Texture objects only know how to allocate 2D textures")
    tex = Texture(0, target)
    tex.bind()
    tex.allocate2D(0, internal_format, width, height, 0, pixel_format, pixel_type, Option.apply(None))
    if not suppress_unbind:
        tex.unbind()
    return tex
def framebuffer_from_description(description, suppress_unbind=False):
    """Build an FBO from a parsed JSON description (see module header comment).

    width/height are fractions of the Display size unless listed in
    fixed_dims, in which case they are absolute pixel counts.
    """
    fixed_dims = description.get('fixed_dims', ())
    width = (int(description.get('width', 1) * Display.getWidth())
        if 'width' not in fixed_dims else description['width'])
    height = (int(description.get('height', 1) * Display.getHeight())
        if 'height' not in fixed_dims else description['height'])
    # Each attachment becomes a zero-arg thunk; the immediately-invoked outer
    # lambda binds desc/w/h eagerly (avoiding the late-binding closure trap)
    # while deferring the GL state changes until the FBO is bound below.
    attachments = {attachment_points[a] :
        # thunk to save state changes
        (lambda desc, w, h:(lambda: texture_from_description(desc, w, h, True)))(t, width, height)
        for a, t in description['attachments'].items()}
    buffer = FBO(0)
    buffer.bind()
    for attachment, texgen in attachments.items():
        # texture is created bound (suppress_unbind=True above) so attach2D
        # can use it directly, then we unbind it once attached.
        tex = texgen()
        buffer.attach2D(tex, attachment, 0, True)
        tex.unbind()
    if not suppress_unbind: buffer.unbind()
    return buffer
def framebuffer_from_json(path):
    """Parse the framebuffer JSON at *path* eagerly; return a zero-arg factory that defers the GL work until called."""
    with open(path, 'r') as desc_handle:
        description = json.load(desc_handle)
    return lambda: framebuffer_from_description(description)
def exec_test(path):
    # Smoke test: build the framebuffer described at *path*, bind it, and run
    # the completeness check; on failure, report where and dump GL errors.
    # The FBO is always unbound and deleted afterwards.
    fb = framebuffer_from_json(path)()
    try:
        fb.bind()
        fb.check()
    except LWJGLException as e:
        import traceback, os.path
        # extract_stack()[-1] is this frame: report file/line of the failure site.
        top = traceback.extract_stack()[-1]
        print ', '.join([str(e), os.path.basename(top[0]), str(top[1])])
        print "Couldn't make our framebuffer, checking for OpenGL errors"
        GFX.checkNoGLErrors("Error creating textures for inferred rendering:")
    finally:
        fb.unbind()
        fb.delete()
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,689 | elfprince13/FreeBuild | refs/heads/master | /scripts/prefs/fonts/__init__.py | print "Importing %s..."%(__name__),
import os.path
from scripts.util.package_utils import determine_package,export_symbols
from net.cemetech.sfgp.freebuild.drivers import Drivers
# Enumerate this package's submodules so star-imports see them.
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
__all__=determine_package(dir_path)
# Pull loadDefaultFonts from the font module named in the driver settings
# (falling back to <this package>.default) into this package's namespace, so
# "from scripts.prefs.fonts import loadDefaultFonts" works regardless of
# which font module the prefs select.
export_symbols(Drivers.getMainDriver().settings().get("default_fonts","%s.default" %(__name__)),
    globals(),
    locals(),
    [
        "loadDefaultFonts"
    ])
# We want to make it so that we only have to
# import prefs.keymaps to get the right keymap
# Let's think about this some more! :)
print "Success!"
print " Contains these submodules: %s"%(", ".join(__all__)) | {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,690 | elfprince13/FreeBuild | refs/heads/master | /scripts/util/nuparser.py | from org.fit.cssbox.io import DOMSource
from nu.validator.htmlparser.dom import HtmlDocumentBuilder
from org.xml.sax import InputSource
class NUDomSource(DOMSource):
    """CSSBox DOMSource backed by the validator.nu HTML5 parser."""
    def __init__(self, src):
        DOMSource.__init__(self, src)
    def parse(self):
        """Parse the wrapped document source's input stream into a DOM Document."""
        stream = self.getDocumentSource().getInputStream()
        builder = HtmlDocumentBuilder()
        return builder.parse(InputSource(stream))
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,691 | elfprince13/FreeBuild | refs/heads/master | /main.py | import sys
import os,os.path
from net.cemetech.sfgp.freebuild.drivers import Drivers
from net.cemetech.sfgp.ldraw import LDManager
from java.lang import System
from org.lwjgl import LWJGLException
def common_setup(driver,*argv):
    """Load settings (defaults overlaid by user prefs) into *driver* and init LDraw.

    First run (no prefs.json) writes the merged settings back out.
    """
    # Here we should explicitly load a settings file
    import json
    with open("data/prefs/defaults.json",'r') as defaults:
        for k,v in json.load(defaults).items(): driver.settings()[k] = v
    # User prefs override defaults key-by-key.
    if os.path.isfile("data/prefs/prefs.json"):
        with open("data/prefs/prefs.json",'r') as prefs:
            for k,v in json.load(prefs).items(): driver.settings()[k] = v
    else:
        # NOTE(review): json.dump requires driver.settings() to be a
        # JSON-serializable mapping -- confirm for the Java/Scala driver types.
        with open("data/prefs/prefs.json",'w') as prefs:
            json.dump(driver.settings(), prefs)
    LDManager.init()
    print driver.settings()
def gfx_setup():
    """Graphics-mode extras: parse a sample model and smoke-test the framebuffer builder."""
    LDManager.parseModel("car.dat")
    # Import deferred to function scope -- presumably so GL-touching module
    # state only initialises when running with graphics; confirm.
    from scripts.gfx import framebuffer_builder
    framebuffer_builder.exec_test("data/shaders/framebuffer-test.json")
def main(*argv):
    """Entry point: pick the GFX or dedicated driver by CLI flag and run setup."""
    print "//---------------------------------------------"
    print
    print "Parsing startup arguments"
    # NOTE: "not argv or" is redundant -- an empty argv also fails the
    # membership test -- but harmless; default is the graphical driver.
    if not argv or "--dedicated" not in argv:
        Drivers.clearMainDriver()
        GFXDriver = Drivers.getNamedDriver("GFXDriver")
        driver = GFXDriver()
        Drivers.setMainDriver(driver)
        common_setup(driver)
        gfx_setup()
    else:
        Drivers.clearMainDriver()
        DedicatedDriver = Drivers.getNamedDriver("DedicatedDriver")
        driver = DedicatedDriver()
        Drivers.setMainDriver(driver)
        common_setup(driver)
    print
    print "---------------------------------------------//"
    print
if __name__ == "__main__":
    main(*sys.argv[1:])
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,692 | elfprince13/FreeBuild | refs/heads/master | /scripts/util/package_utils.py | import os.path
def is_package_dir(parent, path):
    """Return *path* if it names a Python package directory under *parent* (a directory containing __init__.py), else ""."""
    candidate = os.path.join(parent, path)
    if os.path.exists(candidate) and os.path.isdir(candidate) \
            and os.path.exists(os.path.join(candidate, "__init__.py")):
        return path
    return ""
def is_python_module(path):
    """Return the module name with a recognized Python extension stripped, or "".

    A bare extension (".py", ".pyc", ...) does not count as a module name.
    """
    if len(path) > 3 and path.endswith((".py", ".so")):
        return path[:-3]
    if len(path) > 4 and path.endswith((".pyc", ".pyd", ".pyo")):
        return path[:-4]
    return ""
def not_excluded_name(path):
    """True unless *path* is a relative directory entry or the package init file itself."""
    excluded = (".", "..", "__init__.py", "__init__.pyc")
    return path not in excluded
def determine_package(path):
    """Return the unique module and subpackage names directly under *path*.

    A name qualifies when it is a package directory (contains __init__.py) or
    a Python module/extension file; .py/.pyc pairs collapse to one entry via
    the set.  Raises ValueError when *path* is not a directory.
    """
    if not os.path.isdir(path):
        raise ValueError("'%s' is not a valid directory" % (path))
    names = set()
    for fname in os.listdir(path):
        if not not_excluded_name(fname):
            continue
        # Evaluate the (filesystem-touching) predicates once per entry; the
        # original generator expression computed each of them twice.
        resolved = is_package_dir(path, fname) or is_python_module(fname)
        if resolved:
            names.add(resolved)
    return list(names)
def export_symbols(module_name, globals, locals, fromlist, level=0):
    """Import *module_name* and copy each name in *fromlist* into *globals*.

    Note the differing default semantics as compared to __import__: level
    defaults to 0 (absolute import).
    """
    module = __import__(module_name, globals, locals, fromlist, level)
    for name in fromlist:
        globals[name] = module.__dict__[name]
55,693 | elfprince13/FreeBuild | refs/heads/master | /scripts/gfx/pipeline_builder.py | import json
from framebuffer_builder import framebuffer_from_description
from scripts.util.graphs import topsort
import os, os.path
from net.cemetech.sfgp.freebuild.gfx import Shader
from scala import Option
def shader_from_bundle(bundle_path,suppress_unbind=False):
    """Scan *bundle_path* for .vert/.geom/.frag sources and return a zero-arg factory that builds the Shader lazily.

    Missing stages stay "" (the Shader constructor's convention for "absent"
    is assumed -- confirm).  suppress_unbind is currently unused; kept for
    signature parity with the other *_from_* builders in this package.
    """
    bundle_contents = os.listdir(bundle_path)
    shader_stages = {'vert' : "", 'geom' : "", 'frag' : ""}
    for file in bundle_contents:
        name, ext = os.path.splitext(file)
        if ext and ext[1:] in shader_stages:
            shader_stages[ext[1:]] = os.path.join(bundle_path, file)
    # BUG FIX: the lambda referenced a misspelled "shaders_stages", raising
    # NameError the first time the factory was invoked.
    return lambda: Shader(shader_stages['vert'], shader_stages['frag'], shader_stages['geom'])
def stage_from_description(src_path,description):
    # NOTE(review): work-in-progress stub.  "dir" here resolves to the BUILTIN
    # dir() function (the local of that name lives in pipeline_from_description
    # below), so os.path.join raises TypeError; "target" is computed but never
    # used and the function returns nothing; the conditional also looks
    # inverted (a truthy 'target' value yields None).  Confirm intent before use.
    shader = shader_from_bundle(os.path.join(dir,description["shader_bundle"]))()
    target = Option.apply(None if description.get('target','display') else framebuffer_from_description(description))
def pipeline_from_description(src_path,description):
    # NOTE(review): unfinished -- splits the source path and prepares an empty
    # stages dict but never populates it or returns a pipeline object.
    dir,file = os.path.split(src_path)
    name,ext = os.path.splitext(file)
    stages = {}
def pipeline_from_json(path):
    """Parse the pipeline JSON at *path* eagerly; return a zero-arg factory deferring construction."""
    with open(path, 'r') as desc_handle:
        description = json.load(desc_handle)
    return lambda: pipeline_from_description(path, description)
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,694 | elfprince13/FreeBuild | refs/heads/master | /scripts/ui/__init__.py | print "Importing %s..."%(__name__),
import os.path
from scripts.util.package_utils import determine_package
# Enumerate this package's submodules at import time so star-imports see them.
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
__all__=determine_package(dir_path)
def configure_ui(width,height):
    """Initialise the UI layer at the given pixel size; returns False for a missing driver or non-positive dimensions.

    Much of the original libRocket wiring is commented out pending the
    CSSBox-based replacement.
    """
    print "Executing configure_ui(%d,%d)" % (width,height)
    #from _rocketcore import CreateContext, Vector2i
    from net.cemetech.sfgp.freebuild.drivers import Drivers
    from org.fit.cssbox.layout import BrowserCanvas
    from java.awt import Dimension
    driver = Drivers.getMainDriver()
    if driver == None or width <= 0 or height <= 0:
        ret = False
    else:
        dim = Dimension(width,height)
        #driver.setUiHandle()#GLG2DCanvas())
        #driver.getUiHandle().setSize(dim)
        #CreateContext("primary_ui_context",Vector2i(width,height))
        print " Initializing keymap (GLFW -> libRocket)..."
        from scripts.prefs.keymaps import getKeyMap
        #Drivers.clearKeyMap()
        #for g,r in getKeyMap().iteritems():
        # Drivers.mapKey(g,r)
        print " Finished initalizing keymap."
        print " Initializing default fonts..."
        from scripts.prefs.fonts import loadDefaultFonts
        loadDefaultFonts()
        print " Finished initializing default fonts."
        # Deferred import: main_menu touches kb/keymap state initialised above.
        import main_menu
        main_menu.init(driver.getUiHandle(),dim)
        ret = True
    return ret
print "Success!"
print " Contains these submodules: %s"%(", ".join(__all__)) | {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,695 | elfprince13/FreeBuild | refs/heads/master | /scripts/ui/main_menu.py | from scripts.util.nuparser import NUDomSource
from org.fit.cssbox.io import DefaultDocumentSource
from org.fit.cssbox.css import DOMAnalyzer, CSSNorm
from java.net import URL
from java.io import File as JFile
from java.awt import Dimension
from org.fit.cssbox.layout import BrowserCanvas
#import _rocketcore
#import _rocketcontrols
import keybindings as kb
# Placeholder key-binding callbacks used by init() below; they only log.
def test_keyA():
    print "HelloA"
def test_keyB():
    print "HelloB"
def showDebugger():
    print "Showing debugger"
def showConsole():
    print "Showing console"
def init(context,dim):
    """Render the logo page via CSSBox into *context* and register the default key bindings.

    context is the driver's UI handle; dim the canvas size.
    """
    docSource = DefaultDocumentSource(JFile("./data/ui/logo.html").toURI().normalize().toURL())
    # Parse with the validator.nu HTML5 parser rather than CSSBox's default.
    parser = NUDomSource(docSource)
    doc = parser.parse()
    da = DOMAnalyzer(doc, docSource.getURL())
    da.attributesToStyles() #convert the HTML presentation attributes to inline styles
    da.addStyleSheet(None, CSSNorm.stdStyleSheet(), DOMAnalyzer.Origin.AGENT) #use the standard style sheet
    da.addStyleSheet(None, CSSNorm.userStyleSheet(), DOMAnalyzer.Origin.AGENT) #use the additional style sheet
    da.addStyleSheet(None, CSSNorm.formsStyleSheet(), DOMAnalyzer.Origin.AGENT)
    da.getStyleSheets() #load the author style sheets
    browser = BrowserCanvas(da.getRoot(),da,dim,JFile("./data/ui").toURI().normalize().toURL())
    docSource.close()
    context.setDrawableComponent(browser)
    # NOTE(review): imgTest is unused; presumably left from the ImageIO debug
    # snippet commented out below.
    imgTest = browser.getImage()
    #from javax.imageio import ImageIO
    #from java.io import File
    #ImageIO.write(imgTest,"png",File("fleepasheep.png"))
    #browser.setSize(200,300)
    #browser.setLocation(200,150)
    #from javax.swing import JButton
    #mbutton = JButton("Howdy doo fellers")
    #context.setDrawableComponent(mbutton);
    #browser.add(mbutton)
    #mbutton.setSize(300,50)
    #mbutton.setLocation(50,50)
    #logo_doc = context.LoadDocument("/data/ui/logo.rml")
    # Key bindings: (callback, key, required modifiers, forbidden modifiers).
    test_binding = kb.BoundKey(test_keyA,kb.named_keys['A'],[kb.named_modifiers['shift']],[kb.named_modifiers['ctrl']] )
    test_binding2 = kb.BoundKey(test_keyB,kb.named_keys['B'])
    debuggerBinding = kb.BoundKey(showDebugger,kb.named_keys['`~'],[kb.named_modifiers['ctrl']],[kb.named_modifiers['shift']])
    consoleBinding = kb.BoundKey(showConsole,kb.named_keys['`~'],[kb.named_modifiers['ctrl'],kb.named_modifiers['shift']])
    # All registered at DEFAULT precedence, global ("anywhere") scope.
    kb.bindingRegistry[kb.DEFAULT].anywhere.append(test_binding)
    kb.bindingRegistry[kb.DEFAULT].anywhere.append(test_binding2)
    kb.bindingRegistry[kb.DEFAULT].anywhere.append(debuggerBinding)
    kb.bindingRegistry[kb.DEFAULT].anywhere.append(consoleBinding)
    #logo_doc.AddEventListener("keydown",kb.globalKeyHandler,True)
    #logo_doc.AddEventListener("keyup",kb.globalKeyHandler,False)
    #logo_doc.Show()
    #context.Update()
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
def topsort(V,E):
    """Kahn's-algorithm topological sort of the directed graph (V, E).

    V is a collection of vertices, E a collection of (src, dst) edges whose
    endpoints are all in V.  Returns one valid topological order as a list.
    Raises ValueError when the graph contains a cycle.
    """
    E = set(E)  # local working copy; edges are consumed as they are satisfied
    order = []
    # Index edges by endpoint (what classify_edges computes): outgoing[v] and
    # incoming[v] are the edge sets leaving/entering v.
    outgoing = {v: set() for v in V}
    incoming = {v: set() for v in V}
    for e in E:
        outgoing[e[0]].add(e)
        incoming[e[1]].add(e)
    ready = {v for v in V if not incoming[v]}
    # BUG FIX: the original raised "cyclic graph" for an EMPTY vertex set; an
    # empty graph is trivially acyclic.  Only a non-empty graph with no
    # source vertex is necessarily cyclic.
    if V and not ready:
        raise ValueError("Can't topsort a cyclic graph")
    while ready:
        n = ready.pop()
        order.append(n)
        while outgoing[n]:
            e = outgoing[n].pop()
            m = e[1]
            incoming[m].remove(e)
            E.remove(e)
            if not incoming[m]:
                ready.add(m)
    if E:
        # Leftover edges mean some vertices were never freed: a cycle.
        raise ValueError("Can't topsort a cyclic graph")
    return order
def has_no_incoming(v, E, cache=None):
    """True when vertex *v* has no incoming edges (scanning E, or using *cache* when given)."""
    return find_incoming(v, E, cache) == set()
def find_incoming(v, E, cache=None):
    """Set of edges in E ending at *v*; when *cache* (dict v -> edge set) is given, answer from it without scanning E."""
    # Identity test: "cache == None" would invoke __eq__ on arbitrary cache objects.
    return find_edges(v, E, 1) if cache is None else cache[v]
def find_outgoing(v, E, cache=None):
    """Set of edges in E starting at *v*; when *cache* (dict v -> edge set) is given, answer from it without scanning E."""
    # Identity test: "cache == None" would invoke __eq__ on arbitrary cache objects.
    return find_edges(v, E, 0) if cache is None else cache[v]
def find_edges(v, E, endpoint=-1):
    """Edges of E touching *v*: either end when endpoint < 0, otherwise only at index *endpoint*."""
    if endpoint < 0:
        return {e for e in E if v in e}
    return {e for e in E if e[endpoint] == v}
def classify_edges(V, E, edge_dim=2):
    """Bucket the edges of E by endpoint position.

    Returns a tuple of edge_dim dicts; caches[i][v] is the set of edges whose
    i-th endpoint is v.  For edge_dim == 2 that is (outgoing, incoming).
    """
    # range instead of py2-only xrange: identical iteration semantics here and
    # keeps the helper importable under Python 3 as well.
    caches = tuple({v : set() for v in V} for x in range(edge_dim))
    for e in E:
        for i, v in enumerate(e):
            caches[i][v].add(e)
    return caches
def test_topsort():
    """Smoke test: print one valid topological order of the classic example graph."""
    V = {7,5,3,11,8,2,9,10}
    E = {(7,11),(7,8),
        (5,11),
        (3,8),(3,10),
        (11,2),(11,9),(11,10)}
    # print(...) form runs identically under Python 2 (parenthesized
    # expression) and Python 3 (function call); the bare statement form broke
    # Python 3 parsing of this module.
    print(topsort(V,E))
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,697 | elfprince13/FreeBuild | refs/heads/master | /scripts/ui/keybindings.py |
# Constants for permissioning/binding precedence down the road.
# Precedence is a bitmask: USER|MOD (3) outranks MOD (2) outranks USER (1)
# outranks DEFAULT (0); globalKeyHandler walks levels highest-first.
DEFAULT = 0
USER = 1
MOD = 2
# Key event states, and the mapping from libRocket event type names.
KEY_RELEASED = 0
KEY_PRESSED = 1
state_map = {'keyup' : KEY_RELEASED, 'keydown' : KEY_PRESSED}
# Need to choose prefs appropriately!
from scripts.prefs.keymaps import getModifiersByIndex, getModifiersByName, getEffectiveKeyNames, getEffectiveKeyIndices
# Lookup tables: modifier/key name <-> numeric index, per the active keymap prefs.
named_modifiers = getModifiersByName()
indexed_modifiers = getModifiersByIndex()
named_keys = getEffectiveKeyNames()
indexed_keys = getEffectiveKeyIndices()
class BoundKey(object):
    """A keyboard chord (key plus modifier constraints) bound to a callback.

    The callback fires when the bound key is seen with ALL required modifiers
    held, NONE of the forbidden modifiers held, and the key state in
    action_states.
    """
    def __init__(self, callback, key_index, required_modifiers=None, forbidden_modifiers=None, action_states=(KEY_PRESSED,)):
        # None sentinels instead of mutable [] defaults so instances never
        # share the default lists (callers passing lists are unaffected).
        self.key_index = key_index
        self.required = required_modifiers if required_modifiers is not None else []
        self.forbidden = forbidden_modifiers if forbidden_modifiers is not None else []
        self.callback = callback
        self.action_states = action_states
        # OR the modifier bits together; sum() (the original) double-counts a
        # modifier accidentally listed twice, corrupting the mask.
        self.required_mask = self._combine(self.required)
        self.forbidden_mask = self._combine(self.forbidden)
    def _combine(self, flags):
        # Fold individual modifier bit flags into one mask.
        mask = 0
        for flag in flags:
            mask |= flag
        return mask
    def test(self, key, masks, state):
        """True when this binding should fire for (key, modifier bitmask, state)."""
        # BUG FIX: the original accepted the chord when ANY required modifier
        # overlapped the held mask, so e.g. a ctrl binding and a ctrl+shift
        # binding on the same key both fired on bare ctrl; every required
        # modifier must be held.
        return (self.key_index == key) \
            and ((masks & self.required_mask) == self.required_mask) \
            and (not (masks & self.forbidden_mask)) \
            and (state in self.action_states)
    def __repr__(self):
        return "BoundKey(%s,%s,required_modifiers=%s,forbidden_modifiers=%s)" % \
            (repr(self.callback), repr(self.key_index), repr(self.required), repr(self.forbidden))
    def __str__(self):
        # Human-readable chord: -[forbidden] [required] keyname
        return ("%s %s %s" % (
            ("-[%s]" % " ".join([indexed_modifiers[m] for m in self.forbidden]))
                if self.forbidden else "",
            ("[%s]" % " ".join([indexed_modifiers[m] for m in self.required]))
                if self.required else ""
            , indexed_keys[self.key_index])).strip()
    def __call__(self, key, modifiers, state, *args, **kwargs):
        """Invoke the callback (forwarding *args/**kwargs) when test() passes; returns whether it fired."""
        if self.test(key, modifiers, state):
            self.callback(*args, **kwargs)
            return True
        return False
class BindingSet(object):
    """Key bindings grouped by scope: global (anywhere), per-document, per-element."""
    def __init__(self, anywhere=None, byDocument=None, byElement=None):
        # BUG FIX: the original used mutable defaults ([] and {}), so every
        # BindingSet() in bindingRegistry shared the SAME containers -- a
        # binding appended at one precedence level appeared at all of them.
        # None sentinels give each instance fresh containers.
        self.anywhere = [] if anywhere is None else anywhere
        self.byDocument = {} if byDocument is None else byDocument
        self.byElement = {} if byElement is None else byElement
    def __repr__(self):
        # BUG FIX: the closing parenthesis was missing from the format string.
        return "BindingSet(anywhere=%s,byDocument=%s,byElement=%s)" % (repr(self.anywhere), repr(self.byDocument), repr(self.byElement))
    def __str__(self):
        return """BindingSet:\n\tAnywhere: %s\n\tBy document: %s\n\tBy Element: %s""" % (str(self.anywhere), str(self.byDocument), str(self.byElement))
# Binding sets keyed by precedence level; globalKeyHandler iterates these from
# the highest level (USER|MOD == 3) down to DEFAULT (0) and stops at the first
# level in which a binding fires.
bindingRegistry = {
    DEFAULT : BindingSet(),
    USER : BindingSet(),
    MOD: BindingSet(),
    USER|MOD : BindingSet()
}
def globalKeyHandler():
    """Dispatch a key event against bindingRegistry, highest precedence first.

    event/element/self are injected as globals by the embedding UI layer
    (libRocket-style event scripting) rather than passed as parameters --
    presumably set before each invocation; confirm against the host bindings.
    """
    global event,element,self
    key = event.parameters['key_identifier']
    # Collect held modifiers (sorted by index for a stable order) and fold
    # them into one bitmask for BoundKey.test.
    modifiers = [value for value,name in sorted(indexed_modifiers.iteritems()) if event.parameters['%s_key' % (name)]]
    mask = sum(modifiers)
    # For each precedence level (highest first) try: global bindings, then
    # this document's bindings, then this element's.  Each for/else falls
    # through to the next scope only when no binding fired; any firing breaks
    # the inner loop, skips the else-chain, and hits the outer break below,
    # so lower-precedence levels never see a consumed key.
    for bound_level,bound_set in sorted(bindingRegistry.iteritems(),reverse=True):
        # Hoping for now this is the correct way to compare element/document references:
        for bound_key in bound_set.anywhere:
            if bound_key(key,mask,state_map[event.type]):
                event.StopPropagation()
                break
        else:
            for bound_key in bound_set.byDocument.get(self,[]):
                if bound_key(key,mask,state_map[event.type]):
                    event.StopPropagation()
                    break
            else:
                # element is only injected for element-targeted events.
                if "element" not in globals():
                    continue
                else:
                    for bound_key in bound_set.byElement.get(element,[]):
                        if bound_key(key,mask,state_map[event.type]):
                            event.StopPropagation()
                            break
                    else:
                        # Nothing fired at this level: try the next one down.
                        continue
        break
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,698 | elfprince13/FreeBuild | refs/heads/master | /shader_editor.py | import sys
import os,os.path
from net.cemetech.sfgp.freebuild.drivers import Drivers
from java.lang import System
def main(*argv):
    """Stand-alone shader editor entry point: init GL, open the editor, and pump its compile-task queue on this (GL-owning) thread."""
    print "//---------------------------------------------"
    print
    print "Parsing startup arguments for shader editor"
    Drivers.clearMainDriver()
    from net.cemetech.sfgp.freebuild.gfx import GFX
    gfxCtx = GFX.init("Shader Editor Viz. Frame")
    if gfxCtx != None:
        print "Success!"
    else:
        print "Initialization failed."
    print
    print "---------------------------------------------//"
    print
    from scripts.editor import shaderUI
    from java.util.concurrent import TimeUnit
    # init_editor returns the NativeCompiler whose task queue we must drain
    # here, because compiles/links need the GL context owned by this thread.
    compiler_hook = shaderUI.init_editor()
    if gfxCtx != None:
        while gfxCtx.open():
            task = compiler_hook.tasks.poll(300,TimeUnit.MILLISECONDS) # This blocks too hard
            if task != None:
                task.run()
if __name__ == "__main__":
    main(*sys.argv[1:])
| {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,699 | elfprince13/FreeBuild | refs/heads/master | /scripts/prefs/fonts/default.py | # Load up DejaVu fonts as a nice default
import os, os.path#, _rocketcore
font_path = "./data/ui/fonts/dejavu"
def loadDefaultFonts():
if os.path.exists(font_path) and os.path.isdir(font_path):
paths = os.listdir(font_path)
font_files = [os.path.join(font_path,name) for name in paths if len(name)>4 and name[-4:].lower() == ".ttf"]
for font in font_files:
pass#_rocketcore.LoadFontFace(font)
else:
print " Could not load fonts" | {"/scripts/gfx/pipeline_builder.py": ["/scripts/util/graphs.py"]} |
55,710 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /tests/booking_test.py | import unittest
from repositories.booking_repository import *
from repositories.member_repository import *
from repositories.gym_class_repository import *
from repositories.staff_member_repository import *
class TestBooking(unittest.TestCase):
    """Unit tests for the Booking model (a Member attending a Gym_class)."""

    def setUp(self):
        member1 = Member('Jack', 'Premium')
        gym_class1 = Gym_class('Spin Class', 1, '2021-09-01', '09:00', 45, 10)
        self.booking = Booking(member1, gym_class1)

    def test_booking_has_name(self):
        self.assertEqual("Spin Class", self.booking.gym_class.name)

    def test_booking_has_instructor(self):
        self.assertEqual(1, self.booking.gym_class.instructor)

    def test_booking_has_member(self):
        self.assertEqual("Jack", self.booking.member.name)

    def test_booking_member_has_type(self):
        # BUG FIX: this was a second "def test_booking_has_name", which
        # silently replaced the first definition so the class-name assertion
        # never ran; renamed so unittest discovers both tests.
        self.assertEqual("Premium", self.booking.member.type)


if __name__ == '__main__':
    unittest.main()
55,711 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /repositories/booking_repository.py | from db.run_sql import run_sql
from models.booking import Booking
import repositories.member_repository as member_repository
import repositories.gym_class_repository as gym_class_repository
import pdb
def save(booking):
    """Insert *booking* into the bookings table; sets and returns the booking with its generated id."""
    sql = "INSERT INTO bookings ( member_id, gym_class_id ) VALUES ( %s, %s) RETURNING id;"
    values = [booking.member.id, booking.gym_class.id]
    # check_capacity = f"select count(*) from bookings where gym_class_id = {booking.gym_class.id};"
    # checked_capacity = run_sql(check_capacity)
    # if checked_capacity[0][0] > booking.gym_class.capacity:
    # return None
    # else:
    # NOTE(review): check_capacity's return value is discarded, so the insert
    # proceeds even when the class is full (unlike the commented-out draft
    # above) -- presumably callers check capacity themselves first; confirm.
    check_capacity(booking)
    results = run_sql( sql, values )
    booking.id = results[0]['id']
    return booking
def check_capacity(booking):
    """Return [current_booking_count, status] for the booking's class.

    status is "Full" when the class already has at least capacity bookings,
    otherwise "Spaces".
    """
    # Renamed locals: the original shadowed this function's own name with the
    # SQL string and accumulated into an uninformative "empty_list".
    count_sql = f"select count(*) from bookings where gym_class_id = {booking.gym_class.id};"
    rows = run_sql(count_sql)
    booked = rows[0][0]
    status = "Full" if booked >= booking.gym_class.capacity else "Spaces"
    return [booked, status]
def select_all():
    """Fetch every bookings row, hydrating the Member and Gym_class for each."""
    rows = run_sql("SELECT * FROM bookings")
    return [
        Booking(
            member_repository.select(row['member_id']),
            gym_class_repository.select(row['gym_class_id']),
            row['id'])
        for row in rows
    ]
def delete_all():
    """Wipe the bookings table."""
    run_sql("DELETE FROM bookings")
def delete(id):
    """Delete the booking row with primary key *id*."""
    run_sql("DELETE FROM bookings WHERE id = %s", [id])
def delete_member_from_class(class_id, member_id):
    """Remove one member's booking for one class.

    BUG FIX: the WHERE clause named a nonexistent "class_id" column; the
    bookings schema uses "gym_class_id" (see save/select_all above).
    """
    sql = "DELETE FROM bookings WHERE gym_class_id = %s AND member_id = %s"
    values = [class_id, member_id]
    run_sql(sql, values)
55,712 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /controllers/gym_class_controller.py | from flask import Flask, render_template, request, redirect
from flask import Blueprint
from models.gym_class import Gym_class
import repositories.gym_class_repository as gym_class_repository
import repositories.member_repository as member_repository
import repositories.booking_repository as booking_repository
import repositories.staff_member_repository as staff_member_repository
from datetime import datetime
import pdb
# Blueprint grouping all /gym_classes routes; registered with the app elsewhere.
gym_classes_blueprint = Blueprint("gym_classes", __name__)
@gym_classes_blueprint.route("/gym_classes")
def gym_classes():
    """List every gym class along with its capacity status and the staff roster."""
    instructors = staff_member_repository.select_all()
    gym_classes = gym_class_repository.select_all()
    capacities = [gym_class_repository.check_capacity(gym_class) for gym_class in gym_classes]
    return render_template("gym_classes/index.html", gym_classes = gym_classes, capacities = capacities, instructors = instructors)
@gym_classes_blueprint.route("/gym_classes/<id>")
def show(id):
    """Detail page for one gym class: its members, capacity status, and all classes for context."""
    gym_class = gym_class_repository.select(id)
    members = gym_class_repository.members(gym_class)
    instructors = staff_member_repository.select_all()
    gym_classes = gym_class_repository.select_all()
    capacities = [gym_class_repository.check_capacity(gym_class)]
    return render_template("gym_classes/show.html", members=members, gym_class = gym_class, gym_classes=gym_classes, capacities=capacities, instructors = instructors)
@gym_classes_blueprint.route("/gym_classes/new", methods=['GET'])
def new_class():
    """Render the new-class form, with all classes and instructor choices."""
    all_classes = gym_class_repository.select_all()
    staff_members = staff_member_repository.select_all()
    return render_template("gym_classes/new.html", gym_class = all_classes, staff_members = staff_members)
@gym_classes_blueprint.route("/gym_classes", methods=['POST'])
def create_task():
    """Create a gym class from the posted form; only an Instructor or Manager may be assigned to teach."""
    name = request.form['class_name']
    instructor = request.form['instructor']
    date = request.form['date']
    time = request.form['time']
    duration = request.form['duration']
    capacity = request.form['capacity']
    staff_member = staff_member_repository.select(instructor)
    # Guard clause: reject assignment of non-teaching staff up front.
    if staff_member.job_type not in ("Instructor", "Manager"):
        return render_template("gym_classes/error_permissions.html", staff_member = staff_member)
    gym_class = Gym_class(name, instructor, date, time, duration, capacity)
    gym_class_repository.save(gym_class)
    return redirect('/gym_classes')
@gym_classes_blueprint.route("/gym_classes/<id>/edit", methods=['GET'])
def update_class(id):
    """Render the edit form for one gym class."""
    target = gym_class_repository.select(id)
    instructors = staff_member_repository.select_all()
    return render_template("gym_classes/edit.html", gym_class=target, staff_members=instructors)
@gym_classes_blueprint.route("/gym_classes/<id>", methods=['POST'])
def edit_class(id):
    """Update an existing gym class with the posted form values.

    ``id`` comes from the URL; all other fields come from the form.
    (The previous version contained a no-op ``id = id`` self-assignment.)
    """
    name = request.form['class_name']
    instructor = request.form['instructor']
    date = request.form['date']
    time = request.form['time']
    duration = request.form['duration']
    capacity = request.form['capacity']
    # NOTE(review): unlike create_task, there is no Instructor/Manager
    # permission check here — confirm whether edits should be restricted too.
    gym_class = Gym_class(name, instructor, date, time, duration, capacity, id)
    gym_class_repository.update(gym_class)
    return redirect('/gym_classes')
@gym_classes_blueprint.route("/gym_classes/<id>/delete", methods=['POST'])
def delete_class(id):
    """Delete one gym class, then return to the class list."""
    gym_class_repository.delete(id)
    return redirect('/gym_classes')
@gym_classes_blueprint.route("/gym_classes/<class_id>/<member_id>/delete", methods=['POST'])
def delete_from_class(class_id, member_id):
    """Remove one member's booking for a class, then return to the class list."""
    booking_row = gym_class_repository.find_booking_id(int(class_id), int(member_id))
    booking_repository.delete(booking_row[0])  # first column holds the booking id
    return redirect('/gym_classes')
55,713 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /controllers/booking_controller.py | from flask import Flask, render_template, request, redirect
from flask import Blueprint
from models.booking import Booking
import repositories.booking_repository as booking_repository
import repositories.member_repository as member_repository
import repositories.gym_class_repository as gym_class_repository
import datetime
import pdb
bookings_blueprint = Blueprint("bookings", __name__)


@bookings_blueprint.route("/bookings")
def bookings():
    """List every booking."""
    all_bookings = booking_repository.select_all()
    return render_template("bookings/index.html", bookings=all_bookings)
@bookings_blueprint.route("/bookings/new", methods=['GET'])
def new_booking():
    """Render the booking form with members, classes and per-class capacity info."""
    members = member_repository.select_all()
    gym_classes = gym_class_repository.select_all()
    # Capacity entries are positionally aligned with gym_classes.
    capacities = [gym_class_repository.check_capacity(c) for c in gym_classes]
    return render_template("bookings/new.html", members=members, gym_classes=gym_classes, capacities=capacities)
@bookings_blueprint.route("/bookings", methods=['POST'])
def create_booking():
    """Create a booking for a member on a gym class.

    Rejections, in order of precedence:
      * class already full -> error_capacity
      * member is deactivated -> error_permissions
      * non-premium member booking a peak-hours class -> error_peak_hours
        (peak hours are 08:00-10:00 and 16:00-18:00, exclusive bounds —
        NOTE(review): a class at exactly 08:00/10:00/16:00/18:00 is allowed;
        confirm that boundary behaviour is intended)
      * duplicate booking -> error_duplicate
    """
    member = member_repository.select(request.form['member_id'])
    gym_class = gym_class_repository.select(request.form['gym_class_id'])
    booking = Booking(member, gym_class)

    booked_count, status = booking_repository.check_capacity(booking)
    if status != "Spaces":
        return render_template("bookings/error_capacity.html", gym_class=gym_class, member=member, class_capacity=booked_count)

    membership = member.type.lower()
    if membership == "deactivated":
        return render_template("bookings/error_permissions.html", gym_class=gym_class, member=member)

    # Standard members cannot book during peak hours; premium members can.
    in_peak = (datetime.time(8, 0) < gym_class.time < datetime.time(10, 0)
               or datetime.time(16, 0) < gym_class.time < datetime.time(18, 0))
    if membership != "premium" and in_peak:
        return render_template("bookings/error_peak_hours.html", gym_class=gym_class, member=member)

    try:
        booking_repository.save(booking)
    except Exception:  # was a bare except; assume a DB uniqueness violation means a duplicate booking
        return render_template("bookings/error_duplicate.html", gym_class=gym_class, member=member)
    return redirect('/bookings')
@bookings_blueprint.route("/bookings/<id>/delete", methods=['POST'])
def delete_task(id):
    """Delete one booking by id, then return to the bookings list."""
    booking_repository.delete(id)
    return redirect('/bookings')
55,714 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /controllers/staff_member_controller.py | from flask import Flask, render_template, request, redirect
from flask import Blueprint
from models.staff_member import Staff_Member
import repositories.staff_member_repository as staff_member_repository
import pdb
staff_members_blueprint = Blueprint("staff_members", __name__)


@staff_members_blueprint.route("/staff_members")
def staff_members():
    """List every staff member."""
    everyone = staff_member_repository.select_all()
    return render_template("staff_members/index.html", staff_members=everyone)
@staff_members_blueprint.route("/staff_members/<id>")
def show(id):
    """Show one staff member and the gym classes they teach."""
    staff_member = staff_member_repository.select(id)
    taught_classes = staff_member_repository.gym_classes(staff_member)
    return render_template("staff_members/show.html", staff_member=staff_member, gym_classes=taught_classes)
@staff_members_blueprint.route("/staff_members/new", methods=['GET'])
def new_staff_member():
    """Render the form for creating a new staff member."""
    existing = staff_member_repository.select_all()
    return render_template("staff_members/new.html", staff_members=existing)
@staff_members_blueprint.route("/staff_members", methods=['POST'])
def create_staff_member():
    """Create a staff member from the posted form, then return to the list."""
    new_member = Staff_Member(request.form['staff_member_name'], request.form['job_type'])
    staff_member_repository.save(new_member)
    return redirect('/staff_members')
@staff_members_blueprint.route("/staff_members/<id>/edit", methods=['GET'])
def update_staff_member(id):
    """Render the edit form for one staff member."""
    target = staff_member_repository.select(id)
    return render_template("staff_members/edit.html", staff_member=target)
@staff_members_blueprint.route("/staff_members/<id>", methods=['POST'])
def edit_staff_member(id):
    """Update one staff member with the posted form values."""
    updated = Staff_Member(request.form['staff_member_name'], request.form['job_type'], id)
    staff_member_repository.update(updated)
    return redirect('/staff_members')
@staff_members_blueprint.route("/staff_members/<id>/delete", methods=['POST'])
def delete_staff_member(id):
    """Delete one staff member by id, then return to the staff list."""
    staff_member_repository.delete(id)
    return redirect('/staff_members')
| {"/tests/booking_test.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py", "/repositories/staff_member_repository.py"], "/repositories/booking_repository.py": ["/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/gym_class_controller.py": ["/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/controllers/booking_controller.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/staff_member_controller.py": ["/models/staff_member.py", "/repositories/staff_member_repository.py"], "/console.py": ["/models/staff_member.py", "/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/repositories/staff_member_repository.py": ["/models/staff_member.py"], "/tests/gym_class_test.py": ["/repositories/gym_class_repository.py"], "/run_tests.py": ["/tests/booking_test.py", "/tests/gym_class_test.py", "/tests/member_test.py", "/tests/staff_member_test.py"], "/tests/member_test.py": ["/repositories/member_repository.py"], "/tests/staff_member_test.py": ["/repositories/staff_member_repository.py"]} |
55,715 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /console.py | # from CodeGym.models.staff_member import Staff_Member
import pdb
from models.gym_class import Gym_class
from models.member import Member
from models.booking import Booking
from models.staff_member import Staff_Member
import repositories.gym_class_repository as gym_class_repository
import repositories.member_repository as member_repository
import repositories.booking_repository as booking_repository
import repositories.staff_member_repository as staff_member_repository
# Reset every table so the seed data starts from a clean slate.
booking_repository.delete_all()
gym_class_repository.delete_all()
member_repository.delete_all()
staff_member_repository.delete_all()

# Seed members as (name, membership type), in the original insert order.
_member_rows = [
    ('Jack', 'Premium'), ('John', 'Premium'), ('Gerald', 'Premium'),
    ('Carlos', 'Standard'), ('David', 'Standard'), ('Andrew', 'Premium'),
    ('Ian', 'Premium'), ('Lewis', 'Premium'), ('Morven', 'Standard'),
    ('Callum', 'Premium'), ('Vinnie', 'Standard'), ('Willem', 'Standard'),
    ('Tony', 'Premium'), ('Athina', 'Premium'), ('Neil', 'Premium'),
    ('Andrew', 'Standard'), ('Lucinda', 'Premium'), ('Jordan', 'Premium'),
    ('Craig', 'Premium'), ('Janice', 'Standard'), ('Alex', 'Premium'),
]
members = []
for _name, _membership in _member_rows:
    _m = Member(_name, _membership)
    member_repository.save(_m)
    members.append(_m)

# Seed classes as (name, instructor id, date, time, duration, capacity).
_class_rows = [
    ('Spin Class', 1, '2021-09-01', '09:00', 45, 10),
    ('Leg Day', 2, '2021-09-01', '09:00', 60, 8),
    ('Arm Day', 3, '2021-08-30', '12:00', 60, 12),
]
gym_classes = []
for _row in _class_rows:
    _g = Gym_class(*_row)
    gym_class_repository.save(_g)
    gym_classes.append(_g)

# Seed staff as (name, job type).
_staff_rows = [
    ('Steve', 'Instructor'), ('Stan', 'Instructor'), ('Jen', 'Instructor'),
    ('Craig', 'Instructor'), ('Pete', 'Instructor'), ('Jeffrey', 'Manager'),
    ('Gymkeeper Willie', 'Cleaner'),
]
for _name, _job in _staff_rows:
    staff_member_repository.save(Staff_Member(_name, _job))

# Bookings as (member number, class number), 1-based to match the old
# member1/gym_class1 style names, saved in the original order.
_booking_pairs = [
    (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (6, 3), (7, 3), (8, 3),
    (9, 2), (10, 3), (10, 1), (11, 1), (11, 2), (12, 1), (12, 3),
    (13, 2), (14, 1), (15, 1), (16, 1), (17, 3), (18, 3),
]
for _m_no, _c_no in _booking_pairs:
    booking_repository.save(Booking(members[_m_no - 1], gym_classes[_c_no - 1]))

select_all_classes = gym_class_repository.select_all()
55,716 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /repositories/staff_member_repository.py | from db.run_sql import run_sql
from models.gym_class import Gym_class
from models.staff_member import Staff_Member
import pdb
def save(staff_member):
    """Insert a staff member row and attach the generated database id."""
    sql = "INSERT INTO staff_members( name, job_type ) VALUES ( %s, %s ) RETURNING id"
    rows = run_sql(sql, [staff_member.name, staff_member.job_type])
    staff_member.id = rows[0]['id']
    return staff_member
def select_all():
    """Return every staff member as a list of Staff_Member objects."""
    rows = run_sql("SELECT * FROM staff_members")
    return [Staff_Member(row['name'], row['job_type'], row['id']) for row in rows]
def select(id):
    """Return the staff member with the given id, or None when no row matches.

    The previous version indexed ``run_sql(...)[0]`` unconditionally, which
    raised IndexError for an unknown id — the ``if result is not None``
    guard could never fire, so the documented None return never happened.
    """
    sql = "SELECT * FROM staff_members WHERE id = %s"
    results = run_sql(sql, [id])
    if not results:
        return None
    row = results[0]
    return Staff_Member(row['name'], row['job_type'], row['id'])
def delete_all():
    # Delete every staff_members row.
    # NOTE(review): an identical delete_all is defined again at the bottom of
    # this module; the later definition shadows this one at import time.
    sql = "DELETE FROM staff_members"
    run_sql(sql)
def gym_classes(staff_member):
    """Return all Gym_class objects taught by the given staff member."""
    sql = "SELECT gym_classes.* FROM gym_classes WHERE gym_classes.instructor = %s;"
    rows = run_sql(sql, [staff_member.id])
    return [
        Gym_class(row['name'], row['instructor'], row['date'], row['time'],
                  row['duration'], row['capacity'], row['id'])
        for row in rows
    ]
def update(staff_member):
    """Persist name/job_type changes for an existing staff member."""
    run_sql(
        "UPDATE staff_members SET (name, job_type) = (%s, %s) WHERE id = %s",
        [staff_member.name, staff_member.job_type, staff_member.id],
    )
def delete(id):
    """Delete the staff member row with the given id."""
    run_sql("DELETE FROM staff_members WHERE id = %s", [id])
def delete_all():
    # Delete every staff_members row.
    # NOTE(review): this is a byte-identical duplicate of the delete_all
    # defined earlier in this module — one of the two should be removed.
    sql = "DELETE FROM staff_members"
    run_sql(sql)
| {"/tests/booking_test.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py", "/repositories/staff_member_repository.py"], "/repositories/booking_repository.py": ["/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/gym_class_controller.py": ["/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/controllers/booking_controller.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/staff_member_controller.py": ["/models/staff_member.py", "/repositories/staff_member_repository.py"], "/console.py": ["/models/staff_member.py", "/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/repositories/staff_member_repository.py": ["/models/staff_member.py"], "/tests/gym_class_test.py": ["/repositories/gym_class_repository.py"], "/run_tests.py": ["/tests/booking_test.py", "/tests/gym_class_test.py", "/tests/member_test.py", "/tests/staff_member_test.py"], "/tests/member_test.py": ["/repositories/member_repository.py"], "/tests/staff_member_test.py": ["/repositories/staff_member_repository.py"]} |
55,717 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /repositories/member_repository.py | from db.run_sql import run_sql
from models.gym_class import Gym_class
from models.member import Member
def save(member):
    """Insert a member row and attach the generated database id."""
    sql = "INSERT INTO members( name, type ) VALUES ( %s, %s ) RETURNING id"
    rows = run_sql(sql, [member.name, member.type])
    member.id = rows[0]['id']
    return member
def select_all():
    """Return every member as a list of Member objects."""
    rows = run_sql("SELECT * FROM members")
    return [Member(row['name'], row['type'], row['id']) for row in rows]
def select(id):
    """Return the member with the given id, or None when no row matches.

    The previous version indexed ``run_sql(...)[0]`` unconditionally, which
    raised IndexError for an unknown id instead of returning None as the
    ``member = None`` default suggested.
    """
    sql = "SELECT * FROM members WHERE id = %s"
    results = run_sql(sql, [id])
    if not results:
        return None
    row = results[0]
    return Member(row['name'], row['type'], row['id'])
def delete_all():
    # Delete every members row.
    # NOTE(review): an identical delete_all is defined again at the bottom of
    # this module; the later definition shadows this one at import time.
    sql = "DELETE FROM members"
    run_sql(sql)
def gym_classes(member):
    """Return all Gym_class objects the member has booked (via bookings)."""
    sql = "SELECT gym_classes.* FROM gym_classes INNER JOIN bookings ON bookings.gym_class_id = gym_classes.id WHERE member_id = %s"
    rows = run_sql(sql, [member.id])
    return [
        Gym_class(row['name'], row['instructor'], row['date'], row['time'],
                  row['duration'], row['capacity'], row['id'])
        for row in rows
    ]
def update(member):
    """Persist name/type changes for an existing member."""
    run_sql(
        "UPDATE members SET (name, type) = (%s, %s) WHERE id = %s",
        [member.name, member.type, member.id],
    )
def delete(id):
    """Delete the member row with the given id."""
    run_sql("DELETE FROM members WHERE id = %s", [id])
def delete_all():
    # Delete every members row.
    # NOTE(review): this is a byte-identical duplicate of the delete_all
    # defined earlier in this module — one of the two should be removed.
    sql = "DELETE FROM members"
    run_sql(sql)
| {"/tests/booking_test.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py", "/repositories/staff_member_repository.py"], "/repositories/booking_repository.py": ["/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/gym_class_controller.py": ["/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/controllers/booking_controller.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/staff_member_controller.py": ["/models/staff_member.py", "/repositories/staff_member_repository.py"], "/console.py": ["/models/staff_member.py", "/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/repositories/staff_member_repository.py": ["/models/staff_member.py"], "/tests/gym_class_test.py": ["/repositories/gym_class_repository.py"], "/run_tests.py": ["/tests/booking_test.py", "/tests/gym_class_test.py", "/tests/member_test.py", "/tests/staff_member_test.py"], "/tests/member_test.py": ["/repositories/member_repository.py"], "/tests/staff_member_test.py": ["/repositories/staff_member_repository.py"]} |
55,718 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /tests/gym_class_test.py | import unittest
from repositories.gym_class_repository import *
class TestGym_Class(unittest.TestCase):
    """Unit tests for the Gym_class model constructor.

    The original class reused the name ``test_gym_class_has_name`` for five
    different tests, so only the last definition was ever collected and run;
    each test now has a unique, descriptive name.
    """

    def setUp(self):
        self.gym_class = Gym_class('Arm Day', 3, '2021-08-30', '12:00', 60, 12)

    def test_gym_class_has_name(self):
        self.assertEqual("Arm Day", self.gym_class.name)

    def test_gym_class_has_instructor(self):
        self.assertEqual(3, self.gym_class.instructor)

    def test_gym_class_has_date(self):
        self.assertEqual('2021-08-30', self.gym_class.date)

    def test_gym_class_has_time(self):
        self.assertEqual("12:00", self.gym_class.time)

    def test_gym_class_has_duration(self):
        self.assertEqual(60, self.gym_class.duration)

    def test_gym_class_has_capacity(self):
        self.assertEqual(12, self.gym_class.capacity)
# Allow running this test module directly (outside run_tests.py).
if __name__ == '__main__':
    unittest.main()
55,719 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /run_tests.py | import unittest
from tests.booking_test import *
from tests.gym_class_test import *
from tests.member_test import *
from tests.staff_member_test import *
# Running this module executes every test case pulled in by the star imports.
if __name__ == '__main__':
    unittest.main()
55,720 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /tests/member_test.py | import unittest
from repositories.member_repository import *
class TestMember(unittest.TestCase):
    """Unit tests for the Member model constructor."""

    def setUp(self):
        # A fresh Member is built before every test.
        self.member = Member('Jack', 'Premium')

    def test_member_has_name(self):
        self.assertEqual(self.member.name, "Jack")

    def test_guest_has_membership_type(self):
        self.assertEqual(self.member.type, 'Premium')
# Allow running this test module directly (outside run_tests.py).
if __name__ == '__main__':
    unittest.main()
55,721 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /tests/staff_member_test.py | import unittest
from repositories.staff_member_repository import *
class TestStaff_Member(unittest.TestCase):
    """Unit tests for the Staff_Member model constructor."""

    def setUp(self):
        # A fresh Staff_Member is built before every test.
        self.staff_member = Staff_Member('Steve', 'Instructor')

    def test_member_has_name(self):
        self.assertEqual(self.staff_member.name, "Steve")

    def test_guest_has_membership_type(self):
        self.assertEqual(self.staff_member.job_type, 'Instructor')
# Allow running this test module directly (outside run_tests.py).
if __name__ == '__main__':
    unittest.main()
55,722 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /models/staff_member.py | class Staff_Member:
def __init__(self, name, job_type, id = None):
    """Store the staff member's name, job title and optional database id."""
    self.name = name          # display name
    self.job_type = job_type  # e.g. "Instructor", "Manager", "Cleaner" per console.py seed data
    self.id = id              # database primary key; None until saved by the repository
| {"/tests/booking_test.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py", "/repositories/staff_member_repository.py"], "/repositories/booking_repository.py": ["/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/gym_class_controller.py": ["/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/controllers/booking_controller.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/staff_member_controller.py": ["/models/staff_member.py", "/repositories/staff_member_repository.py"], "/console.py": ["/models/staff_member.py", "/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/repositories/staff_member_repository.py": ["/models/staff_member.py"], "/tests/gym_class_test.py": ["/repositories/gym_class_repository.py"], "/run_tests.py": ["/tests/booking_test.py", "/tests/gym_class_test.py", "/tests/member_test.py", "/tests/staff_member_test.py"], "/tests/member_test.py": ["/repositories/member_repository.py"], "/tests/staff_member_test.py": ["/repositories/staff_member_repository.py"]} |
55,723 | KieranShek/CodeGym_Flask_Full_Stack_Web_App | refs/heads/main | /repositories/gym_class_repository.py | from db.run_sql import run_sql
from models.gym_class import Gym_class
from models.member import Member
def save(gym_class):
    """Insert *gym_class* into the database and attach the generated id."""
    sql = "INSERT INTO gym_classes(name, instructor, date, time, duration, capacity) VALUES ( %s, %s, %s, %s, %s, %s ) RETURNING id"
    params = [gym_class.name, gym_class.instructor, gym_class.date,
              gym_class.time, gym_class.duration, gym_class.capacity]
    gym_class.id = run_sql(sql, params)[0]['id']
    return gym_class
def check_capacity(gym_class):
    """Return [current_booking_count, "Full" | "Spaces"] for *gym_class*.

    "Full" when the number of bookings has reached gym_class.capacity.
    """
    # Fix: the original interpolated gym_class.id with an f-string; use a
    # parameterized query instead (consistent with the rest of this module
    # and safe against SQL injection).
    sql = "SELECT count(*) FROM bookings WHERE gym_class_id = %s"
    count = run_sql(sql, [gym_class.id])[0][0]
    status = "Full" if count >= gym_class.capacity else "Spaces"
    return [count, status]
def select_all():
    """Return every row of gym_classes as a list of Gym_class objects."""
    rows = run_sql("SELECT * FROM gym_classes")
    return [
        Gym_class(row['name'], row['instructor'], row['date'], row['time'],
                  row['duration'], row['capacity'], row['id'])
        for row in rows
    ]
def select(id):
    """Return the Gym_class with the given id, or None if no such row exists.

    Fix: the original indexed run_sql(...)[0] before its `is not None`
    check, so a missing id raised IndexError instead of returning None.
    """
    sql = "SELECT * FROM gym_classes WHERE id = %s"
    results = run_sql(sql, [id])
    if not results:
        return None
    row = results[0]
    return Gym_class(row['name'], row['instructor'], row['date'], row['time'],
                     row['duration'], row['capacity'], row['id'])
def delete_all():
    """Remove every row from gym_classes."""
    run_sql("DELETE FROM gym_classes")
def members(gym_class):
    """Return the Member objects who hold a booking for *gym_class*."""
    sql = "SELECT members.* FROM members INNER JOIN bookings ON bookings.member_id = members.id WHERE gym_class_id = %s"
    result_rows = run_sql(sql, [gym_class.id])
    return [Member(r['name'], r['type'], r['id']) for r in result_rows]
def update(gym_class):
    """Persist edits to an existing gym class, matched on its id."""
    sql = "UPDATE gym_classes SET (name, instructor, date, time, duration, capacity) = (%s, %s, %s, %s, %s, %s) WHERE id = %s"
    run_sql(sql, [gym_class.name, gym_class.instructor, gym_class.date,
                  gym_class.time, gym_class.duration, gym_class.capacity,
                  gym_class.id])
def delete(id):
    """Delete the gym class with the given id."""
    run_sql("DELETE FROM gym_classes WHERE id = %s", [id])
def find_booking_id(class_id, member_id):
    """Return the first bookings row (selected column: id) linking the class
    and the member.

    NOTE(review): this returns the whole result row, not the bare id value --
    confirm callers expect the row mapping rather than row['id'].
    """
    sql = "SELECT id FROM bookings WHERE gym_class_id = %s AND member_id = %s"
    rows = run_sql(sql, [class_id, member_id])
    return rows[0]
| {"/tests/booking_test.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py", "/repositories/staff_member_repository.py"], "/repositories/booking_repository.py": ["/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/gym_class_controller.py": ["/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/controllers/booking_controller.py": ["/repositories/booking_repository.py", "/repositories/member_repository.py", "/repositories/gym_class_repository.py"], "/controllers/staff_member_controller.py": ["/models/staff_member.py", "/repositories/staff_member_repository.py"], "/console.py": ["/models/staff_member.py", "/repositories/gym_class_repository.py", "/repositories/member_repository.py", "/repositories/booking_repository.py", "/repositories/staff_member_repository.py"], "/repositories/staff_member_repository.py": ["/models/staff_member.py"], "/tests/gym_class_test.py": ["/repositories/gym_class_repository.py"], "/run_tests.py": ["/tests/booking_test.py", "/tests/gym_class_test.py", "/tests/member_test.py", "/tests/staff_member_test.py"], "/tests/member_test.py": ["/repositories/member_repository.py"], "/tests/staff_member_test.py": ["/repositories/staff_member_repository.py"]} |
55,731 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/punjabi-series-pca-lda.py | '''
punjabi-series-pca-lda: PCA-LDA pipeline as used for Punjabi time series project (Kochetov, Faytak, Nara)
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from hashlib import sha1
from scipy.ndimage import median_filter
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# read in args
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
parser.add_argument("--pca_dim", "-p", help="Number of principal components to retain")
parser.add_argument("--lda_dim", "-l", help="Number of linear discriminants to use")
parser.add_argument("training", help="Set of observations to train LDA on")
args = parser.parse_args()
# check for appropriate directory
expdir = args.directory
try:
assert os.path.exists(args.directory)
except AssertionError:
# TODO raise exception
print("\tDirectory provided doesn't exist")
parser.print_help()
sys.exit(2)
subj = "P" + re.sub("[^0-9]","",expdir)
data_in = os.path.join(expdir,"frames_proc.npy")
data = np.load(data_in)
metadata_in = os.path.join(expdir,"frames_proc_metadata.pickle")
md = pd.read_pickle(metadata_in)
# sanity checks
assert(len(md) == data.shape[0]) # make sure one md row for each frame
assert(md.loc[0, 'sha1_filt'] == sha1(data[0].ravel()).hexdigest()) # checksums
assert(md.loc[len(md)-1,'sha1_filt'] == sha1(data[-1].ravel()).hexdigest())
# TODO subset by words of interest
model_array = data
model_md = md
image_shape = model_array[0].shape
print("Now running PCA...")
n_components = int(args.pca_dim)
pca = PCA(n_components=int(n_components))
array_reshaped = model_array.reshape([
model_array.shape[0],
model_array.shape[1] * model_array.shape[2]
])
pca.fit(array_reshaped)
cumulative_var_exp = sum(pca.explained_variance_ratio_)
print("{}:\tPCA with {} PCs explains {} of variation".format(subj,
n_components,
round(cumulative_var_exp,4)
))
pca_out = pca.transform(array_reshaped)
# create output table headers
pc_headers = ["pc"+str(i+1) for i in range(0,n_components)] # n. of PC columns changes acc. to n_components
meta_headers = list(md.columns.values)
headers = meta_headers + pc_headers
# create output table
headless = np.column_stack((md[meta_headers], pca_out))
d = np.row_stack((headers, headless))
# TODO once relevant, output one table across multiple subjects?
# output eigentongues
if n_components < 5:
n_output_pcs = n_components
else:
n_output_pcs = 5
for n in range(0,n_output_pcs):
dd = pca.components_[n].reshape(image_shape)
mag = np.max(dd) - np.min(dd)
pc_load = (dd-np.min(dd))/mag*255
plt.title("PC{:} min/max loadings, {:}".format((n+1),subj))
#plt.title("PC{:} min/max loadings, ".format((n+1)))
plt.imshow(pc_load, cmap="Greys_r")
file_ending = "{:}-pc{:}.pdf".format(subj, (n+1))
#file_ending = "all-pc{:}.pdf".format((n+1))
savepath = os.path.join(expdir,file_ending) # TODO redefine save path if needed
plt.savefig(savepath)
pca_out = pca.transform(array_reshaped)
# now LDA stuff
# select training and testing sets based on input
if args.training == "stops":
print("Now running LDA (trained on stops)...")
training_list = ["batab", "batrab"]
test_list = ["banab", "banrab"]
elif args.training == "nasals":
print("Now running LDA (trained on nasals)...")
training_list = ["banab", "banrab"]
test_list = ["batab", "batrab"]
else:
print("Could not interpret requested training set, exiting")
sys.exit(2)
training_mask = model_md['stim'].isin(training_list)
training_mask = training_mask.values
training_md = model_md[training_mask].copy()
training_data = pca_out[training_mask]
test_mask = model_md['stim'].isin(test_list)
test_mask = test_mask.values
test_md = model_md[test_mask].copy()
test_data = pca_out[test_mask]
# train LDA on training data
labs = [re.sub("[biau]","", s) for s in np.array(training_md.stim)] # expand dims?
test_labs = [re.sub("[biau]","", s) for s in np.array(test_md.stim)]
cats = ["retroflex" if s in ["tr","nr"] else "dental" for s in labs]
test_cats = ["retroflex" if s in ["tr","nr"] else "dental" for s in test_labs]
train_lda = LDA(n_components = int(args.lda_dim))
train_lda.fit(training_data, cats) # train the model on the data
train_lda_out = train_lda.transform(training_data)
# validate by scoring on training data
train_score = train_lda.score(training_data, cats)
print("{}:\tLDA (1 LD) correctly classifies {} of training {}".format(subj, train_score, args.training))
# score and/or categorize test data according to trained LDA model
if args.training == "stops":
test_class = "nasals"
else:
test_class = "stops"
test_lda_out = train_lda.transform(test_data)
# show score for test data
test_score = train_lda.score(test_data, test_cats)
print("{}:\tLDA (1 LD) correctly classifies {} of test {}".format(subj, test_score, test_class))
# PCA and LDA outputs
# LDA data for csv: training on top of test
ld = pd.DataFrame(np.vstack([train_lda_out, test_lda_out]))
ld = ld.rename(columns = {0:'LD1', 1:'LD2'})
# make PCs into a set of columns
pc_dataframe = pd.DataFrame(pca_out)
pc_dataframe = pc_dataframe.rename(columns=lambda x: "pc"+str(int(x)+1))
# metadata that was read in earlier for csv: training on top of test
out_md = pd.concat([training_md, test_md], axis=0, ignore_index=True)
# classification results: training on top of test
cls = pd.concat(
[pd.DataFrame(train_lda.predict(training_data)),
pd.DataFrame(train_lda.predict(test_data))],
axis=0,
ignore_index=True
)
cls = cls.rename(columns = {0:'cls'})
# combine all of the above into a DataFrame object
ld_md = pd.concat([out_md, ld, cls, pc_dataframe], axis=1)
# save analysis data for the current subject as csv
csv_path = os.path.join(expdir, "{:}_ldas_train_{:}.csv".format(subj, args.training))
ld_md.to_csv(csv_path, index=False)
# TODO validate by classifying on training data
# TODO classification pcts.? | {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,732 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/punjabi-cache-frames.py | """
punjabi-cache-frames: frame caching method used in Punjabi dental/retroflex project (Kochetov, Faytak, Nara)
"""
# TODO: actually using?
from __future__ import absolute_import, division, print_function
import argparse
import glob
import numpy as np
import os
import pandas as pd
import re
import struct
import subprocess
import sys
from collections import OrderedDict
from hashlib import sha1
from operator import itemgetter
from PIL import Image
from scipy import ndimage
# read in args
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
args = parser.parse_args()
# check for appropriate directory
expdir = args.directory
try:
assert os.path.exists(args.directory)
except AssertionError:
# TODO raise exception
print("\tDirectory provided doesn't exist")
parser.print_help()
sys.exit(2)
data = None
recs = []
frames_out = os.path.join(expdir,"frames.npy")
metadata_out = os.path.join(expdir,"frames_metadata.pickle")
png_glob_exp = os.path.join(os.path.normpath(expdir),"*.png")
# for filename in list-of-files:
for filename in glob.glob(png_glob_exp):
# get filename and other metadata
fname = os.path.split(filename)[1]
fname_bare = os.path.splitext(fname)[0]
attr = fname_bare.split('_')
subj = attr[0]
lang = re.sub(r'[0-9]', '', attr[0])
# subj is crucial for subsetting data. Users will want to define this on their own.
# But it might be good to have a function with flat directory structure and subj IDs as inputs...
# ...that caches all data at once.
# this would let people select their desired frame subset however they'd like, and then run all at once.
# on the other hand, having subject as a variable and pulling the data apart is much easier conceptually, and the data is easier to move around as a single large file.
if len(attr) > 2:
stim = attr[1]
token = re.sub(r'[a-zA-Z]', '', attr[2])
else:
stim = re.sub(r'[0-9]', '', attr[1])
token = re.sub(r'[a-zA-Z]', '', attr[1])
if stim in ["banab", "batab"]:
place = "alv"
if stim == "banab":
phone = "n"
else:
phone = "t"
elif stim in ["baNab", "baTab"]:
place = "ret"
if stim == "baNab":
phone = "nr"
else:
phone = "tr"
# get ndarray from image file. issue is probably here. Unconverted RGB:
inframe = np.asarray(Image.open(filename))
# converted from RGB to grayscale (one-channel):
inframe = np.asarray(Image.open(filename).convert("L"))
# converted to uint8:
rawdata = inframe.astype(np.uint8)
# the ravel() seems to work correctly, at least in terms of producing an array of the right size:
# generate metadata object for the current acquisition
recs.append(
OrderedDict([
('filename', fname),
('subject', subj),
('stim', stim),
('token', token),
('phone', phone),
('place', place),
('sha1', sha1(rawdata.ravel()).hexdigest()), # tuple error is thrown here.
('sha1_dtype', rawdata.dtype)
])
)
# add frame ndarray to frames list
if data is None:
data = np.expand_dims(rawdata, axis=0)
else:
data = np.concatenate([data, np.expand_dims(rawdata, axis=0)])
# convert metadata to a DataFrame
md = pd.DataFrame.from_records(recs, columns=recs[0].keys())
# make sure there is one metadata row for each ndarray in the pickle
assert(len(md) == data.shape[0])
# compare checksums
assert(md.loc[0, 'sha1'] == sha1(data[0].ravel()).hexdigest())
assert(md.loc[len(md)-1,'sha1'] == sha1(data[-1].ravel()).hexdigest())
np.save(frames_out, data)
md.to_pickle(metadata_out)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,733 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/mfa-unflatten.py | # Simple utility to "unflatten" forced alignment output from
# the Montreal Forced Aligner (move audio files and TextGrid
# annotations back into acquisition directories). Assumes
# each ultrasound acquisition in its own subdirectory in expdir.
# Assumes each .TextGrid has a matching directory name.
# Usage: python mfa-unflatten.py [tgdir] [expdir]
import argparse
import os
import shutil
# parse argument(s)
parser = argparse.ArgumentParser()
# read things in
#parser.add_argument("wavdir", "-w", action="store",
# help="Directory containing .wav and .lab \
# files in flat structure; input to MFA"
# )
parser.add_argument("tgdir", action="store",
help="Directory containing alignment \
TextGrids output by MFA"
)
parser.add_argument("expdir", action="store",
help="Experiment directory containing \
ultrasound acquisitions in flat structure; \
destination of TextGrid files."
)
args = parser.parse_args()
tgdir = args.tgdir
expdir = args.expdir
# Walk the MFA output tree; report any unaligned files, then copy each
# alignment TextGrid into the acquisition directory named after its timestamp.
for dirs, subdirs, textgrids in os.walk(tgdir):
    for tg in textgrids:
        tgh = os.path.join(os.path.abspath(dirs), tg)
        # Fix: this branch used to sit in an `elif` after the
        # ".ch1.TextGrid" suffix filter, so it could never match (and it
        # read tgh before assignment). Handle the MFA failure report first.
        if tg == "unaligned.txt":
            with open(tgh, "r") as un:
                print("WARNING: SOME SOUND FILES NOT ALIGNED")
                lines = [line.rstrip('\n') for line in un]
                for line in lines:
                    print(line)
            continue
        if not tg.endswith(".ch1.TextGrid"):
            continue
        # "<timestamp>.ch1.TextGrid" -> "<timestamp>" names the acq dir.
        timestamp = tg.replace(".ch1.TextGrid", "")
        dest = os.path.join(expdir, timestamp)
        shutil.copy(tgh, dest)
55,734 | mfaytak/ultramisc | refs/heads/master | /scripts/ssanova/palate-getter.py | # palate getter function - averages all palate traces in ET output .con file
# outputs a text file that can be used in ssanova_palate.R
# usage: python palate-getter.py [directory name]
# where directory contains multiple subjects' palate-finding dirs
# target files MUST end in "palates.con"
import os, sys, argparse
from csv import reader
from numpy import mean
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
args = parser.parse_args()
try:
expdir = args.directory
except IndexError:
print("\tDirectory provided doesn't exist")
ArgumentParser.print_usage
ArgumentParser.print_help
sys.exit(2)
# Walk the experiment tree, average every "*palates.con" trace file
# column-wise, and write an X/Y palate contour usable by ssanova_palate.R.
for root, dirs, files in os.walk(expdir):
    for con in files:
        if not 'palates.con' in con.lower():
            continue
        # NOTE(review): out_file has no directory component, so output lands
        # in the current working directory rather than next to the .con
        # file -- confirm this is intended.
        out_file = os.path.splitext(con)[0] + "-out.txt"
        # reads all frames from a .con file and averages; gets palate
        with open(os.path.join(root,con), 'r') as cn:
            csvreader = reader(cn, delimiter="\t")
            dat = list(csvreader)
        nframes = len(dat[0]) # get number of frames from first row (unused below)
        rows = sum(1 for row in dat) # rows = 100, generally (unused below)
        with open(out_file, 'w') as out:
            out.write('X' + '\t' + 'Y' + '\n') # header
            for row in dat: # go through by point in contour:
                # columns alternate x,y per frame; average each coordinate
                row_floats = [float(x.strip()) for x in row if x] # change type, filter out empty strings
                x_avg = mean(row_floats[0::2])
                y_avg = mean(row_floats[1::2])
                y_avg = -y_avg # because ssanova data comes out inverted; has to match
                out_row = '\t'.join([str(round(x_avg,2)), str(round(y_avg,2))]) + '\n'
                out.write(out_row)
55,735 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/nasalcoda-cache-frames.py | '''
Script to cache vowel and nasal data of interest in Mandarin Chinese. Assumes
TextGrid annotations with phone set used in Montreal Forced Aligner for its
pre-trained Mandarin Chinese acoustic model.
Lists of target segments and/or can be input to selectively extract data. If
either list is omitted, no restrictions are
Usage: python nasalcoda-cache-frames.py [expdir] [words] [segments] [--flop -f]
expdir: directory containing all ultrasound acquisitions for a subject
words: list of target words, plaintext
segments: list of target segments, plaintext (including suprasegmentals)
--flop: horizontally mirror the data (if probe was used backwards)
'''
import argparse
import audiolabel
import glob
import numpy as np
import os
import pandas as pd
import re
import shutil
import sys
from collections import OrderedDict
from hashlib import sha1
from operator import itemgetter
from ultratils.rawreader import RawReader
from ultramisc.ebutils import read_echob_metadata, read_stimfile
def read_stimfile(stimfile):
    """Read the one-line stimulus label from *stimfile*; return it uppercased.

    NOTE: shadows the read_stimfile imported from ultramisc.ebutils above.
    """
    with open(stimfile, "r") as fh:
        return fh.read().rstrip('\n').upper()
# read in command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
help="Experiment directory containing \
acquisitions in flat structure"
)
parser.add_argument("words",
help="Plaintext list of target words to be extracted"
)
parser.add_argument("segments",
help="Plaintext list of target segments to be extracted"
)
parser.add_argument("-f",
"--flop",
help="Horizontally flip the data",
action="store_true"
)
args = parser.parse_args()
# check for appropriate directory
try:
expdir = args.expdir
except IndexError:
print("\tDirectory provided doesn't exist")
ArgumentParser.print_usage
ArgumentParser.print_help
sys.exit(2)
frames_out = os.path.join(expdir,"frames.npy")
metadata_out = os.path.join(expdir,"frames_metadata.pickle")
# glob expression: locates .raw files in subdirs
rawfile_glob_exp = os.path.join(expdir,"*","*.raw")
# create regular expressions for target words and segments
# TODO: default to match ^(?!sil|sp).* for phones
# TODO: default to match "not nothing" for words
with open(args.words, 'r') as mydict:
wrds = [line.strip().split()[0].lower() for line in mydict.readlines()]
with open(args.segments,'r') as mysegm:
segs = [line.strip().split()[0] for line in mysegm.readlines()]
# make a more generally useful regular expression for segments
# TODO set these to "any alphanumeric label which isn't sp or sil" if args
# aren't provided
word_regexp = re.compile("^({})$".format('|'.join(wrds)))
seg_regexp = re.compile("^({})$".format('|'.join(segs)))
# folder path for discards
disc = os.path.join(expdir,"_discards")
# empty data collection objects
data = None
recs = []
# loop through available .raw files
for rf in glob.glob(rawfile_glob_exp):
parent = os.path.dirname(rf)
acq = os.path.split(parent)[1]
stimfile = os.path.join(parent,"stim.txt")
stim = read_stimfile(stimfile)
if stim == "BOLUS" or stim == "PRACTICE":
continue
print("Now working on " + acq)
wav = os.path.join(parent,str(acq + ".ch1.wav"))
tg = os.path.join(parent,str(acq + ".ch1.TextGrid"))
sync = os.path.join(parent,str(acq + '.sync.txt'))
sync_tg = os.path.join(parent,str(acq + ".sync.TextGrid"))
idx_txt = os.path.join(parent,str(acq + ".idx.txt"))
# instantiate RawReader, which extracts ultrasound data from .raw files
if data is None:
try:
nscanlines, npoints, junk = read_echob_metadata(rf)
except ValueError:
print("WARNING: no data in {}.img.txt".format(acq))
nscanlines = int(input("\tnscanlines (usually 64) ")) # TODO update values
npoints = int(input("\tnpoints (usually 1024) "))
junk = int(input("\tjunk (usually 78) "))
rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)
# instantiate LabelManager objects for FA transcript and sync pulses
try:
pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
except FileNotFoundError:
print("No alignment TG in {}; skipping".format(acq))
continue
try:
sync_pm = audiolabel.LabelManager(from_file=sync_tg, from_type="praat")
except FileNotFoundError:
print("No sync TG in {}; skipping".format(acq))
continue
for seg,match in pm.tier('phones').search(seg_regexp, return_match=True):
context = pm.tier('words').label_at(seg.center).text
if context in wrds:
before = pm.tier('phones').prev(seg)
# assume default "sp" if there is no following label;
# i.e. empty final interval
after = pm.tier('phones').next(seg)
try:
after_label = after.text
except AttributeError:
after_label = 'sp'
two_after = pm.tier('phones').next(after)
try:
two_after_label = two_after.text
except AttributeError:
two_after_label = 'sp'
# match only the last two segments, sequence VN
# if-else statement can be removed to make the script more general
# (will return all instance of target phones in target words)
if not (after_label == 'sp' or two_after_label == 'sp'):
pass
else:
#print("Found {} in {} in {}".format(seg.text,context,acq))
# separate suprasegmental numbers from seg.text
match = re.match(r"([a-z]+)([0-9]+)", seg.text, re.I)
if match:
out_phone, out_sup = match.groups()
#print(out_phone, out_sup)
else:
out_phone = seg.text
out_sup = "NA"
#print(out_phone, out_sup)
# get midpoint time and find closest ultrasound frame in sync TG
midpoint = seg.center
diff_list = []
diff2_list = []
for frame in sync_pm.tier('pulse_idx'):
diff = abs(frame.t1 - midpoint)
diff_list.append(diff)
for frame in sync_pm.tier('raw_data_idx'):
diff2 = abs(frame.t1 - midpoint)
diff2_list.append(diff2)
# TODO rewrite this chunk, temporary fix added
mid_pulse_idx_num = min(enumerate(diff_list), key=itemgetter(1))[0]
mid_raw_data_idx_num = min(enumerate(diff2_list), key=itemgetter(1))[0]
# get midpoint frame; discard if out of recorded range
try:
raw = rdr.get_frame(mid_pulse_idx_num - 1) # temporary fix
except IndexError: # thrown by RawReader.rdr if no frame at timepoint
# issue warning and move entire acq to discards folder
print("No frame available in {}, discarding".format(acq))
rdr.close()
if not os.path.isdir(disc):
os.mkdir(disc)
shutil.copytree(parent, os.path.join(disc,acq))
shutil.rmtree(parent)
continue
trim = raw[junk:,:]
# flop if needed
if args.flop:
trim = np.fliplr(trim)
if data is None:
data = np.expand_dims(trim, axis=0)
else:
data = np.concatenate([data, np.expand_dims(trim, axis=0)])
recs.append(
OrderedDict([
('speaker', expdir),
('timestamp', acq),
('time', midpoint),
('pulseidx', int(mid_pulse_idx_num)),
('width', nscanlines),
('height', npoints - junk),
('phone', out_phone),
('sup', out_sup),
('stim', stim),
('before', re.sub(r'[0-9]+', '', before.text)),
('after', re.sub(r'[0-9]+', '', after.text)),
('sha1', sha1(trim.ravel()).hexdigest()),
('sha1_dtype', trim.dtype)
])
)
md = pd.DataFrame.from_records(recs, columns=recs[0].keys())
# check that metadata matches data, frame-by-frame
assert(len(md) == data.shape[0])
for idx,row in md.iterrows():
assert(row['sha1'] == sha1(data[idx].ravel()).hexdigest())
np.save(frames_out, data)
md.to_pickle(metadata_out)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,736 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/acq-share.py |
'''
acq-share.py: by Matt Faytak / Jennifer Kuo
Divide ultrasound subdirectories into approximately
equal shares for processing by multiple workers.
Usage: python acq-share.py [expdir] [shares] [--flat] [--random]
expdir: the experiment folder containing the acquisition
subdirectories to be divided.
shares: the number of shares to divide the acquisitions
into.
--flat: if specified, assume the directory structure is
flat (all files in one folder, which is expdir).
--random: if specified, take randomly selected subsets
(otherwise, contiguous parts of sorted list are taken).
'''
import argparse
import glob
import os
import shutil
import sys
# this fcn courtesy of user tixxit at https://stackoverflow.com/a/2135920
def split(a, n):
    """Yield n contiguous slices of *a* whose lengths differ by at most one.

    The first len(a) % n slices get the extra element, matching the original
    index-arithmetic implementation.
    """
    base, extra = divmod(len(a), n)

    def _chunks():
        start = 0
        for i in range(n):
            size = base + (1 if i < extra else 0)
            yield a[start:start + size]
            start += size

    return _chunks()
# read in arguments
parser = argparse.ArgumentParser(description='Divide ultrasound data into approximately equal shares.')
parser.add_argument("expdir",
help="Experiment directory containing all subjects'\
acquisition subfolders"
)
parser.add_argument("shares",
type=int,
help="Number of shares to divide the data into"
)
parser.add_argument("--flat","-f",
action='store_true',
help="If specified, expect a flat directory structure within expdir"
)
parser.add_argument("--random","-r",
action='store_true',
help="If specified, divide pseudo-randomly into subsets; \
otherwise use contiguous parts of sorted file list"
)
args = parser.parse_args()
try:
expdir = args.expdir
except IndexError:
print("\tDirectory provided doesn't exist")
ArgumentParser.print_usage
ArgumentParser.print_help
sys.exit(2)
coders = args.shares
counter = 0
if args.flat:
# look through directory for all files matching basename;
tg_glob = glob.glob(os.path.join(expdir, "*.ch1.TextGrid"))
if not args.random:
tg_glob.sort()
for part in split(tg_glob, coders):
# name output dir according to coder number
counter += 1
out = os.path.join(expdir,"_share{}".format(counter))
os.makedirs(out)
# go through and copy all files matching TG basename
for tg in part:
basename = os.path.split(tg)[-1].split('.')[0]
matching_file_exp = os.path.join(expdir,str(basename + "*"))
for f in glob.glob(matching_file_exp):
out_dest = os.path.join(out,os.path.split(f)[-1])
shutil.copy(f,out_dest)
else:
# look into an additional layer of subdirectories,
# and copy contents of each subdir
# TODO this also captures flattened dirs and all their contents.
tg_glob = glob.glob(os.path.join(expdir, "*", "*.ch1.TextGrid"))
if not args.random:
tg_glob.sort()
if len(tg_glob) == 0:
print("WARNING: No TextGrids found. Are you supposed to be using --flat?")
for part in split(tg_glob, coders):
counter += 1
out = os.path.join(expdir,"_out{}".format(counter))
for tg in part:
parent = os.path.dirname(tg)
#print(parent)
out_dest = os.path.join(out, os.path.split(parent)[-1])
shutil.copytree(parent, out_dest)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,737 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/suzhou-cache-frames.py | #!/usr/bin/env python
'''
suzhou-cache-frames: frame caching method as used in Suzhou iz/i project.
'''
import argparse
import audiolabel
import glob
import imgphon as iph
import os
import numpy as np
import pandas as pd
import re
import shutil
import sys
from collections import OrderedDict
from hashlib import sha1
from operator import itemgetter
from PIL import Image
from ultratils.rawreader import RawReader
from ultramisc.ebutils import read_echob_metadata, read_stimfile
def read_stimfile(stimfile):
    """Read and return the one-line stimulus label stored in *stimfile*.

    Unlike other project variants, the label is NOT uppercased here.
    """
    with open(stimfile, "r") as fh:
        return fh.read().rstrip('\n')
# TODO move this (and the other fcns?) to utils
def read_echob_metadata(rawfile):
    """Gather frame geometry for a .raw file from its sibling .img.txt file.

    The .img.txt holds two tab-separated lines (field names, then integer
    values). Returns (nscanlines, npoints, junk) where junk is the number
    of rows of junk data at the outer edge of the array (Pitch - Width).

    Raises ValueError if a value cannot be parsed as an int.
    """
    metafile = os.path.splitext(rawfile)[0] + ".img.txt"
    with open(metafile, 'r') as mf:
        keys = mf.readline().strip().split("\t")
        vals = mf.readline().strip().split("\t")
    meta = {fld: int(val) for fld, val in zip(keys, vals)}
    nscanlines = meta['Height']
    npoints = meta['Pitch']
    junk = npoints - meta['Width']
    return nscanlines, npoints, junk
class Header(object):
    # Empty attribute container -- no fields are set here; presumably
    # populated dynamically by downstream code. TODO confirm use site.
    def __init__(self):
        pass
class Probe(object):
    # Empty attribute container -- no fields are set here; presumably
    # populated dynamically by downstream code. TODO confirm use site.
    def __init__(self):
        pass
# a list of targets from dict.local, to be updated as required.
# Orthographic word labels whose vowel tokens should be cached; any
# token whose word is not in this list is skipped in the main loop.
target_list = ['IZ', 'BIZX', 'SIZ', 'XIZ',
               'IY', 'BIY', 'SIY', 'XIY',
               'YZ', 'XYZ',
               'EU', 'NYEU', 'XEU',
               'SEI',
               'AAE', 'BAAE', 'SAAE', 'XAE',
               'UW', 'BUW', 'SUW', 'XUEQ',
               'OOW', 'BOOW', 'SOOW', 'FOOW',
               # 'AHR', 'HAAR', 'NIZ', 'NIY', 'AAW', # excluding 儿 for time being
               'SIEX', 'XIEX',
               'SZ', 'SZW',
               'BUH', 'BOQ', 'FUH', 'FUW']
# words whose IY1 tokens are relabeled IZ1 in the main loop below
iz_list = ['IZ', 'BIZX', 'SIZ', 'XIZ']
recs = [] # metadata store
data = None # array that will contain ultrasound data
#frame_dim_1 = None
#frame_dim_2 = None
# TODO set of segments being searched for
# regex over phone-tier labels to harvest
vre = re.compile(
    "^(IY1|IH1|UH1|UW1|OW1|AE1|SH|S)$"
    )
# distance (in frames) away from intended time point that can be subbed in
threshhold = 3
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
            help="Experiment directory containing \
                acquisitions in flat structure"
            )
parser.add_argument("-f",
            "--flop",
            help="Horizontally flip the data",
            action="store_true"
            )
args = parser.parse_args()

# Check for an appropriate directory.
# Bug fixed: accessing args.expdir can never raise IndexError (argparse
# guarantees the attribute exists), so the original try/except was dead code,
# and `ArgumentParser.print_usage` was an unbound attribute access that did
# nothing.  Validate the path explicitly and call the parser methods.
expdir = args.expdir
if not os.path.isdir(expdir):
    print("\tDirectory provided doesn't exist")
    parser.print_usage()
    parser.print_help()
    sys.exit(2)

# Set up copy location; wipe and recreate it if a previous run left one.
output_dir = os.path.join(expdir,"_copy")
try:
    os.mkdir(output_dir)
except FileExistsError:
    shutil.rmtree(output_dir)
    os.mkdir(output_dir)

# Output artifacts: a tab-separated run log, a folder for discarded
# acquisitions, the cached frame array, and its per-frame metadata.
logfile = os.path.join(expdir,"frames_log.txt")
discard_folder = os.path.join(expdir,"discards")
frames_out = os.path.join(expdir,"frames.npy")
metadata_out = os.path.join(expdir,"frames_metadata.pickle")
with open(logfile,"w") as header:
    header.write("acq"+"\t"+"stim"+"\t"+"phone"+"\t"+"status"+"\t"+"problem"+"\n")
# glob expression
# Main loop: for each acquisition's .raw file, find target vowel tokens in
# the TextGrid, grab the ultrasound frame nearest each token midpoint, and
# append it (plus a metadata record) to the in-memory cache.
rawfile_glob_exp = os.path.join(expdir,"*","*.raw")
for rf in glob.glob(rawfile_glob_exp):
    parent = os.path.dirname(rf)
    acq = os.path.split(parent)[1]
    # use stim.txt to skip non-trials
    stimfile = os.path.join(parent,"stim.txt")
    stim = read_stimfile(stimfile)
    if stim == "bolus" or stim == "practice":
        continue
    print("Found "+acq)
    # define "support" file names based on .raw
    wav = os.path.join(parent,str(acq + ".ch1.wav"))
    tg = os.path.join(parent,str(acq + ".ch1.TextGrid"))
    sync = os.path.join(parent,str(acq + '.sync.txt'))
    sync_tg = os.path.join(parent,str(acq + ".sync.TextGrid"))
    idx_txt = os.path.join(parent,str(acq + ".idx.txt"))
    # set up RawReader and frame dimensions
    # (geometry is read once, on the first acquisition only)
    if data is None:
        try:
            nscanlines, npoints, junk = read_echob_metadata(rf)
        except ValueError:
            print("WARNING: no data in {}.img.txt, please input:".format(acq))
            nscanlines = int(input("\tnscanlines (usually 127) "))
            npoints = int(input("\tnpoints (usually 1020) "))
            junk = int(input("\tjunk (usually 36, or 1020 - 984) "))
        #frame_dim_1 = nscanlines
        #frame_dim_2 = npoints - junk
    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)
    # instantiate LabelManagers
    pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
    sync_pm = audiolabel.LabelManager(from_file=sync_tg, from_type="praat")
    # extract ndarray representations of frames from .raw file
    for v,m in pm.tier('phone').search(vre, return_match=True):
        pron = pm.tier('word').label_at(v.center).text
        # skip any tokens from non-target words
        if pron not in target_list:
            continue
        # get phone label, disambiguating IY and IH based on pronunciation
        # skip some IY (diphthongized variants in some words)
        # NOTE(review): phone is only assigned when v.text is IY1 or IH1;
        # other vre matches (UH1, UW1, OW1, AE1, SH, S) fall through and
        # reuse the previous token's phone (NameError on the first token).
        if v.text == "IY1":
            if pron in iz_list: # change if IZ
                phone = "IZ1"
            elif pron == "YZ" or pron == "XYZ":
                phone = "YZ1"
            elif pron == "SIEX" or pron == "XIEX":
                continue
            else:
                phone = v.text
        elif v.text == "IH1":
            if pron == "SZ":
                phone = "ZZ1"
            elif pron == "SZW":
                phone = "ZW1"
            elif pron == "EU" or pron == "XEU":
                phone = "YY1"
            else:
                phone = v.text
        # TODO make this a bit more extensible; test
        # neighboring phone context, skipping short-pause ("sp") intervals
        before = pm.tier('phone').prev(v).text
        if before == "sp":
            before = pm.tier('phone').prev(v,skip=1).text
        after = pm.tier('phone').next(v).text
        if after == "sp":
            after = pm.tier('phone').next(v,skip=1).text
        # get midpoint time and find closest ultrasound frame in sync TG
        # TODO more efficient to duplicate ultratils frame_at approach
        mid_timepoint = v.center
        diff_list = []
        diff2_list = []
        for frame in sync_pm.tier('pulse_idx'):
            diff = abs(frame.t1 - mid_timepoint)
            diff_list.append(diff)
        for frame in sync_pm.tier('raw_data_idx'):
            diff2 = abs(frame.t1 - mid_timepoint)
            diff2_list.append(diff2)
        mid_pulse_idx_num = min(enumerate(diff_list), key=itemgetter(1))[0]
        mid_raw_data_idx_num = min(enumerate(diff2_list), key=itemgetter(1))[0]
        # get frame, and check for NaN frames
        # walk backwards up to `threshhold` frames if the target frame is empty
        change = 0
        discard_acq = False
        while True:
            pre_rawdata = rdr.get_frame(mid_pulse_idx_num)
            if pre_rawdata is None:
                mid_pulse_idx_num -= 1
                mid_raw_data_idx_num -= 1 # TODO: necessary?
                change += 1
                if change > threshhold:
                    # NOTE(review): this log line (and the "changed by" one
                    # below) lacks a trailing "\n", so log rows run together.
                    with open(logfile, "a") as log:
                        log.write(acq+"\t"+stim+"\t"+phone+"\t"+"discarded"+"\t"+"passed threshhold")
                    print("Frame change threshhold passed; acq {} discarded".format(acq))
                    discard_acq = True
                    break
                else:
                    pass
            else:
                if change > 0:
                    with open(logfile, "a") as log:
                        log.write(acq+"\t"+stim+"\t"+phone+"\t"+"changed by {:}".format(change)+"\t"+"N/A")
                    print("Changed target in {:} by".format(acq), change, "frames")
                break
        # discard the acquisition if needed
        if discard_acq:
            shutil.copytree(parent, os.path.join(discard_folder,acq))
            shutil.rmtree(parent)
            continue
        # preprocessing of images
        rawdata = pre_rawdata.astype(np.uint8)
        trim_data = rawdata[junk:,:]
        if args.flop:
            trim_data = np.fliplr(trim_data)
        if data is None:
            data = np.expand_dims(trim_data, axis=0)
        else:
            data = np.concatenate([data, np.expand_dims(trim_data, axis=0)])
        # generate metadata row for current acq
        # TODO check variable names
        recs.append(
            OrderedDict([
                ('timestamp', acq),
                ('time', v.center),
                ('pulseidx', int(mid_pulse_idx_num)),
                ('rawdataidx', int(mid_raw_data_idx_num)),
                ('width', nscanlines),
                ('height', npoints - junk),
                ('phone', phone),
                ('stim', stim),
                ('pron', pron),
                ('before', before),
                ('after', after),
                ('sha1', sha1(trim_data.ravel()).hexdigest()),
                ('sha1_dtype', trim_data.dtype)
            ])
        )
# Assemble the metadata DataFrame and sanity-check it against the frame
# array (row count and first/last frame checksums) before saving.
md = pd.DataFrame.from_records(recs, columns=recs[0].keys())
# make sure there is one metadata row for each image frame
assert(len(md) == data.shape[0])
# compare checksums
assert(md.loc[0, 'sha1'] == sha1(data[0].ravel()).hexdigest())
assert(md.loc[len(md)-1,'sha1'] == sha1(data[-1].ravel()).hexdigest())
np.save(frames_out, data)
md.to_pickle(metadata_out) | {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,738 | mfaytak/ultramisc | refs/heads/master | /scripts/ssanova/con-checker.py |
# Checks whether a .con file exists for every .ch1.TextGrid under a directory tree.
# Can easily be extended to any processing task that takes place
# in a subject directory and involves outputs of easily identifiable
# files.
# Usage: python con-checker.py [directory containing all acquisitions/data/etc]
# Authors: Matthew Faytak (faytak@ucla.edu) Copyright (c) 2018
# Last modified 11-2018
import os, sys
def usage():
    """Exit the script, printing the module docstring as the SystemExit message.

    Bug fixed: the original wrapped sys.exit() in print(); sys.exit raises
    SystemExit immediately, so the print call could never run (dead code).
    """
    sys.exit(__doc__)
try:
    basedir = os.path.abspath(sys.argv[1])
except IndexError:
    # no directory argument supplied
    usage()
    sys.exit(2)
missing_files = 0
# generate the rest of the output file
# Walk the tree and count every .ch1.TextGrid with no sibling .con file.
for dirs, subdirs, files in os.walk(basedir):
    # exclude discard, subset, distractor, etc. directories from search. change the set as needed
    subdirs[:] = [s for s in subdirs if s not in set(['discards','_discards','affricates','_affricates'])]
    for textgrid in files:
        # only check for .con files for which a .ch1.TextGrid file exists
        if not '.ch1.textgrid' in textgrid.lower():
            continue
        # get related file names
        if 'bpr' in textgrid.lower():
            basename = textgrid.split('.')[0] + '.bpr'
        else:
            basename = textgrid.split('.')[0]
        con_file = os.path.join(dirs, str(basename + '.con'))
        # NOTE(review): this strips '.bpr' anywhere in the joined path, not
        # just the suffix appended above -- verify no directory names contain it.
        con_file = con_file.replace('.bpr', '')
        if os.path.isfile(con_file):
            continue
        # TODO check here for other files ending in .con
        else:
            print("\tNo .con file in {}".format(basename))
            missing_files += 1
        # TODO check for .con files whose basenames don't match
        #elif # another file ends in .con that isn't con_file
        # TODO check for multiple .con files
# print out some encouragement
if missing_files == 0:
    print("Congratulations, you've finished work in {}!".format(basedir))
else:
    print("Almost there! You have a total of {} missing files.".format(missing_files))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,739 | mfaytak/ultramisc | refs/heads/master | /scripts/video/eb-make-avi.py | # eb-extract-all.py: extract all frames as BMPs from a raw file
# WARNING: largely superseded by code in ./flap-video-writer.py
# usage: python eb-make-avi.py expdir (-f / --flop)
import argparse
import audiolabel
import glob
import numpy as np
import os
import re
import shutil
import subprocess
from operator import itemgetter
from PIL import Image
from ultratils.rawreader import RawReader
from ultratils.pysonix.scanconvert import Converter
from ultramisc.ebutils import read_echob_metadata, read_stimfile
class Header(object):
    """Namespace for scan-converter header fields (w, h, sf are set below)."""
    pass
class Probe(object):
    """Namespace for transducer fields (radius, numElements, pitch are set below)."""
    pass
# empty RawReader and Converter handles
# Script body: for each acquisition, fan-convert every ultrasound frame in
# the .idx.txt range to a BMP, then stitch the BMPs into an .avi with ffmpeg.
rdr = None
conv = None
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
            help="Experiment directory containing \
                acquisitions in flat structure"
            )
parser.add_argument("-f",
            "--flop",
            help="Horizontally flip the data",
            action="store_true"
            )
args = parser.parse_args()
# read in expdir
expdir = os.path.normpath(args.expdir)
# set up copy location
output_dir = os.path.join(expdir,"_copy")
try:
    os.mkdir(output_dir)
except FileExistsError:
    shutil.rmtree(output_dir)
    os.mkdir(output_dir)
# glob expression
rawfile_glob_exp = os.path.join(expdir,"*","*.raw")
# loop through acqs and:
for rf in glob.glob(rawfile_glob_exp):
    parent = os.path.dirname(rf)
    basename = os.path.split(parent)[1]
    # use stim.txt to skip non-trials
    stimfile = os.path.join(parent,"stim.txt")
    stim = read_stimfile(stimfile)
    if stim == "bolus" or stim == "practice":
        continue
    # define RawReader and Converter parameters from first acq
    if conv is None:
        print("Defining Converter ...")
        # get image size data; allow for manual input if problems
        try:
            nscanlines, npoints, junk = read_echob_metadata(rf)
        except ValueError:
            print("WARNING: no data in {}.img.txt, please input:".format(basename))
            nscanlines = int(input("\tnscanlines (usually 127) "))
            npoints = int(input("\tnpoints (usually 1020) "))
            junk = int(input("\tjunk (usually 36, or 1020 - 984) "))
        # TODO use metadata instead of hard-coded values
        header = Header()
        header.w = nscanlines # input image width
        header.h = npoints - junk # input image height, trimmed
        header.sf = 4000000 # magic number, sorry!
        probe = Probe()
        probe.radius = 10000 # based on '10' in transducer model number
        probe.numElements = 128 # based on '128' in transducer model number
        probe.pitch = 185 # based on Ultrasonix C9-5/10 transducer
        conv = Converter(header, probe)
    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)
    # define "support" file names based on .raw
    wav = os.path.join(parent,str(basename + ".ch1.wav"))
    sync = os.path.join(parent,str(basename + '.sync.txt'))
    idx_txt = os.path.join(parent,str(basename + ".idx.txt"))
    # make destination and copy "support" files for parent file
    copy_dir = os.path.join(output_dir,basename)
    os.mkdir(copy_dir)
    shutil.copy(wav, copy_dir)
    shutil.copy(idx_txt, copy_dir)
    shutil.copy(stimfile, copy_dir)
    # get frame indices
    frame_indices = []
    with open(idx_txt, "r") as it:
        for line in it:
            frame_indices.append(int(line.strip()))
    start_extr = frame_indices[0]
    end_extr = frame_indices[-1]
    # extract and convert v.t1 - v.t2 range
    # NOTE(review): range() excludes end_extr, so the final listed frame is
    # never extracted -- confirm whether that is intended.
    for idx in range(start_extr, (end_extr)):
        # extract frame using RawReader
        unconv_frame = rdr.get_frame(idx)
        # trim junk pixels off of top
        trimmed_frame = unconv_frame[junk:,:]
        if args.flop:
            trimmed_frame = np.fliplr(trimmed_frame)
        # convert to fan shape
        conv_frame = conv.convert(np.flipud(trimmed_frame))
        ready_frame = np.flipud(conv_frame)
        # create frame handle and save to copy dir
        fh = basename + "." + "{0:05d}".format(idx) + ".bmp"
        out_img = Image.fromarray(ready_frame)
        out_img.save(os.path.join(copy_dir,fh))
    # assemble the BMP sequence into an AVI with ffmpeg
    # NOTE(review): '-framerate' appears after '-i', making it an output
    # option; as an input rate it would need to precede '-i' -- confirm.
    frame_exp = os.path.join(copy_dir, basename + ".%05d.bmp")
    print(frame_exp)
    framerate = 25 # TODO tweak/automatically get
    out_fh = basename + '.avi'
    out_path = os.path.join(copy_dir, out_fh)
    avi_args = ['ffmpeg', '-y',
            '-i', frame_exp,
            '-framerate', str(framerate),
            '-vcodec', 'huffyuv',
            out_path]
subprocess.check_call(avi_args) | {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,740 | mfaytak/ultramisc | refs/heads/master | /setup.py | from distutils.core import setup
# Minimal package manifest for the ultramisc helper library.
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating
# to setuptools would be a straightforward drop-in replacement.
setup(
    name = 'ultramisc',
    author = 'Matthew Faytak',
    packages = ['ultramisc']
) | {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,741 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/process-cache.py | '''
process-cache.py: a python command line utility for cleaning up
ultrasound frame data stored in a .npy cache. A metadata
pickle is used to identify frames. Both of these files are
created using one of the *-cache-frames.py scripts also stored
in this repository.
To be more precise, this script fan-converts, optionally
flops (horizontally flips) the converted data, and applies
filtering operations for speckle reduction and edge enhancement.
One run of the script can be applied to multiple speakers' data
sets. A speaker-specific ROI mask is applied to each data set.
Usage: python process-cache.py [expdir] [--flop -f]
expdir: The experiment directory, which contains a folder for
each subject. These in turn contain files called
frames.npy and frames_metadata.pickle.
--flop: If used, horizontally mirror the data (to correct for
ultrasound probe being oriented backwards).
--roi: If used, apply a mask to the image to isolate a
region of interest. Entire image is cached if not used.
--overwrite Overwrite existing outputs, if specified.
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from hashlib import sha1
from imgphon import ultrasound as us
from ultratils.pysonix.scanconvert import Converter
# read in arguments from command line
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
                    help="Experiment directory containing all subjects'\
                    caches and metadata in separate folders"
                    )
parser.add_argument("-o",
                    "--overwrite",
                    help="Overwrites existing outputs if present.",
                    action="store_true"
                    )
# Bug fixed: an unresolved merge conflict (<<<<<<< HEAD ... >>>>>>>) was left
# in the file here, which is a SyntaxError at import time.  Resolved in favor
# of the HEAD side: args.roi is read later in this script and --flop is
# documented in the module docstring, so both options are kept.
parser.add_argument("-f",
                    "--flop",
                    help="Horizontally flip the data",
                    action="store_true"
                    )
parser.add_argument("-r",
                    "--roi",
                    help="Trim data to a region of interest",
                    action="store_true"
                    )
args = parser.parse_args()
# create some objects we will need to instantiate the converter
class Header(object):
    """Attribute container for Converter header fields (w, h, sf set below)."""
    pass
class Probe(object):
    """Attribute container for Converter probe fields (radius, numElements, pitch set below)."""
    pass
# Converter handle; instantiated below on the first pass through the data.
conv = None

# Argument checking.
# Bug fixed: args.expdir can never raise IndexError (argparse guarantees the
# attribute), so the original try/except was dead code -- and its handler
# referenced `ArgumentParser`, a name that is never imported (NameError),
# as a bare attribute access that called nothing.  Validate the directory
# explicitly instead.
expdir = args.expdir
if not os.path.isdir(expdir):
    print("\tDirectory provided doesn't exist")
    sys.exit(2)

# create some output file handles (written inside each subject's folder)
frames_out = "frames_proc.npy"
metadata_out = "frames_proc_metadata.pickle"
# loop through
# Per-subject processing: load the cached frames, optionally mask to an
# interactively-tuned RoI, SRAD-filter and fan-convert each frame, and save
# the processed array plus updated metadata back into the subject folder.
for root,directories,files in os.walk(expdir):
    for d in directories:
        if os.path.exists(os.path.join(root,d,frames_out)):
            # NOTE(review): this branch looks inverted -- with --overwrite it
            # prints "Skipping ..." yet falls through and reprocesses, while
            # without --overwrite it skips silently.  Confirm intent.
            if args.overwrite:
                print("Skipping {}, already processed".format(d))
                pass
            else:
                continue
        # folder name without any alphabetic characters
        subject = re.sub("[^0-9]","",d)
        # read in data and metadata
        data_in = os.path.join(root,d,"frames.npy")
        pca_data = np.load(data_in)
        metadata_in = os.path.join(root,d,'frames_metadata.pickle')
        pca_md = pd.read_pickle(metadata_in)
        # check that metadata matches data, frame-by-frame
        assert(len(pca_md) == pca_data.shape[0])
        for idx,row in pca_md.iterrows():
            assert(row['sha1'] == sha1(pca_data[idx].ravel()).hexdigest())
        # TODO implement a general data subsetter (external lists)
        # define Converter parameters from first acq for first subj
        if conv is None:
            print("Defining Converter ...")
            header = Header()
            header.w = pca_data[0].shape[1] # input image width
            header.h = pca_data[0].shape[0] # input image height, trimmed
            header.sf = 4000000 # magic number, sorry!
            probe = Probe()
            probe.radius = 10000 # based on '10' in transducer model number
            probe.numElements = 128 # based on '128' in transducer model number
            probe.pitch = 185 # based on Ultrasonix C9-5/10 transducer
            conv = Converter(header, probe)
        # get mean frame
        mean_frame = pca_data.mean(axis=0)
        conv_mean = np.flipud(conv.convert(np.flipud(mean_frame)))
        plt.title("Mean frame, Spkr {:}".format(subject))
        plt.imshow(conv_mean, cmap="Greys_r")
        file_ending_mean = "subj{:}_mean.pdf".format(subject)
        savepath_mean = os.path.join(root,d,file_ending_mean)
        plt.savefig(savepath_mean)
        # mask data according to RoI (or lack thereof)
        if not args.roi:
            # TODO build this into us.roi?
            print("No mask applied to data.")
            mask = np.ones(pca_data[0].shape, dtype=pca_data[0].dtype)
        else:
            print("Defining region of interest ...")
            # starter boundaries
            roi_upper = 600
            roi_lower = 200
            roi_left = 20
            roi_right = 50
            # show user masked mean frame, ask for input on mask
            while True:
                mask = us.roi(mean_frame,
                    upper=roi_upper,
                    lower=roi_lower,
                    left=roi_left,
                    right=roi_right)
                masked_mean = mean_frame * mask
                conv_masked = np.flipud(conv.convert(np.flipud(masked_mean)))
                plt.title("Mean frame and RoI, Spkr {:}".format(subject))
                plt.imshow(conv_masked, cmap="Greys_r")
                file_ending_roi = "subj{:}_roi.pdf".format(subject)
                savepath_roi = os.path.join(root,d,file_ending_roi)
                plt.savefig(savepath_roi)
                good_roi = input("Inspect {:}. Good RoI? (Y/N) ".format(savepath_roi))
                # If good, go ahead. If not, ask for new bounds.
                if good_roi.upper() in ['Y', 'N']:
                    if good_roi.upper() == "Y":
                        break
                    else:
                        roi_upper = int(input("Please provide a new upper bound for RoI (currently {:}): ".format(roi_upper)))
                        roi_lower = int(input("Please provide a new lower bound for RoI (currently {:}): ".format(roi_lower)))
                        roi_left = int(input("Please provide a new left bound for RoI (currently {:}): ".format(roi_left)))
                        roi_right = int(input("Please provide a new right bound for RoI (currently {:}): ".format(roi_right)))
                else:
                    print("Typo, try again ...")
        # some filtering parameters based on image size
        conv_frame = conv.convert(pca_data[0])
        adj_radius = int(conv_frame.shape[0]/50) # for median filter
        # heads-up
        print("Preprocessing data for PCA ...")
        # make a sample frame for reference and show user
        in_sample = pca_data[0]
        masked_samp = in_sample * mask # using mask defined above
        sradd_samp = us.srad(masked_samp)
        convd_samp = np.flipud(conv.convert(np.flipud(sradd_samp)))
        clean_samp = us.clean_frame(convd_samp, median_radius=adj_radius)
        rescaled_samp = clean_samp * 255
        sample_frame = rescaled_samp.astype(np.uint8)
        plt.title("Sample frame, Spkr {:}".format(subject))
        plt.imshow(sample_frame, cmap="Greys_r")
        file_ending_sample = "subj{:}_sample.pdf".format(subject)
        savepath_sample = os.path.join(root,d,file_ending_sample)
        plt.savefig(savepath_sample)
        print("Please check sample frame at {}!".format(savepath_sample))
        # save a RoI file for later reference
        # NOTE(review): roi_upper/lower/left/right are only bound inside the
        # --roi branch above; without --roi the next line raises NameError.
        print("RoI of upper {:} lower {:}, left {:} right {:} used".format(roi_upper,roi_lower,roi_left,roi_right))
        file_ending_roi = "subj{:}_roi.txt".format(subject)
        savepath_roi = os.path.join(root,d,file_ending_roi)
        with open(savepath_roi,"w") as out:
            out.write('\t'.join(['upper', 'lower', 'left', 'right']) + '\n')
            out.write('\t'.join([str(roi_upper), str(roi_lower), str(roi_left), str(roi_right)]))
        # set up ultrasound frame array for PCA
        out_frames = np.empty([pca_data.shape[0]] + list(conv_frame.shape)) * np.nan
        out_frames = out_frames.astype('uint8')
        filt_hds = []
        total = out_frames.shape[0]
        for idx,frame in enumerate(pca_data):
            masked = frame * mask # using mask defined above
            sradd = us.srad(masked)
            # TODO should remove outer flipud for consistent output
            convd = np.flipud(conv.convert(np.flipud(sradd)))
            clean = us.clean_frame(convd, median_radius=adj_radius)
            # copying to out_frames casts to np.uint8; rescaling required
            rescaled = clean * 255
            out_frames[idx,:,:] = rescaled
            # new sha1 hex: filtered, conv to np.uint8
            filt_hds.append(sha1(out_frames[idx].ravel()).hexdigest())
            print("\tAdded frame {} of {}".format(idx+1,total))
        # add new sha1 hash as a column in the df
        pca_md = pca_md.assign(sha1_filt=pd.Series(filt_hds, index=pca_md.index))
        # check that metadata matches data, frame-by-frame
        assert(len(pca_md) == pca_data.shape[0])
        for idx,row in pca_md.iterrows():
            assert(row['sha1'] == sha1(pca_data[idx].ravel()).hexdigest())
        # output
        np.save(os.path.join(root,d,frames_out), out_frames)
        pca_md.to_pickle(os.path.join(root,d,metadata_out))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,742 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/check-adjusting.py | ### TAP/FLAP Project
### Python script for checking if all textgrids have been adjusted
### instructions: In command line/terminal, run python checkadjusting.py TG_DIR,
### where TG_DIR is the directory containing the textgrids.
## Packages that need to be downloaded: audiolabel
##Jennifer Kuo, July 2019
import argparse
import audiolabel
import glob
import os
import numpy as np
import re
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
help="Experiment directory containing all subjects'\
caches and metadata in separate folders"
)
args = parser.parse_args()
tg_dir = args.expdir
class Header(object):
    """Bare namespace object; callers attach attributes directly.

    NOTE(review): not referenced anywhere in this script's code as shown.
    """
    pass
class Probe(object):
    """Bare namespace object; callers attach attributes directly.

    NOTE(review): not referenced anywhere in this script's code as shown.
    """
    pass
count = 0
# count of problems found across all TextGrids
## loop through all textgrids
for textgrid in os.listdir(tg_dir):
    acq_name = str(textgrid.split('.')[0])
    tg_path = os.path.join(tg_dir,textgrid)
    if not '.ch1.textgrid' in tg_path.lower():
        continue
    pm = audiolabel.LabelManager(from_file=tg_path, from_type="praat")
    # var used to check if any 'x' was found in the target word.
    adjusted = False
    ##loop through all words in each textgrids
    for word in pm.tier('words'):
        phones = []
        #ignore frame sentence
        if (word.text in ['we','have','before','it','']):
            continue
        ## for the target word(s):
        ##get all the phones in the target word(s)
        # sample the phones tier every 10 ms across the word interval
        for t in (np.arange(word.t1, word.t2, 0.01)):
            phon = pm.tier('phones').label_at(t)
            phones.append(phon)
        phones = set(phones)
        ## loop through all the phones in target word.
        for p in phones:
            ## if 'x' was found in a phone, check that it corresponds
            ## to the correct part of the target word/phrase.
            if "x" in p.text:
                target_word = pm.tier('words').label_at(p.center).text
                if not any(s in p.text.lower() for s in ["tx","dx","rx"]):
                    print("Wrong label in Acq. " + acq_name + ". Label should be on 't', 'd', or 'r'. ")
                    count = count + 1
                if target_word == "hearing":
                    print("Wrong label in Acq. " + acq_name + ". Label should be on 'hard', not 'hearing'. ")
                    count = count + 1
                elif target_word == "paradise":
                    print("Wrong label in Acq. " + acq_name + ". Label should be on 'bird', not 'paradise'. ")
                    count = count + 1
                ## if no mistakes were found, set adjusted to True
                else:
                    adjusted = True
    ## Make note if no phones within the target item contained 'x'
    if not adjusted:
        print("Acq. " + acq_name + " was not labeled.")
        count = count + 1
## Print summary message.
if count == 0:
    print("Congratulations! You are done adjusting textgrids.")
else:
    print(str(count) + " acquisitions need to be fixed.")
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,743 | mfaytak/ultramisc | refs/heads/master | /scripts/ssanova/format-con.py | #!/usr/bin/env python
'''
format-con.py: Extract pairs of columns from EdgeTrak-produced .con files
to generate CSV files that Jeff Mielke's tongue_ssanova.R can operate on.
Extracts single target frames but can be extended to sequences.
Usage
$ python format-con.py dirname
Arguments
directory dir containing all files described below
Assumptions:
1. Each acquisition (consisting of audio, ultrasound images, a TextGrid, a frame synchronization file, and a .con file output from EdgeTrak) is in a separate subdirectory in the session directory.
2. Speaker ID is given before an underscore in the top-level directory name (i.e., /.../SUBJECT_* is called as directory and fed into basedir).
3. Specific formatting requirements for files (which you may need to alter):
frame synchronization TextGrid with extention .sync.TextGrid (frame acquisition times w.r.t. an associated audio recording)
some number of .bmp ultrasound frame files *.###.bmp, where ### is a frame number
'''
# Authors: Matthew Faytak (faytak@ucla.edu) Copyright (c) 2015
# Last modified 10-2020
import argparse
import glob
import os, sys, re
from csv import reader

import audiolabel
from ultramisc.ebutils import read_stimfile
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
args = parser.parse_args()
basedir = args.directory
if not os.path.exists(basedir):
    print("\tDirectory provided doesn't exist")
    # Bug fixed: `ArgumentParser.print_usage` was a bare attribute access
    # that called nothing -- and a NameError besides, since only the argparse
    # module name is in scope.  Call the methods on the parser instance.
    parser.print_usage()
    parser.print_help()
    sys.exit(2)
# Output CSV lives next to the session directory, named <dirname>_cons.txt.
out_file = os.path.join(basedir, os.path.split(basedir)[-1] + "_cons.txt")
# generate header of output file
# this is for a particular project's metadata, but can be retooled
# NOTE(review): the header names 7 columns, but each data row written in the
# main loop below has 11 fields (condition/place/aspiration/tone included).
head = '\t'.join(["speaker","acq","token","frameIdx","vowel","X","Y"])
with open(out_file, "w") as out:
    out.write(head + "\n")
# find speaker
# speaker ID: "S" plus the digits of the session directory name
spkr = "S" + re.sub("[^0-9]", "", basedir)
# project-specific grouping of stimuli
low_set = ["baodian","paoxie","daoyan",
        "taoyuan","gaojian","kaojuan"]
# below on line 96, we search for any label on an otherwise empty tier.
# if running directly on forced aligner output, you can search for
# a set of labels like so:
# vow = re.compile("^(AE1|IY1|UW1|OW1|UH1)")
# then search for "vow" (see lines 97-98)
# generate the rest of the output file
# search over .con files in first layer of subdirs
# For each acquisition's EdgeTrak .con file: find the closure release time in
# the TextGrid, map it to a frame index via the sync TextGrid, and copy that
# frame's (X, Y) contour columns into the output CSV.
glob_exp = os.path.join(basedir,"*","*.con")
token_ct = []
for con in glob.glob(glob_exp):
    parent = os.path.dirname(con)
    basename = os.path.split(parent)[1]
    # get TGs
    tg = os.path.join(parent,str(basename + ".ch1.TextGrid"))
    sync_tg = os.path.join(parent,str(basename + ".sync.TextGrid"))
    # get stimulus; other metadata
    stimfile = os.path.join(parent,"stim.txt")
    stim = read_stimfile(stimfile)
    if stim in low_set:
        tone = "low"
    else:
        tone = "high"
    # get condition ("ba"/"init"), place, aspiration (in that order in md)
    # replace with your own metadata as needed
    md = os.path.join(parent,"meta.txt")
    with open(md) as csvfile:
        meta = reader(csvfile, delimiter="\t")
        tbl = [row for row in meta] # a little janky but works
    condition = tbl[1][0]
    place = tbl[1][1]
    aspiration = tbl[1][2]
    # instantiate audio TG handler; get release time and phone label
    # assumes one labeled interval on the searched tier
    pm = audiolabel.LabelManager(from_file=tg, from_type='praat')
    clos = pm.tier('closure').search(".", return_match=True)[0][0]
    # alternately, search for a set: (see lines 56-60)
    # v = pm.tier('segments').search(vow, return_match=True)[0][0]
    release_time = clos.t2
    phone = clos.text
    # get token number
    # running per-phone token counter (nth occurrence of this phone so far)
    token_ct.append(phone)
    token = token_ct.count(phone)
    # instantiate sync TG handler; get absolute index of frame
    sc = audiolabel.LabelManager(from_file=sync_tg, from_type='praat')
    release_idx = sc.tier('pulse_idx').label_at(release_time).text
    abs_fr_idx = int(release_idx) - 1 # should yield 121 for first file
    # get index of first extracted frame
    bmp_list = glob.glob(os.path.join(parent,'*.bmp'))
    fr_idx = []
    for bmp in bmp_list:
        fr_num = re.search('.(\d+).bmp$',bmp)
        fr_idx.append(int(fr_num.group(1)))
    # the int is crucial here: otherwise, min list idx (b/c list of strings!) will be returned
    first_fr = min(fr_idx)
    # get frame index relative to extracted frames, = column number in .con file
    col_n = abs_fr_idx - first_fr
    # pull the appropriate columns from .con file and save to CSV
    with open(con) as conf:
        with open(out_file,"a") as out:
            csvreader = reader(conf, delimiter="\t")
            d = list(csvreader)
            rows = sum(1 for row in d) # rows = 100, generally
            x_col = (2*col_n)-2
            y_col = (2*col_n)-1
            for t in range(0,rows):
                try:
                    x_val = d[t][x_col]
                    y_val = d[t][y_col]
                except IndexError:
                    # NOTE(review): `con_file` is not defined in this scope
                    # (the loop variable is `con`), so this branch would
                    # raise NameError instead of printing the warning.
                    print("WARNING: some problem accessing {}):".format(con_file))
                    sys.exit(2)
                row_out = '\t'.join([
                    spkr,
                    basename, str(token),
                    str(abs_fr_idx),
                    condition,
                    phone, place, aspiration, tone,
                    x_val, y_val
                    ])
                out.write(row_out + "\n")
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,744 | mfaytak/ultramisc | refs/heads/master | /scripts/ssanova/eb-extract-frames.py | #!/usr/bin/env python
# eb-extract-frames.py: extract frame BMPs for contour extraction
# usage: python eb-extract-frames.py expdir (-f / --flop)
import argparse
import audiolabel
import glob
import numpy as np
import os
import re
import shutil
from operator import itemgetter
from PIL import Image
from ultratils.rawreader import RawReader
from ultratils.pysonix.scanconvert import Converter
from ultramisc.ebutils import read_echob_metadata, read_stimfile
class Header(object):
    """Bare attribute container for image-header metadata.

    Fields w, h, and sf are assigned by the caller before the object is
    handed to ultratils' Converter (see the Converter setup below).
    """
    pass
class Probe(object):
    """Bare attribute container for transducer parameters.

    Fields radius, numElements, and pitch are assigned by the caller
    before the object is handed to ultratils' Converter.
    """
    pass
# empty RawReader and Converter handles; both are populated lazily on the
# first acquisition processed in the main loop below.
rdr = None
conv = None
# set of segments being searched for (target vowels, word-initial match)
vow = re.compile("^(UW1|OW1|UH1|AE1|IY1)")
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
        help="Experiment directory containing \
            acquisitions in flat structure"
        )
parser.add_argument("-f",
        "--flop",
        help="Horizontally flip the data",
        action="store_true"
        )
args = parser.parse_args()
# read in expdir
expdir = os.path.normpath(args.expdir)
# set up copy location; remove and recreate so each run starts clean
output_dir = os.path.join(expdir,"_copy")
try:
    os.mkdir(output_dir)
except FileExistsError:
    shutil.rmtree(output_dir)
    os.mkdir(output_dir)
# glob expression matching every acquisition's raw ultrasound file
rawfile_glob_exp = os.path.join(expdir,"*","*.raw")
# loop through acqs and:
# Walk every acquisition's .raw file; for each target vowel found in the
# aligned TextGrid, copy the acquisition's support files into _copy/ and
# export one fan-converted BMP per ultrasound frame in the vowel interval.
for rf in glob.glob(rawfile_glob_exp):
    parent = os.path.dirname(rf)
    basename = os.path.split(parent)[1]

    # use stim.txt to skip non-trials
    stimfile = os.path.join(parent, "stim.txt")
    stim = read_stimfile(stimfile)
    if stim in ("bolus", "practice"):
        continue

    # define RawReader and Converter parameters from first acq
    if conv is None:
        print("Defining Converter ...")
        # get image size data; allow for manual input if problems
        try:
            nscanlines, npoints, junk = read_echob_metadata(rf)
        except ValueError:
            print("WARNING: no data in {}.img.txt, please input:".format(basename))
            nscanlines = int(input("\tnscanlines (usually 127) "))
            npoints = int(input("\tnpoints (usually 1020) "))
            junk = int(input("\tjunk (usually 36, or 1020 - 984) "))

        # TODO use metadata instead of hard-coded values
        header = Header()
        header.w = nscanlines        # input image width
        header.h = npoints - junk    # input image height, trimmed
        header.sf = 4000000          # magic number, sorry!
        probe = Probe()
        probe.radius = 10000         # based on '10' in transducer model number
        probe.numElements = 128      # based on '128' in transducer model number
        probe.pitch = 185            # based on Ultrasonix C9-5/10 transducer
        conv = Converter(header, probe)

    # fresh reader per acquisition so frames come from *this* .raw file
    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)

    # define "support" file names based on .raw
    wav = os.path.join(parent, basename + ".ch1.wav")
    tg = os.path.join(parent, basename + ".ch1.TextGrid")
    sync = os.path.join(parent, basename + '.sync.txt')
    sync_tg = os.path.join(parent, basename + ".sync.TextGrid")
    idx_txt = os.path.join(parent, basename + ".idx.txt")

    # instantiate LabelManager over the forced-alignment TextGrid
    pm = audiolabel.LabelManager(from_file=tg, from_type="praat")

    # read in .sync.txt file and get recording window times
    sync_times = []
    with open(sync, 'r') as s:
        for line in s:
            try:
                sync_times.append(float(line.strip().split("\t")[0]))
            except ValueError:
                pass  # ignore line if a header is present
    rec_start = sync_times[0]
    rec_end = sync_times[-1]

    # extract frame(s) from .raw file
    # TODO handle multiple repititions by only taking last rep
    for v, m in pm.tier('phone').search(vow, return_match=True):
        pron = pm.tier('word').label_at(v.center).text
        # skip any tokens from non-target words
        if pron not in ["BUH", "FUH", "BUW", "BOOW", "BAAE", "BIY"]:
            continue
        # correct UH1 vowel depending on pronunciation FUH or BUH
        elif pron == "FUH":
            phone = "VU"
        elif pron == "BUH":
            phone = "BU"
        elif pron == "BOOW":
            phone = "UW"
        else:
            phone = v.text.replace('1', '')

        # check segment start or end times: in recording window?
        if v.t1 < rec_start or v.t2 > rec_end:
            print("SKIPPED {:}, {:} outside recording window".format(basename, v.text))
            continue
        print("{} - found {} in {}".format(basename, phone, pron))

        # make destination and copy "support" files for parent file
        copy_dir = os.path.join(output_dir, basename)
        # this will throw a warning if the directory is overwritten
        # by a second repetition of a target word in the file.
        # TODO put some more information in this?
        try:
            os.mkdir(copy_dir)
        except FileExistsError:
            print("WARNING: Multiple targets in {}".format(basename))
            print("\t Previous repetition overwritten")
            shutil.rmtree(copy_dir)
            os.mkdir(copy_dir)
        shutil.copy(wav, copy_dir)
        shutil.copy(tg, copy_dir)
        shutil.copy(sync_tg, copy_dir)
        shutil.copy(idx_txt, copy_dir)
        shutil.copy(stimfile, copy_dir)
        shutil.copy(sync, copy_dir)

        # get segmental context (non-silence): preceding phone, skipping "sp"
        skip_back = 0
        while True:
            if skip_back == 0:
                before = pm.tier('phone').prev(v).text
            else:
                before = pm.tier('phone').prev(v, skip=skip_back).text
            if before == "sp":
                skip_back += 1
            else:
                break
        # ... and following phone.
        # BUG FIX: this loop previously called prev() (copy-paste from the
        # block above), so "after" was actually the *preceding* context;
        # the following context requires next().
        skip_ahead = 0
        while True:
            if skip_ahead == 0:
                after = pm.tier('phone').next(v).text
            else:
                after = pm.tier('phone').next(v, skip=skip_ahead).text
            if after == "sp":
                skip_ahead += 1
            else:
                break
        # TODO store phone, before, after
        # TODO move these to format-con? you'll actually need them then

        # find frame idx of v.t1 - v.t2 range and of midpoint
        t1_diff = [abs(v.t1 - t) for t in sync_times]
        t2_diff = [abs(v.t2 - t) for t in sync_times]
        # BUG FIX: was abs(v.center) - t, which is not a distance and made
        # tmid_match meaningless; use abs(v.center - t) like t1/t2 above.
        tmid_diff = [abs(v.center - t) for t in sync_times]
        t1_match = min(enumerate(t1_diff), key=itemgetter(1))[0]
        t2_match = min(enumerate(t2_diff), key=itemgetter(1))[0]
        tmid_match = min(enumerate(tmid_diff), key=itemgetter(1))[0]

        # extract and convert every frame in the v.t1 - v.t2 range
        for idx in range(t1_match, (t2_match + 1)):
            # extract frame using RawReader
            unconv_frame = rdr.get_frame(idx)
            # trim junk pixels off of top
            trimmed_frame = unconv_frame[junk:, :]
            if args.flop:
                trimmed_frame = np.fliplr(trimmed_frame)
            # TODO filter?
            # convert to fan shape
            conv_frame = conv.convert(np.flipud(trimmed_frame))
            ready_frame = np.flipud(conv_frame)
            # create frame handle and save to copy dir
            fh = basename + "." + str(idx) + ".bmp"
            out_img = Image.fromarray(ready_frame)
            out_img.save(os.path.join(copy_dir, fh))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,745 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/punjabi-series-cache-frames.py | #!/usr/bin/env python
"""
punjabi-series-cache-frames: time series frame caching method used in Punjabi dental/retroflex project (Kochetov, Faytak, Nara)
"""
# TODO: actually using?
from __future__ import absolute_import, division, print_function
import argparse
import glob
import numpy as np
import os
import pandas as pd
import re
import struct
import subprocess
import sys
from collections import OrderedDict
from hashlib import sha1
from operator import itemgetter
from PIL import Image
from scipy import ndimage
# read in args
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
args = parser.parse_args()
# check for appropriate directory
expdir = args.directory
try:
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # an explicit `if not os.path.exists(...)` check would be safer.
    assert os.path.exists(args.directory)
except AssertionError:
    # TODO raise exception
    print("\tDirectory provided doesn't exist")
    parser.print_help()
    sys.exit(2)
# frame stack (built incrementally) and per-frame metadata records
data = None
recs = []
# outputs written at the top level of the experiment directory
frames_out = os.path.join(expdir,"frames.npy")
metadata_out = os.path.join(expdir,"frames_metadata.pickle")
# Walk the experiment tree; any directory containing .png frames is one
# token. For each token, take four evenly spaced frames between the first
# extracted frame and the temporal midpoint, grayscale them, and stack
# them into `data` with one metadata record per token.
for path, dirs, files in os.walk(expdir):
    for d in dirs:
        png_glob_exp = os.path.join(path, d, "*.png")
        glob_iter = glob.glob(png_glob_exp)
        if len(glob_iter) > 0:  # if there are any .png files in directory
            idx_list = []
            for png in glob_iter:
                # retrieve and sort frame indices from file path strings
                fr_num = re.search(r'_(\d+).png$', png)
                idx_list.append(int(fr_num.group(1)))
            start_idx = min(idx_list)
            end_idx = max(idx_list)
            mid_idx = int(np.floor((start_idx + end_idx)/2))
            try:
                assert mid_idx in idx_list
            except AssertionError:
                print("Desired mid frame index {:} isn't available.".format(mid_idx))
                sys.exit(2)
            # get paths for evenly spaced frames between 0th and midpoint frame
            series_frames = []
            for idx in np.linspace(start_idx, mid_idx, num=4):
                idx_int = int(np.floor(idx))
                glob_finder = glob.glob(os.path.join(path, d, "*_{:}.png".format(idx_int)))
                if len(glob_finder) == 1:
                    try:
                        # get file path and try to grab some metadata from it
                        frame_path = glob_finder[0]
                        # FIX: split on os.sep rather than '/' so the
                        # subj/word/block/token unpacking also works on Windows
                        subj, word, block, token, filename = frame_path.split(os.sep)
                    except ValueError:
                        print("Your directories are not structured right. Readjust!")
                        sys.exit(2)
                elif len(glob_finder) > 1:
                    # BUG FIX: the "too many" / "could not find" messages were
                    # swapped between these two branches in the original.
                    print("Too many files with desired index {}".format(idx_int))
                    sys.exit(2)
                elif len(glob_finder) < 1:
                    print("Could not find file with desired index {}".format(idx_int))
                    sys.exit(2)
                # get more metadata from filename
                filename = os.path.splitext(filename)[0]
                filename = re.sub("__", "_", filename)
                subj_dupl, timestamp, idx_dupl = filename.split("_")
                # convert from RGB to grayscale (one-channel), then to uint8
                print(frame_path)
                inframe = np.asarray(Image.open(frame_path).convert("L"))
                rawdata = inframe.astype(np.uint8)
                # TODO filter and ROI somehow (use SRAD) (maybe ROI will have to be skipped?)
                # TODO downsample size of images?
                series_frames.append(rawdata)
            # stack this token's 4-frame series onto the running array
            if data is None:
                data = np.expand_dims(series_frames, axis=0)
            else:
                data = np.concatenate([data, np.expand_dims(series_frames, axis=0)])
            recs.append(
                OrderedDict([
                    ('filename', timestamp),
                    ('subject', subj),
                    ('stim', word),
                    ('token', token),
                    ('index', idx_int),
                    # checksum of the first frame in the series, for the
                    # integrity asserts run after the pickle is reloaded
                    ('sha1', sha1(series_frames[0].ravel()).hexdigest()),
                    ('sha1_dtype', series_frames[0].dtype)
                ])
            )
# convert metadata to a DataFrame (column order taken from the first record)
md = pd.DataFrame.from_records(recs, columns=recs[0].keys())
# make sure there is one metadata row for each ndarray in the pickle
assert(len(md) == data.shape[0])
# compare checksums of the first frame in the first and last series against
# the hashes recorded when each series was built
assert(md.loc[0, 'sha1'] == sha1(data[0][0].ravel()).hexdigest())
assert(md.loc[len(md)-1,'sha1'] == sha1(data[-1][0].ravel()).hexdigest())
np.save(frames_out, data)
md.to_pickle(metadata_out) | {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,746 | mfaytak/ultramisc | refs/heads/master | /scripts/ssanova/ultrasonix-extract-frames.py | #!/usr/bin/env python
# TODO RENAME extract frame BMPs for contour extraction
# from ultrasonix data, using ultratils.exp.Exp class
# This is set up to extract Kejom vowel and fricative data
# usage: python ulx-extract-frames.py expdir (-f / --flop)
import os, re, shutil
from PIL import Image
from ultratils.exp import Exp
import numpy as np
import argparse
import audiolabel
# set of segments being searched for (target fricative + vowels)
vre = re.compile(
    "^(SH|IY1|UW1|IH1|UH1|AA1)"
    )
skip_word_list = ["GAA","LAA","FAA","FIY"]
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
        help="Experiment directory containing \
            acquisitions in flat structure"
        )
# NOTE(review): --skip is parsed but never referenced below — either wire it
# up in the main loop or remove it.
parser.add_argument("-s", "--skip",
        help="Skip an acquisition if it already has BMP files",
        action="store_true"
        )
parser.add_argument("-f",
        "--flop",
        help="Horizontally flip the data",
        action="store_true"
        )
args = parser.parse_args()
# read in expdir
expdir = os.path.normpath(args.expdir)
# set up copy location; remove and recreate so each run starts clean
output_dir = os.path.join(expdir,"_frames")
try:
    os.mkdir(output_dir)
except FileExistsError:
    shutil.rmtree(output_dir)
    os.mkdir(output_dir)
e = Exp(expdir=expdir) # from command line args
e.gather()
# error logging counters, one per skip reason
# NOTE(review): mid_skip_count is never incremented, and none of the
# counters are reported at the end of the run.
mid_skip_count = 0
slice_skip_count = 0
sync_skip_count = 0
window_skip_count = 0
err_log = os.path.join(expdir,"_errors.txt")
head = "\t".join(["acq","problemType"])
# Log problem acquisitions as we go (one TSV row per skip) while extracting
# converted BMP frames for every target segment in every acquisition.
with open(err_log, "w") as out:
    # BUG FIX: the header was written without a trailing newline, so the
    # first error row ran onto the same line as the column names.
    out.write(head + "\n")
    for a in e.acquisitions:
        patchy = False
        # check for sync.txt; skip if one doesn't exist
        try:
            a.sync_lm
        except IOError:
            print("SKIPPING: no synchronization for {:}.".format(a.timestamp))
            sync_skip_count += 1
            out.write("\t".join([a.timestamp,"sync"])+"\n")
            continue
        # TODO check
        basename = a.timestamp
        copy_dir = os.path.join(output_dir,basename)
        try:
            os.mkdir(copy_dir)
        except FileExistsError:
            # TODO check if the new stimulus is "IH" or "UH"
            # if true - then make a separate dir for SH?
            # need a more general solution for this problem, both here
            # and in the echob script.
            print("WARNING: Multiple targets in {}".format(basename))
            print("\t Previous repetition overwritten")
            shutil.rmtree(copy_dir)
            os.mkdir(copy_dir)
        # move key files over to new dir
        try:
            shutil.copy(a.abs_sync_tg, copy_dir)
        except IOError:
            print("SKIPPING: Incomplete synchronization for {:}.".format(a.timestamp))
            sync_skip_count += 1
            out.write("\t".join([a.timestamp,"sync"])+"\n")
            continue
        tg = str(a.abs_image_file + ".ch1.TextGrid")
        shutil.copy(tg, copy_dir)
        sync_txt = str(a.abs_image_file + ".sync.txt")
        shutil.copy(sync_txt, copy_dir)
        shutil.copy(a.abs_stim_file, copy_dir)
        shutil.copy(os.path.splitext(a.abs_audio_file)[0]+".ch1.wav", copy_dir)
        # instantiate LabelManager over the forced-alignment TextGrid
        pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
        # get recording window times (first and last pulse labels)
        start_window = a.pulse_idx.search(r'\w')[0].t1
        end_window = a.pulse_idx.search(r'\w')[-1].t2
        # for each segment in vre detected in file:
        for v,m in pm.tier('phone').search(vre, return_match=True):
            word = pm.tier('word').label_at(v.center).text
            if word in skip_word_list:
                continue
            if v.t1 < start_window or v.t2 > end_window:
                print("SKIPPING: part of segment is outside recording window in {:}".format(a.timestamp))
                window_skip_count += 1
                out.write("\t".join([a.timestamp,"window"])+"\n")
                continue
            # check for more than 1 consecutive NA in a row in the interval of interest
            # NOTE(review): patchy is only reset per *acquisition*, so one
            # patchy segment also skips all later segments in the same
            # acquisition — confirm that this is the intended behavior.
            for c in a.raw_data_idx.tslice(t1=v.t1,t2=v.t2):
                if c.text == "NA":
                    if a.raw_data_idx.prev(c).text == "NA":
                        print("SKIPPING: {:} is patchy, consec. NA frames".format(a.timestamp))
                        slice_skip_count += 1
                        out.write("\t".join([a.timestamp,"slice"])+"\n")
                        patchy = True
                        break
            if patchy:
                continue
            # if still good, work over each file's "slices" and get all frames in that range
            for l in a.pulse_idx.tslice(t1=v.t1,t2=v.t2):
                d_tuple = a.frame_at(l.center,convert=True)
                d_array = d_tuple[0]
                # if frame is blank (text is "NA"), grab previous frame
                if d_array is None:
                    prevl = a.pulse_idx.prev(l)
                    d_tuple = a.frame_at(prevl.center,convert=True)
                    d_array = d_tuple[0]
                d = d_array.astype(np.uint8)
                d = np.flipud(d)
                # flip left-right ("flop") if backwards
                if args.flop:
                    d = np.fliplr(d)
                frame = Image.fromarray(d)
                imgname = '{:}.{:}.bmp'.format(a.timestamp, l.text)
                frame.save(os.path.join(copy_dir, imgname))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,747 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/ultrasonix-subsetter.py | #!/usr/bin/env python
# subsetter.py: pick out a subset of acquisitions in an experiment directory.
# usage: python subsetter.py expdir
import argparse
import glob
import os
import shutil
import sys
from ultratils.exp import Exp
from ultramisc.ebutils import read_stimfile
# parse argument(s)
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
        help="Experiment directory containing \
            acquisitions in flat structure"
        )
parser.add_argument("-d",
        "--delete",
        help="Delete non-target files in place \
            (default behavior is to copy target \
            files to new location)",
        action="store_true"
        )
args = parser.parse_args()
expdir = os.path.normpath(args.expdir)
# issue warning or set up copy location.
# NOTE: copy_dir is only defined in the non-delete branch; the main loop
# below only uses it in that mode.
if args.delete:
    # keep prompting until the user types exactly Y or N
    while True:
        try:
            warning = input("WARNING: this will delete non-target acqs. \
                Make sure your files are backed up. Press Y \
                to continue or N to exit.")
            assert warning in ["Y", "N"]
        except AssertionError:
            print("Typo, try again: ")
            continue
        else:
            break
    if warning == "N":
        sys.exit()
else:
    # copy target acquisitions into <expdir>/<expdir>_subset, recreated fresh
    copy_str = os.path.split(expdir)[1] + "_subset"
    copy_dir = os.path.join(expdir,copy_str)
    try:
        os.mkdir(copy_dir)
    except FileExistsError:
        shutil.rmtree(copy_dir)
        os.mkdir(copy_dir)
# desired analysis set; change as required
target_list = ["EBA'", "BV", "EBVUH", "BUW", "BIY", "SHIY", "SHUH", "ESHIH", "GHUH", "KIH", "KUH", "KUW", "KIY"] # last four for ACAL
# differences among all four "elsewhere" high vowels in same frame:
# no IH??
# /a/ TA', SA', FA', KA', GHA'; /i/ EFIY, BIY, SIY, KIY, ETIY; /i/ postalveolar ACHIY;
# /u/ ZHUW, EFUW, BUW, TUW; /1/ EGHIH, CHIH; /0/ GHUH, NYUH; /0/ postalveolar CHUH;
# /0/ labiodental PFUH, EFUH; misc. ETYI, EFYI, BYI', TYI', FYI', KYI', CHI', ETYIH, BYI, BYIH
# Scan every acquisition: in delete mode, prune non-target acquisition
# directories in place; otherwise copy each target acquisition into the
# subset directory created above.
e = Exp(expdir=expdir)  # experiment root comes from the command line
e.gather()
for acq in e.acquisitions:
    acq_stim = read_stimfile(acq.abs_stim_file)
    acq_parent = os.path.dirname(acq.abs_stim_file)
    if args.delete:
        if acq_stim not in target_list:
            shutil.rmtree(acq_parent)
    elif acq_stim in target_list:
        shutil.copytree(acq_parent, os.path.join(copy_dir, acq.timestamp))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,748 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/replace-ts-word.py | '''
replace-ts-word: find and replace words in transcript files.
Intended use is changing spellings to get along with forced alignment
dictionaries.
'''
import os, sys, glob
import argparse
def read_transcript(my_ts_file):
    """Read a one-line transcript file and return its words.

    Strips the trailing newline and splits on any whitespace, so runs of
    spaces do not produce empty entries.
    """
    with open(my_ts_file, "r") as fh:
        return fh.read().rstrip('\n').split()
# parse argument(s)
parser = argparse.ArgumentParser()
# read things in
parser.add_argument("expdir",
        help="Experiment directory containing \
            acquisitions in flat structure"
        )
parser.add_argument("-p", "--problem",
        help="Word in transcripts to be changed"
        )
parser.add_argument("-s", "--sub",
        help="What to change the word to"
        )
# pull them together, making an "args" object with 3 attributes
# args.expdir, args.problem, args.sub
args = parser.parse_args()
# both optional flags are required in practice; bail out if either is missing
if args.problem is None or args.sub is None:
    print("Problem word and/or substitution undefined; exiting.")
    sys.exit()
# figure out where all .raw files are
# NOTE(review): expdir is normalized here but never used below — glob_regex
# recomputes the normalization itself.
expdir = os.path.normpath(args.expdir)
glob_regex = os.path.join(os.path.normpath(args.expdir), # our subject dir
    "*", # wildcard (any directory)
    "*.raw" # wildcard (any filename) plus .raw
    )
# Each glob hit is one acquisition's .raw file; its directory may hold a
# transcript.txt whose words we rewrite in place.
for rf in glob.glob(glob_regex):
    tsfile = os.path.join(os.path.dirname(rf), "transcript.txt")
    # guard clause: nothing to do for acquisitions without a transcript
    if not os.path.exists(tsfile):
        continue
    ts_list = read_transcript(tsfile)
    print(ts_list)
    # one-pass find-and-replace via list comprehension; see second answer at
    # https://stackoverflow.com/questions/2582138/finding-and-replacing-elements-in-a-list-python
    mod_ts_list = [args.sub if wd == args.problem else wd for wd in ts_list]
    print(mod_ts_list)
    with open(tsfile, "w") as ts:
        ts.write(' '.join(mod_ts_list))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,749 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/mfa-flatten.py | # Simple utility to copy audio data recorded with ultrasound to
# to a flat directory structure with appropriate file extensions,
# as expected by the Montreal Forced Aligner.
# Usage: python mfa-flatten.py [expdir]
# expdir - top-level directory for one subject, as output by EchoB/Micro.
import argparse
import glob
import os
import shutil
# parse argument(s)
parser = argparse.ArgumentParser()
# read things in
parser.add_argument("expdir",
        help="Experiment directory containing \
            acquisitions in flat structure"
        )
args = parser.parse_args()
expdir = args.expdir
# one glob hit per acquisition's .raw file
glob_regexp = os.path.join(expdir,"*","*.raw")
# make new folder for the flattened MFA input.
# FIX: exist_ok=True so re-running the script does not crash with
# FileExistsError; existing copies are simply overwritten below.
alignment_in = os.path.join(expdir,"_align")
os.makedirs(alignment_in, exist_ok=True)
# Copy each acquisition's audio + transcript into the flat _align folder,
# renamed to the MFA convention <timestamp>.ch1.{wav,lab}.
for rawh in glob.glob(glob_regexp):
    timestamp = os.path.split(os.path.splitext(rawh)[0])[1]
    parent = os.path.split(rawh)[0]
    wav = os.path.join(parent, str(timestamp + ".ch1.wav"))
    transcript = os.path.join(parent,"transcript.txt")
    if not os.path.exists(transcript):
        continue
    # make a new file handle for the transcript file.
    # NOTE(review): splitext keeps the directory prefix, so this comparison
    # against the bare timestamp is always True and the else branch is
    # currently unreachable.
    if os.path.splitext(transcript)[0] != timestamp: # keeping this comparison for the future
        transcript_dst = os.path.join(alignment_in, str(timestamp + ".ch1.lab"))
    else:
        transcript_dst = os.path.join(alignment_in, transcript)
    wav_dst = os.path.join(alignment_in, str(timestamp + ".ch1.wav"))
    # transfer the files to a new folder
    shutil.copy(wav, wav_dst)
    shutil.copy(transcript, transcript_dst)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,750 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/suzhou-process-cache.py | '''
suzhou-process-cache: frame cache processing method as used in Suzhou project.
NOTE: largely superceded by general script, ./process-cache.py, plus
the ./suzhou-pca-lda* scripts run afterwards.
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from hashlib import sha1
from imgphon import ultrasound as us
from scipy.ndimage import median_filter
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from ultratils.pysonix.scanconvert import Converter
# valid answers for the interactive RoI confirmation prompt below
yes_no = ["Y", "N"]
# instantiate converter for pca_data images
class Header(object):
    """Bare attribute container for image-header metadata (w, h, sf),
    filled in before constructing the ultratils Converter."""
    pass
class Probe(object):
    """Bare attribute container for transducer parameters (radius,
    numElements, pitch), filled in before constructing the Converter."""
    pass
# to be defined below on first pass through data
conv = None
# sample frame output
# sample_frame = None
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
#parser.add_argument("n_pca", help="Number of principal components to start with")
#parser.add_argument("n_lda", help="Number of linear discriminant functions to output")
#parser.add_argument("-v", "--visualize", help="Produce plots of PC loadings on fan",action="store_true")
args = parser.parse_args()
# FIX: the old try/except IndexError here was dead code — attribute access
# on an argparse Namespace never raises IndexError, and the handler merely
# referenced ArgumentParser.print_usage/print_help without calling them.
# Validate the directory explicitly instead (argparse already errors out
# when the positional argument is missing).
expdir = args.directory
if not os.path.exists(expdir):
    print("\tDirectory provided doesn't exist")
    parser.print_help()
    sys.exit(2)
# per-subject output filenames (written into each subject's directory)
frames_out = "frames_proc.npy"
metadata_out = "frames_proc_metadata.pickle"
# Per subject directory: load the cached frame stack + metadata, subset to
# the tokens of interest, interactively choose a region of interest, then
# SRAD-filter/convert/clean every frame and save the processed stack.
for root,directories,files in os.walk(expdir):
    # TODO RoI stuff diff. for each subj
    for d in directories:
        subject = re.sub("[^0-9]","",d)
        data_in = os.path.join(root,d,"frames.npy")
        data = np.load(data_in)
        metadata_in = os.path.join(root,d,'frames_metadata.pickle')
        md = pd.read_pickle(metadata_in)
        # some sanity checks on data checksums
        assert(len(md) == data.shape[0]) # make sure one md row for each frame
        assert(md.loc[0, 'sha1'] == sha1(data[0].ravel()).hexdigest()) # checksums
        assert(md.loc[len(md)-1,'sha1'] == sha1(data[-1].ravel()).hexdigest())
        # subset data
        #training_list = ["IY1", "SH"] # for use later
        #test_list = ["IZ1"] # for use later
        vow_mask = (md['pron'].isin(["BIY", "IY", "XIY", "SIY", "BIZX", "IZ", "SIZ", "XIZ", "YZ", "XYZ", "SZ", "SZW"])) & (md['phone'] != "SH") & (md['phone'] != "S")
        sh_mask = (md['pron'].isin(["XIZ", "XYZ", "XIY", "XIEX", "XEU"])) & (md['phone'] == "SH")
        s_mask = (md['pron'].isin(["SAAE", "SEI", "SUW", "SIEX", "SOOW", "SZ", "SZW"])) & (md['phone'] == "S")
        # FIX: Series.as_matrix() was removed in pandas 1.0; .values is the
        # equivalent that works on both old and new pandas. Also renamed to
        # row_mask so it is not shadowed by the RoI image mask defined below.
        row_mask = (vow_mask | sh_mask | s_mask).values
        pca_data = data[row_mask]
        pca_md = md[row_mask]
        pca_md = pca_md.reset_index(drop=True)
        # define Converter parameters from first acq for first subj
        if conv is None:
            print("Defining Converter ...")
            header = Header()
            header.w = pca_data[0].shape[1] # input image width
            header.h = pca_data[0].shape[0] # input image height, trimmed
            header.sf = 4000000 # magic number, sorry!
            probe = Probe()
            probe.radius = 10000 # based on '10' in transducer model number
            probe.numElements = 128 # based on '128' in transducer model number
            probe.pitch = 185 # based on Ultrasonix C9-5/10 transducer
            conv = Converter(header, probe)
        print("Defining region of interest ...")
        # get mean frame, save it for inspection, then loop until the user
        # accepts an RoI drawn on top of it
        mean_frame = pca_data.mean(axis=0)
        conv_mean = np.flipud(conv.convert(np.flipud(mean_frame)))
        plt.title("Mean frame, Spkr {:}".format(subject))
        plt.imshow(conv_mean, cmap="Greys_r")
        file_ending_mean = "subj{:}_mean.pdf".format(subject)
        savepath_mean = os.path.join(root,d,file_ending_mean)
        plt.savefig(savepath_mean)
        # starting RoI bounds, adjustable interactively below
        roi_upper = 600
        roi_lower = 300
        roi_left = 20
        roi_right = 100
        # TODO give mask converted shape
        while True:
            mask = us.roi(mean_frame,
                upper=roi_upper,
                lower=roi_lower,
                left=roi_left,
                right=roi_right)
            masked_mean = mean_frame * mask
            conv_masked = np.flipud(conv.convert(np.flipud(masked_mean)))
            plt.title("Mean frame and RoI, Spkr {:}".format(subject))
            plt.imshow(conv_masked, cmap="Greys_r")
            file_ending_roi = "subj{:}_roi.pdf".format(subject)
            savepath_roi = os.path.join(root,d,file_ending_roi)
            plt.savefig(savepath_roi)
            good_roi = input("Inspect {:}. Good RoI? (Y/N) ".format(savepath_roi))
            # TODO improve typo handling
            if good_roi.upper() in yes_no:
                if good_roi.upper() == "Y":
                    break
                else:
                    roi_upper = int(input("Please provide a new upper bound for RoI (currently {:}): ".format(roi_upper)))
                    roi_lower = int(input("Please provide a new lower bound for RoI (currently {:}): ".format(roi_lower)))
                    roi_left = int(input("Please provide a new left bound for RoI (currently {:}): ".format(roi_left)))
                    roi_right = int(input("Please provide a new right bound for RoI (currently {:}): ".format(roi_right)))
            else:
                print("Typo, try again ...")
        # preallocate ultrasound frame array for PCA
        # NOTE(review): the NaN fill is immediately cast to uint8 and every
        # slot is overwritten in the loop below, so it is placeholder only.
        conv_frame = conv.convert(pca_data[0])
        out_frames = np.empty([pca_data.shape[0]] + list(conv_frame.shape)) * np.nan
        out_frames = out_frames.astype('uint8')
        adj_radius = int(conv_frame.shape[0]/50) # for median filter
        print("Preprocessing data for PCA ...")
        # make a sample frame for reference
        in_sample = pca_data[0]
        masked_samp = in_sample * mask # using mask defined above
        sradd_samp = us.srad(masked_samp)
        convd_samp = np.flipud(conv.convert(np.flipud(sradd_samp)))
        clean_samp = us.clean_frame(convd_samp, median_radius=adj_radius)
        rescaled_samp = clean_samp * 255
        sample_frame = rescaled_samp.astype(np.uint8)
        plt.title("Sample frame, Spkr {:}".format(subject))
        plt.imshow(sample_frame, cmap="Greys_r")
        file_ending_sample = "subj{:}_sample.pdf".format(subject)
        savepath_sample = os.path.join(root,d,file_ending_sample)
        plt.savefig(savepath_sample)
        print("Please check sample frame at {}!".format(savepath_sample))
        # fill in preallocated array
        # TODO parallelize this part?
        # TODO or perhaps the SRAD function
        filt_hds = []
        total = out_frames.shape[0]
        for idx,frame in enumerate(pca_data):
            masked = frame * mask # using mask defined above
            sradd = us.srad(masked)
            convd = np.flipud(conv.convert(np.flipud(sradd)))
            clean = us.clean_frame(convd, median_radius=adj_radius)
            # copying to out_frames casts to np.uint8; rescaling required
            rescaled = clean * 255
            out_frames[idx,:,:] = rescaled
            print(out_frames[idx].dtype)
            # new sha1 hex: filtered, conv to np.uint8
            filt_hds.append(sha1(out_frames[idx].ravel()).hexdigest())
            print("\tAdded frame {} of {}".format(idx+1,total))
        # add new sha1 hex as a column in the df
        pca_md = pca_md.assign(sha1_filt=pd.Series(filt_hds, index=pca_md.index))
        # make sure there is one metadata row for each image frame
        assert(len(pca_md) == out_frames.shape[0])
        # for debugging
        # pca_md.to_csv(os.path.join(root,d,"test.csv"))
        # compare checksums
        assert(pca_md.loc[0, 'sha1_filt'] == sha1(out_frames[0].ravel()).hexdigest())
        assert(pca_md.loc[len(pca_md)-1,'sha1_filt'] == sha1(out_frames[-1].ravel()).hexdigest())
        # outputs
        np.save(os.path.join(root,d,frames_out), out_frames)
        pca_md.to_pickle(os.path.join(root,d,metadata_out))
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,751 | mfaytak/ultramisc | refs/heads/master | /scripts/video/flap-video-viewer.py | #!/usr/bin/env python
import argparse
import easygui
import glob
import os
import time
import vlc
from ultramisc.ebutils import read_stimfile
def play_from_gui(av):
    '''
    Plays a video file using python VLC bindings, then closes
    the video player.

    av: path to an AVI (or other VLC-playable) file.
    Returns None.
    '''
    player = vlc.MediaPlayer(av)
    player.play()
    time.sleep(1) # sleep one second while VLC loads media and learns duration
    dur = player.get_length() / 1000 # convert ms to s
    # get_length() returns -1 when the duration is not yet known, and very
    # short clips may be under one second; clamp so time.sleep() never gets
    # a negative argument (which raises ValueError).
    time.sleep(max(dur - 1, 0)) # take out length of the second above
    player.stop()
    # returns None
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
                    help="Experiment directory containing all subjects' \
                          caches and metadata in separate folders"
                    )
parser.add_argument("outfile",
                    help="Text file to output annotation data to"
                    )
args = parser.parse_args()

# needed for the early exit below
import sys

expdir = args.expdir
# Validate the directory up front.  (The previous version wrapped the
# attribute access in try/except IndexError, which can never fire; its
# handler also referenced an unbound `ArgumentParser` name and `sys`
# without importing it, so it would have raised NameError if reached.)
if not os.path.isdir(expdir):
    print("\tDirectory provided doesn't exist")
    parser.print_usage()
    parser.print_help()
    sys.exit(2)
out_file = args.outfile
ann = input("Enter your initials: ")
avi_glob_exp = os.path.join(expdir, "*", "*_slow.avi")
# write header to file
with open(out_file, "w") as out:
out.write('\t'.join(["acq", "stim", "before", "after", "voi", "ann", "label"]) + '\n')
# loop through available _slow.avi files
for av in glob.glob(avi_glob_exp):
# gather metadata strings
parent = os.path.dirname(av)
stimfile = os.path.join(parent,"stim.txt")
stim = read_stimfile(stimfile)
beforefile = os.path.join(parent,"before.txt")
before = read_stimfile(beforefile)
afterfile = os.path.join(parent,"after.txt")
after = read_stimfile(afterfile)
voicefile = os.path.join(parent,"voice.txt")
voice = read_stimfile(voicefile)
# find the faster AVI file
basename = os.path.split(parent)[1]
av_fast = os.path.join(parent,str(basename + "_fast.avi"))
# TODO add name to player
while True:
click = easygui.buttonbox(title="Now playing {}".format(basename),
msg='\n'.join(
["Press Play to view {} in {}".format(basename, expdir),
"Word is {}".format(stim) # TODO: implement?
]),
choices=["Play", "Play fast", "Label"]
)
#print(choice)
if click == "Play":
play_from_gui(av)
elif click == "Play fast":
play_from_gui(av_fast)
elif click == "Label":
choice = easygui.buttonbox(title="Select a label, or go back",
choices=["up_flap", "down_flap", "low_tap", "high_tap", "GO BACK"]
)
# if the user mistakenly clicked "Label"
if choice == "GO BACK":
pass
# if the user wants to annotate and move on
else:
with open(out_file, "a") as out:
out.write('\t'.join([basename, stim, before, after, voice, ann, choice]) + '\n')
break
# if the window is X-ed out
else:
choice = "NA"
with open(out_file, "a") as out:
out.write('\t'.join([basename, stim, before, after, voice, ann, choice]) + '\n')
break
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,752 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/subsetter.py | #!/usr/bin/env python
# subsetter.py: pick out a subset of acquisitions in an experiment directory.
# usage: python subsetter.py expdir
# TODO test both options (delete, not delete)
import argparse
import glob
import os
import shutil
import sys
from ultramisc.ebutils import read_stimfile, read_listfile
# parse argument(s)
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
                    help="Experiment directory containing \
                          acquisitions in flat structure"
                    )
parser.add_argument("distractors",
                    help="Plaintext list of distractor words, \
                          one per line")
parser.add_argument("-d",
                    "--delete",
                    help="Delete distractor files in place \
                          (default behavior is to move distractor \
                          files to new location)",
                    action="store_true"
                    )
args = parser.parse_args()
expdir = os.path.normpath(args.expdir)

# desired analysis set; change as required
# BUG FIX: previously read a hard-coded "distractors.txt", silently
# ignoring the `distractors` positional argument parsed above.
distractor_list = read_listfile(args.distractors, deaccent=True)
# issue warning or set up copy location.
# In delete mode, require explicit Y/N confirmation before destroying data;
# otherwise prepare a "<expdir>_distractors" folder to move distractors into.
if args.delete:
    while True:
        try:
            # NOTE: the backslash continuations are inside the string
            # literal, so the prompt includes the continuation indentation.
            warning = input("WARNING: this will delete non-target acqs. \
                    Make sure your files are backed up. Press Y \
                    to continue or N to exit.")
            assert warning in ["Y", "N"]
        except AssertionError:
            print("Typo, try again: ")
            continue
        else:
            break
    if warning == "N":
        sys.exit()
else:
    copy_str = os.path.split(expdir)[1] + "_distractors"
    copy_dir = os.path.join(expdir, copy_str)
    # os.mkdir raises FileExistsError if the folder already exists --
    # presumably intentional to avoid mixing runs; TODO confirm
    os.mkdir(copy_dir)# TODO create the copy location
# iterate over directories within expdir with a *.raw file in them
rawfile_glob_exp = os.path.join(os.path.normpath(args.expdir),
                                "*",
                                "*.raw"
                                )
for rf in glob.glob(rawfile_glob_exp):
    parent = os.path.dirname(rf)
    acq = os.path.split(parent)[1]
    # the stimulus word for this acquisition lives in word.txt
    stimfile = os.path.join(parent,"word.txt")
    try:
        stim = read_stimfile(stimfile, deaccent=True)
    except FileNotFoundError:
        # BUG FIX: message previously said "No alignment TG" (copied from a
        # different script); the file that is actually missing is word.txt.
        print("No word.txt in {}; skipping".format(acq))
        continue
    # distractors are deleted outright in --delete mode, otherwise copied
    # to copy_dir (set up earlier) and then removed from expdir
    if args.delete:
        if stim in distractor_list:
            shutil.rmtree(parent)
    else:
        if stim in distractor_list:
            copy_path = os.path.join(copy_dir,acq)
            shutil.copytree(parent, copy_path)
            shutil.rmtree(parent)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,753 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/punjabi-series-process-cache.py | '''
punjabi-series-process-cache.py: process cache as done in Kochetov/Faytak/Nara project.
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import re
import sys
from hashlib import sha1
from imgphon import ultrasound as us
from scipy.ndimage import median_filter
# read in args
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
args = parser.parse_args()
# check for appropriate directory
expdir = args.directory
try:
    assert os.path.exists(args.directory)
except AssertionError:
    # TODO raise exception
    print("\tDirectory provided doesn't exist")
    parser.print_help()
    sys.exit(2)
# valid answers for the interactive RoI prompt below
yes_no = ["Y", "N"]
# subject ID = "P" + digits extracted from the directory name
subject = "P" + re.sub("[^0-9]", "", expdir)
# cached frame data: 4D array (acquisitions x frames x h x w -- TODO confirm)
data_in = os.path.join(expdir,"frames.npy")
data = np.load(data_in)
metadata_in = os.path.join(expdir,'frames_metadata.pickle')
md = pd.read_pickle(metadata_in)
# some sanity checks on data checksums
assert(len(md) == data.shape[0]) # make sure one md row for each frame
assert(md.loc[0, 'sha1'] == sha1(data[0][0].ravel()).hexdigest()) # checksums
assert(md.loc[len(md)-1,'sha1'] == sha1(data[-1][0].ravel()).hexdigest())
# TODO how to get the mean frame object for a 4D array?
# make temp flattened array and take mean of that
# (collapse the first two axes so every individual frame contributes
# equally to the pixel-wise mean)
data_for_mean = data.reshape([
    data.shape[0] * data.shape[1],
    data.shape[2],
    data.shape[3]
    ])
#print(data_for_mean.shape)
mean_frame = data_for_mean.mean(axis=0)
#conv_mean = np.flipud(conv.convert(np.flipud(mean_frame)))
# save the mean frame as a PDF for visual inspection
plt.title("Mean frame, Spkr {:}".format(subject))
plt.imshow(mean_frame, cmap="Greys_r")
file_ending_mean = "subj{:}_mean.pdf".format(subject)
savepath_mean = os.path.join(expdir,file_ending_mean)
plt.savefig(savepath_mean)
# initial region-of-interest bounds (pixel indices into the raw frame);
# refined interactively below until the user approves
roi_upper = 325
roi_lower = 200
roi_left = 200
roi_right = 400
# TODO give mask converted shape
# Interactive loop: render the masked mean frame to a PDF, ask the user to
# inspect it, and either accept the RoI or enter new bounds.
while True:
    mask = us.roi(mean_frame,
                  upper=roi_upper,
                  lower=roi_lower,
                  left=roi_left,
                  right=roi_right)
    masked_mean = mean_frame * mask
    #conv_masked = np.flipud(conv.convert(np.flipud(masked_mean)))
    plt.title("Mean frame and RoI, {:}".format(subject))
    plt.imshow(masked_mean, cmap="Greys_r")
    file_ending_roi = "subj{:}_roi.pdf".format(subject)
    savepath_roi = os.path.join(expdir,file_ending_roi)
    plt.savefig(savepath_roi)
    good_roi = input("Inspect {:}. Good RoI? (Y/N) ".format(savepath_roi))
    # TODO improve typo handling
    if good_roi.upper() in yes_no:
        if good_roi.upper() == "Y":
            break
        else:
            roi_upper = int(input("Please provide a new upper bound for RoI (currently {:}): ".format(roi_upper)))
            roi_lower = int(input("Please provide a new lower bound for RoI (currently {:}): ".format(roi_lower)))
            roi_left = int(input("Please provide a new left bound for RoI (currently {:}): ".format(roi_left)))
            roi_right = int(input("Please provide a new right bound for RoI (currently {:}): ".format(roi_right)))
    else:
        print("Typo, try again ...")
adj_radius = int(data[0][0].shape[0]/50) # short side of single frame /50, for median filter
print("Preprocessing data for PCA ...")
# process the first acquisition's series as a visual sanity check and to
# establish the output series shape used for preallocation below
in_series = data[0] # array
out_frames_samp = []
padding = 5 # number of pixels to tack on at edges to visually divide frames
for frame in in_series:
    # crop to RoI, despeckle, clean, rescale to uint8 range, pad
    crop_samp = frame[roi_lower:roi_upper, roi_left:roi_right]
    sradd_samp = us.srad(crop_samp)
    clean_samp = us.clean_frame(sradd_samp, median_radius=adj_radius)
    # masked_samp = clean_samp * mask # using mask defined above
    rescaled_samp = clean_samp * 255
    sample_frame = rescaled_samp.astype(np.uint8)
    sample_frame = np.pad(sample_frame, padding, 'constant')
    out_frames_samp.append(sample_frame)
# stitch the series' frames side by side into one wide image
out_series_samp = np.hstack(out_frames_samp)
plt.title("Sample frames series, Spkr {:}".format(subject))
plt.imshow(out_series_samp, cmap="Greys_r")
file_ending_sample = "subj{:}_sample.pdf".format(subject)
savepath_sample = os.path.join(expdir,file_ending_sample)
plt.savefig(savepath_sample)
print("Please check sample series at {}!".format(savepath_sample))
# preallocate ultrasound frame array for PCA
# (sized from the sample series; NOTE(review): this assumes every
# acquisition yields a stitched series of exactly the same width --
# a series with a different frame count would break the assignment below)
out_serieses = np.empty([data.shape[0]] + list(out_series_samp.shape)) * np.nan
out_serieses = out_serieses.astype('uint8')
filt_hds = []
total = out_serieses.shape[0]
frames_out = "frames_proc.npy"
metadata_out = "frames_proc_metadata.pickle"
# TODO loop index issues (get IndexError "out of range" at item 5)
for idx,series in enumerate(data):
    out_frames = []
    for frame in series:
        # same pipeline as the sample pass above
        crop = frame[roi_lower:roi_upper, roi_left:roi_right]
        sradd = us.srad(crop)
        clean = us.clean_frame(sradd, median_radius=adj_radius)
        rescaled = clean * 255
        out_frame = rescaled.astype(np.uint8)
        out_frame = np.pad(out_frame, padding, 'constant')
        out_frames.append(out_frame)
    out_series = np.hstack(out_frames)
    out_serieses[idx,:,:] = out_series
    # new sha1 hex: filtered, conv to np.uint8
    filt_hds.append(sha1(out_serieses[idx].ravel()).hexdigest())
    print("\tAdded series {} of {}".format(idx+1,total))
# add new sha1 hex as a column in the df
md = md.assign(sha1_filt=pd.Series(filt_hds, index=md.index))
# make sure there is one metadata row for each image frame
assert(len(md) == out_serieses.shape[0])
# for debugging
# pca_md.to_csv(os.path.join(root,d,"test.csv"))
# compare checksums
assert(md.loc[0, 'sha1_filt'] == sha1(out_serieses[0].ravel()).hexdigest())
assert(md.loc[len(md)-1,'sha1_filt'] == sha1(out_serieses[-1].ravel()).hexdigest())
# output
np.save(os.path.join(expdir,frames_out), out_serieses)
md.to_pickle(os.path.join(expdir,metadata_out))
55,754 | mfaytak/ultramisc | refs/heads/master | /scripts/video/flap-video-writer.py | '''
flap-video-writer: convert designated sequences of ultrasound frames into .AVI movies.
'''
import argparse
import audiolabel
import glob
import imgphon.ultrasound as us # TODO reorganize
import numpy as np
import os
import subprocess
from operator import itemgetter
from PIL import Image # check if configured on VM
from ultratils.rawreader import RawReader
from ultratils.pysonix.scanconvert import Converter
from ultramisc.ebutils import read_echob_metadata, read_stimfile
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
help="Experiment directory containing all subjects'\
caches and metadata in separate folders"
)
parser.add_argument("-f",
"--flop",
help="Horizontally flip the data",
action="store_true"
)
args = parser.parse_args()
expdir = args.expdir
rawfile_glob_exp = os.path.join(expdir,"*","*.raw")
class Header(object):
    """Bare attribute container for scan-conversion header fields.

    The caller attaches ``w``, ``h``, and ``sf`` before passing an
    instance to ``Converter``.
    """
class Probe(object):
    """Bare attribute container for ultrasound probe geometry.

    The caller attaches ``radius``, ``numElements``, and ``pitch``
    before passing an instance to ``Converter``.
    """
# The scan Converter is built lazily from the first usable acquisition's
# metadata and then reused (probe geometry assumed constant across the
# experiment -- TODO confirm).
conv = None
for rf in glob.glob(rawfile_glob_exp):
    parent = os.path.dirname(rf)
    basename = os.path.split(parent)[1]
    # use stim.txt to skip non-trials, flap.txt to skip words without flaps
    stimfile = os.path.join(parent,"stim.txt")
    stim = read_stimfile(stimfile)
    if stim == "bolus" or stim == "practice":
        continue
    flapfile = os.path.join(parent,"flap.txt")
    flap_set = read_stimfile(flapfile)
    if flap_set == "N":
        continue
    if conv is None:
        print("Making converter...")
        nscanlines, npoints, junk = read_echob_metadata(rf)
        header = Header()
        header.w = nscanlines # input image width
        header.h = npoints - junk # input image height, trimmed
        header.sf = 4000000 # magic number, sorry!
        probe = Probe()
        probe.radius = 10000 # based on '10' in transducer model number
        probe.numElements = 128 # based on '128' in transducer model number
        probe.pitch = 185 # based on Ultrasonix C9-5/10 transducer
        conv = Converter(header, probe)
    print("Now working on {}".format(parent))
    rdr = RawReader(rf, nscanlines=nscanlines, npoints=npoints)
    # companion file paths; only the two TextGrids are read below
    wav = os.path.join(parent,str(basename + ".ch1.wav"))
    tg = os.path.join(parent,str(basename + ".ch1.TextGrid"))
    sync_tg = os.path.join(parent,str(basename + ".sync.TextGrid"))
    sync = os.path.join(parent,str(basename + '.sync.txt'))
    idx_txt = os.path.join(parent,str(basename + ".idx.txt"))
    # instantiate LabelManager objects for FA transcript and sync pulses
    try:
        pm = audiolabel.LabelManager(from_file=tg, from_type="praat")
    except FileNotFoundError:
        print("No alignment TG in {}; skipping".format(basename))
        continue
    try:
        sync_pm = audiolabel.LabelManager(from_file=sync_tg, from_type="praat")
    except FileNotFoundError:
        print("No sync TG in {}; skipping".format(basename))
        continue
    # search for target words in 'words' tier
    for wd in pm.tier('words'):
        if not wd.text: # skip intervals in which no word was found
            continue
        if wd.text.lower() != stim.lower():
            # fix for multi-word stimuli:
            if wd.text.lower() in ['heard', 'bird', 'hard']:
                # take next word's .t2 as word_t2
                # as in "heard of it", "bird of paradise", "hard of hearing"
                word_t1 = wd.t1
                word_t2 = pm.tier('words').next(wd).t2 # invariably "of"
            elif wd.text.lower() == 'carta':
                # take 'carta' even though it doesn't match any stim (which is Magna Carta for this one)
                word_t1 = wd.t1
                word_t2 = wd.t2
            else:
                continue
        else:
            word_t1 = wd.t1
            word_t2 = wd.t2
        # find the tap/flap phone(s) inside the target word's time span
        for ph in pm.tier('phones'):
            if ph.t1 < word_t1:
                continue
            if ph.t2 > word_t2:
                continue
            if ph.text.upper() not in ["TX", "DX"]:
                continue
            # TODO check if no flap found, issue warning
            before = pm.tier('phones').prev(ph)
            if before.text.upper() == "R": # if there's a postvocalic R, then go back an additional interval
                before = pm.tier('phones').prev(before)
            after = pm.tier('phones').next(ph)
            print("Extracting {} {} {} from {}".format(before.text, ph.text, after.text, stim))
            start_flap = ph.t1
            end_flap = ph.t2
            start_time = before.t1
            end_time = after.t2
            # TODO set from here down as fcn? args t1, t2
            # options: highlight=['TX','DX'], fast/slow/both
            # then script finds times
            # then it extracts images
            # then it runs ffmpeg
            # distance from each sync pulse to each boundary time; the
            # nearest pulse yields the frame index for that boundary
            diff_start = []
            diff_end = []
            diff_start_flap = []
            diff_end_flap = []
            for frame in sync_pm.tier('pulse_idx'):
                diff_s = abs(frame.center - start_time)
                diff_start.append(diff_s)
                diff_e = abs(frame.center - end_time)
                diff_end.append(diff_e)
                diff_sf = abs(frame.center - start_flap)
                diff_start_flap.append(diff_sf)
                diff_ef = abs(frame.center - end_flap)
                diff_end_flap.append(diff_ef)
            # get start/end indices; flap indices
            start_idx = min(enumerate(diff_start), key=itemgetter(1))[0] - 1
            end_idx = min(enumerate(diff_end), key=itemgetter(1))[0] - 1
            start_flap_idx = min(enumerate(diff_start_flap), key=itemgetter(1))[0] - 1
            end_flap_idx = min(enumerate(diff_end_flap), key=itemgetter(1))[0] - 1
            # make sure there are at least 20 frames after end of flap
            # credit: Jennifer Kuo
            # NOTE(review): comment says 20 but the code enforces 15 -- confirm
            if (end_idx - end_flap_idx) < 15:
                end_idx = 15 + end_flap_idx
            # get set of indices for timing of flap
            is_flap = list(range(start_flap_idx, end_flap_idx))
            is_flap = [ix - start_idx for ix in is_flap]
            # get frames using RawReader's reader object
            target_frames = rdr.data[start_idx:end_idx]
            for idx,fr in enumerate(target_frames):
                # trim junk pixels off of top
                trimmed_frame = fr[junk:,:]
                # convert to fan shape
                conv_frame = conv.convert(np.flipud(trimmed_frame))
                messy_frame = np.flipud(conv_frame)
                norm_frame = us.normalize(messy_frame)
                cframe = us.clean_frame(norm_frame, median_radius=15, log_sigma=4)
                pre_final_frame = (cframe*255).astype(np.uint8)
                # reverse if needed
                if args.flop:
                    final_frame = np.fliplr(pre_final_frame)
                else:
                    final_frame = np.copy(pre_final_frame)
                # if frame occurs during flap, add a signal
                # (a solid white square in the upper-right corner)
                if idx in is_flap:
                    final_frame[10:80, -80:-10] = 255
                # create frame handle and save to copy dir
                # (frames are written to the current working directory)
                fh = basename + "." + "{0:05d}".format(idx+1) + ".bmp"
                out_img = Image.fromarray(final_frame)
                out_img.save(os.path.join(fh))
            # stitch the numbered BMPs into slow and fast AVIs with ffmpeg
            frame_exp = os.path.join(basename + ".%05d.bmp")
            out_fh = basename + '_slow.avi'
            out_path = os.path.join(parent, out_fh)
            avi_args = ['ffmpeg',
                        '-loglevel', 'panic',
                        '-y',
                        '-framerate', '4', # values that work here include 4, 12.5, and 25
                        '-i', frame_exp,
                        #'-r', str(framerate),
                        '-vcodec', 'huffyuv',
                        '-vf', 'scale=iw/2:ih/2',
                        out_path]
            subprocess.check_call(avi_args)
            out_fh_fast = basename + '_fast.avi'
            out_path_fast = os.path.join(parent, out_fh_fast)
            avi_args_fast = ['ffmpeg',
                             '-loglevel', 'panic',
                             '-y',
                             '-framerate', '12.5',
                             '-i', frame_exp,
                             #'-r', str(framerate),
                             '-vcodec', 'huffyuv',
                             '-vf', 'scale=iw/2:ih/2',
                             out_path_fast]
            subprocess.check_call(avi_args_fast)
            # delete the .bmp files once done
            for item in os.listdir("."):
                if item.endswith(".bmp"):
                    os.remove(item)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,755 | mfaytak/ultramisc | refs/heads/master | /scripts/file-management/move-for-recoding.py | #### 2019 TAP/FlAP ULTRASOUND PROJECT ####
## Script for moving acquisitions into folders sorted by the type of tap/flap
## inter-rater disagreement.
## Jennifer Kuo July 2019
## TO run: python move-for-recoding.py EXPDIR CODEFILE
## Where: EXPDIR is the directory containing the experiment files (acquisitions),
## and CODEFILE is the csv file with all the coding results.
import argparse
import os
import pandas as pd
import shutil
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("expdir",
help="Experiment directory containing all subjects'\
caches and metadata in separate folders"
)
parser.add_argument("codefile", action="store",
help="Name of csv file with coding results"
)
args = parser.parse_args()
expdir = args.expdir
codefile = args.codefile
## read coding csv file
coding_results = pd.read_csv(codefile,sep=',')

## loop through rows of the coding file
#(each row corresponding to one acquisition)
for i, row in coding_results.iterrows():
    # Positional column access: column 0 is the acquisition name
    # (timestamp), column 14 the type of inter-rater mismatch.
    # Use .iloc: integer-key lookup on a string-labelled Series
    # (row[0]/row[14]) relies on a deprecated positional fallback that
    # newer pandas versions reject.
    acq = row.iloc[0]      # name of acquisition (timestamp)
    err_type = row.iloc[14] # type of inter-rater mismatch
    start_dir = os.path.join(expdir, acq)
    out_dir = os.path.join(expdir + '_recode', err_type)
    # if acquisition exists in expdir, move it to a subfolder
    # named after the type of inter-rater mismatch.
    if os.path.exists(start_dir):
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        shutil.move(start_dir, out_dir)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,756 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/nasalcoda-pca-lda.py | '''
nasalcoda-pca-lda: PCA-LDA pipeline as used in nasal coda project (Liu S., Faytak).
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from hashlib import sha1
from imgphon.ultrasound import reconstruct_frame
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
parser.add_argument("--pca_dim", "-p", help="Number of principal components to retain")
parser.add_argument("--lda_dim", "-l", help="Number of linear discriminants to train")
parser.add_argument("--flop", "-f", help="Horizontally flip the data", action="store_true")
args = parser.parse_args()

expdir = args.directory
# Fail fast on a bad path.  (The previous try/except IndexError around the
# attribute access could never fire, and its handler referenced an unbound
# `ArgumentParser` name, so a bad directory was silently accepted.)
if not os.path.exists(expdir):
    print("\tDirectory provided doesn't exist")
    parser.print_usage()
    parser.print_help()
    sys.exit(2)

# NOTE(review): both flags are optional; int(None) raises TypeError if
# --pca_dim/--lda_dim are omitted -- consider required=True.
n_components = int(args.pca_dim)
n_lds = int(args.lda_dim)
# Per-subject PCA + LDA pipeline: each subject's processed frame cache
# lives in a subdirectory of expdir.
for root, dirs, files in os.walk(expdir):
    for d in dirs:
        if d.startswith("."): # don't run on MAC OS hidden directories
            continue
        subject = re.sub("[^0-9]","",d) # subject is any numbers in directory name
        data_in = os.path.join(root,d,"frames_proc.npy")
        data = np.load(data_in)
        metadata_in = os.path.join(root,d,'frames_proc_metadata.pickle')
        md_pre = pd.read_pickle(metadata_in)
        # check that metadata matches data, frame-by-frame
        assert(len(md_pre) == data.shape[0])
        for idx,row in md_pre.iterrows():
            assert(row['sha1_filt'] == sha1(data[idx].ravel()).hexdigest())
        # get rid of hash columns after checking
        md = md_pre.iloc[:,0:11].copy()
        # break off nasals specifically (vowels are in these data sets, too)
        nas_mask = md['phone'].isin(['n','ng'])
        pca_data = data[nas_mask]
        pca_md = md[nas_mask]
        if args.flop:
            # flips all frames on their second axis, i.e. front-back
            pca_data = np.flip(pca_data, 2)
        # reshape data to prep for PCA (one flattened row per frame)
        image_shape = pca_data[0].shape
        frames_reshaped = pca_data.reshape([
            pca_data.shape[0],
            pca_data.shape[1] * pca_data.shape[2]
            ])
        # carry out PCA
        pca = PCA(n_components=n_components)
        pca.fit(frames_reshaped)
        total_var_exp = sum(pca.explained_variance_ratio_)
        pcvar = pca.explained_variance_ratio_
        pca_out = pca.transform(frames_reshaped)
        # write per-frame metadata + PC scores to a CSV
        pc_filestring = "NC{:}_pcs.csv".format(subject)
        pc_savepath = os.path.join(root,d,pc_filestring)
        pc_headers = ["pc"+str(i+1) for i in range(0,n_components)]
        meta_headers = pca_md.columns.values
        headers = list(meta_headers) + pc_headers
        metadata = pca_md.values # md.as_matrix(columns = md.columns[0:11])
        out_df = np.row_stack((headers,
                               np.column_stack((metadata, pca_out))
                               ))
        np.savetxt(pc_savepath, out_df, fmt="%s", delimiter = ',')
        print("Subj.{}: PCA with {} PCs explains {} of variation".format(subject, str(n_components),
                                                                         round(total_var_exp,4)
                                                                         ))
        print(pca.explained_variance_ratio_)
        # output: one eigentongue heatmap PDF per retained PC
        for n in range(0,n_components):
            dd = pca.components_[n].reshape(image_shape)
            # min-max rescale the loading image into [0, 255] for display
            mag = np.max(dd) - np.min(dd)
            pc_load = (dd-np.min(dd))/mag*255
            plt.title("PC{:} eigentongue, Subj {:}".format((n+1),subject))
            plt.imshow(pc_load, cmap="Greys_r")
            file_ending = "subj{:}-pc{:}-filt.pdf".format(subject, (n+1))
            savepath = os.path.join(root,d,file_ending)
            plt.savefig(savepath)
        vectors = pca.components_
        pca_md.reset_index(drop=True, inplace=True) # in case there was any subsetting
        # boolean masks picking out each rime (preceding vowel x nasal coda)
        in_mask = (pca_md['phone'].isin(['n']) & pca_md['before'].isin(['i']))
        ing_mask = (pca_md['phone'].isin(['ng']) & pca_md['before'].isin(['i']))
        en_mask = (pca_md['phone'].isin(['n']) & pca_md['before'].isin(['e']))
        eng_mask = (pca_md['phone'].isin(['ng']) & pca_md['before'].isin(['e']))
        an_mask = (pca_md['phone'].isin(['n']) & pca_md['before'].isin(['a']))
        ang_mask = (pca_md['phone'].isin(['ng']) & pca_md['before'].isin(['a']))
        uan_mask = (pca_md['phone'].isin(['n']) & pca_md['before'].isin(['ua']))
        uang_mask = (pca_md['phone'].isin(['ng']) & pca_md['before'].isin(['ua']))
        # make dict of masks and strings for plot labels/output titles
        output_dict = {"in": in_mask,
                       "ing": ing_mask,
                       "en": en_mask,
                       "eng": eng_mask,
                       "an": an_mask,
                       "ang": ang_mask,
                       "uan": uan_mask,
                       "uang": uang_mask}
        SMALL_SIZE = 8
        MEDIUM_SIZE = 10
        BIGGER_SIZE = 12
        # one reconstructed-average tongue image PDF per rime
        for label in output_dict:
            idx_list = pca_md.index[output_dict[label]].tolist() # subset pca_md by each mask
            values = pca_out[idx_list]
            plt.rcParams['font.sans-serif'] = "Noto Sans UI"
            #plt.rcParams['font.family'] = "sans-serif"
            SMALL_SIZE = 8
            MEDIUM_SIZE = 10
            #BIGGER_SIZE = 14
            plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
            plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
            plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
            plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
            plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
            plt.title("Reconstructed {:}, speaker {:}".format(label.upper(), subject))
            # TODO need to get max/min values out of reconstruct_frame somehow
            plt.imshow(reconstruct_frame(vectors, values, pca.n_components, image_shape), cmap="Blues_r")
            clb = plt.colorbar(extend='min', shrink=0.55)
            clb.ax.set_title('Pixel\n value')
            plt.clim(0)
            file_ending = "subj{:}-{:}-recon.pdf".format(subject, label)
            savepath = os.path.join(root,d,file_ending)
            plt.savefig(savepath)
            plt.close("all")
        # now LDA
        training_list = ["n", "ng"]
        training_mask = pca_md['phone'].isin(training_list)
        training_mask = training_mask.values
        training_md = pca_md[training_mask].copy()
        training_data = pca_out[training_mask]
        # train LDA on training data
        labs = np.array(training_md.phone) # expand dims?
        train_lda = LDA(n_components = int(n_lds))
        train_lda.fit(training_data, labs) # train the model on the data
        train_lda_out = train_lda.transform(training_data)
        # save LDs for visualization
        ld = pd.DataFrame(np.vstack([train_lda_out]))
        ld = ld.rename(columns = {0:'LD1'})
        subject_lab = [subject] * ld.shape[0]
        subject_column = pd.DataFrame(subject_lab)
        subject_column = subject_column.rename(columns = {0:'subj'})
        # NOTE(review): rebinds md to the training subset; md is not read
        # again below, so this looks vestigial -- confirm
        md = training_md
        # get classification results
        cls = pd.DataFrame(train_lda.predict(training_data))
        cls = cls.rename(columns = {0:'cls'})
        # combine all of the above into a DataFrame object and save
        for df in [subject_column, ld, cls, training_md]:
            df.reset_index(drop=True, inplace=True)
        ld_md = pd.concat([subject_column, ld, cls, training_md], axis=1)
        lda_savepath = os.path.join(root,d,"NC{:}_ldas_1ld.csv".format(subject))
        ld_md.to_csv(lda_savepath, index=False)
        # TODO output total token counts and token counts for each bin
        print("FRAME COUNT")
        print("\t" + "Total nasal frames analyzed: {:} ".format(pca_data.shape[0]))
        for label in output_dict:
            count = sum(output_dict[label]) # add up TRUEs in the masks
            print("\t" + "Total frames for {:}, subj {:}: {:}".format(label.upper(), subject, count))
        # print pct correct classification of training data
        print("LDA ACCURACY")
        print("\t" + subject + ": overall correct = " + str(train_lda.score(training_data, labs)))
        before_labels = list(np.unique(ld_md.before)) # [a, e, i, ua]
        train_labels = list(np.unique(ld_md.phone))
        for b in before_labels: # [a, e, i, ua]
            set_by_before = ld_md.loc[ld_md.before == b]
            for tr in train_labels: # [n, ng]
                set_by_before_coda = set_by_before.loc[set_by_before.phone == tr]
                total_count = set_by_before_coda.shape[0]
                correct_subset = set_by_before_coda.loc[set_by_before_coda.cls == tr]
                correct_count = correct_subset.shape[0]
                # NOTE(review): divides by zero if a vowel+coda bin is empty
                prop_correct = correct_count/total_count
                print('\t', b+tr, '\t proportion correct (as {})'.format(tr), prop_correct)
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,757 | mfaytak/ultramisc | refs/heads/master | /ultramisc/ebutils.py | import os
from unicodedata import normalize
def read_echob_metadata(rawfile):
    '''
    Gather information about a .raw file from its .img.txt file.
    For legacy .raw data without a header; if a header exists,
    use ultratils utilities.
    Inputs: a .raw file, which is assumed to have an .img.txt file
    with the same base name.
    Outputs:
      nscanlines, the number of scan lines ("width" of unconverted img)
      npoints, the number of pixels in each scan line ("height" of img)
      junk, the number of junk pixels at the outer edge of each scan line
    '''
    meta_path = os.path.splitext(rawfile)[0] + ".img.txt"
    # first line holds tab-separated field names, second line their values
    with open(meta_path, 'r') as mf:
        fields = mf.readline().strip().split("\t")
        values = mf.readline().strip().split("\t")
    meta = {fld: int(val) for fld, val in zip(fields, values)}
    nscanlines = meta['Height']
    npoints = meta['Pitch']
    junk = npoints - meta['Width'] # number of rows of junk data at outer edge of array
    return nscanlines, npoints, junk
def _deaccent(mystr):
    '''
    Normalize the input to NFC, drop any characters outside the ASCII
    range, and return the resulting plain string.  Used to standardize
    stimulus strings so that item comparison ignores accents.
    '''
    return normalize('NFC', mystr).encode('ascii','ignore').decode('utf-8')
def read_stimfile(stimfile, deaccent=False):
    '''
    Read plaintext stim.txt file; return stim as string.
    When deaccent is True, the result is passed through _deaccent()
    to strip accented characters.
    '''
    with open(stimfile, "r") as stfile:
        stim = stfile.read().rstrip('\n')
    return _deaccent(stim) if deaccent else stim
def read_listfile(listfile, deaccent=False):
    '''
    Read stimulus list file (for subsetting, naming
    distractors, etc.) and return its lines as a list of strings.
    When deaccent is True, each entry is passed through _deaccent().
    '''
    with open(listfile, "r") as lfile:
        contents = lfile.read()
    entries = contents.splitlines()
    if deaccent:
        entries = [_deaccent(entry) for entry in entries]
    return entries
| {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,758 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/punjabi-pca.py | '''
punjabi-pca: PCA analysis for Punjabi project (Kochetov, Faytak, Nara).
WARNING: Largely superceded by ./punjabi-series-pca-lda.py
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from hashlib import sha1
from scipy.ndimage import median_filter
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# read in args
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
args = parser.parse_args()
# check for appropriate directory
expdir = args.directory
try:
    assert os.path.exists(args.directory)
except AssertionError:
    # TODO raise exception
    print("\tDirectory provided doesn't exist")
    parser.print_help()
    sys.exit(2)
# load the cached frame array and its row-aligned metadata table
data_in = os.path.join(expdir,"frames.npy")
data = np.load(data_in)
metadata_in = os.path.join(expdir,"frames_metadata.pickle")
md = pd.read_pickle(metadata_in)
# sanity checks: one metadata row per frame, and first/last frame
# checksums must match the values recorded at caching time
assert(len(md) == data.shape[0]) # make sure one md row for each frame
assert(md.loc[0, 'sha1'] == sha1(data[0].ravel()).hexdigest()) # checksums
assert(md.loc[len(md)-1,'sha1'] == sha1(data[-1].ravel()).hexdigest())
n_pca = 4
#n_lda = 1
image_shape = data[0].shape # base off of first frame
subject = []
phase = []
trial = []
phone = []
# fit one PCA model per subject
for s in np.unique(md['subject']):
    # subset data by subject ID
    subj_mask = (md['subject'] == s)
    # NOTE(review): Series.as_matrix() was removed in pandas 1.0; .values is the modern spelling
    subj_mask = subj_mask.as_matrix()
    model_data = data[subj_mask]
    model_md = md[subj_mask]
    # preallocate array for ultrasound frames for PCA
    model_array = np.empty([model_data.shape[0]] + list(model_data[0].shape)) * np.nan
    model_array = model_array.astype('uint8')
    # fill in the preallocated array, applying median filter (and any other desired transforms)
    for idx,frame in enumerate(model_data):
        filt_frame = median_filter(frame, 5)
        model_array[idx,:,:] = filt_frame # frame
    # run PCA with n_pca principal components
    n_components = int(n_pca)
    pca = PCA(n_components=int(n_components))
    # flatten each 2D frame into a 1D feature vector for PCA
    array_reshaped = model_array.reshape([
        model_array.shape[0],
        model_array.shape[1] * model_array.shape[2]
        ])
    pca.fit(array_reshaped)
    cumulative_var_exp = sum(pca.explained_variance_ratio_)
    print("Subj.{}: PCA with {} PCs explains {} of variation".format(s,
        n_components,
        round(cumulative_var_exp,4)
        ))
    pca_out = pca.transform(array_reshaped)
    # output PC values by acquisition
    # create output table headers
    pc_headers = ["pc"+str(i+1) for i in range(0,n_components)] # n. of PC columns changes acc. to n_components
    meta_headers = list(md.columns.values)
    headers = meta_headers + pc_headers
    # create output table
    # NOTE(review): md is the FULL metadata table while pca_out only covers
    # this subject's frames; this looks like it should be model_md instead
    # of md -- verify (with more than one subject the row counts differ).
    headless = np.column_stack((md[meta_headers], pca_out))
    d = np.row_stack((headers, headless))
    out_filename = "{}_pca.csv".format(s)
    out_path = os.path.join(expdir,out_filename)
    np.savetxt(out_path, d, fmt="%s", delimiter ='\t')
    # TODO once relevant, output one table across multiple subjects?
    # output eigentongues (at most 5 PC loading images)
    if n_components < 5:
        n_output_pcs = n_components
    else:
        n_output_pcs = 5
    # plot each retained PC's loadings rescaled to the 0-255 greyscale range
    for n in range(0,n_output_pcs):
        dd = pca.components_[n].reshape(image_shape)
        mag = np.max(dd) - np.min(dd)
        pc_load = (dd-np.min(dd))/mag*255
        plt.title("PC{:} min/max loadings, Subj. {:}".format((n+1),s))
        plt.imshow(pc_load, cmap="Greys_r")
        file_ending = "{:}-pc{:}.pdf".format(s, (n+1))
        savepath = os.path.join(expdir,file_ending) # TODO redefine save path if needed
plt.savefig(savepath) | {"/scripts/dim-reduction/nasalcoda-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/dim-reduction/suzhou-cache-frames.py": ["/ultramisc/ebutils.py"], "/scripts/video/eb-make-avi.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/format-con.py": ["/ultramisc/ebutils.py"], "/scripts/ssanova/eb-extract-frames.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/ultrasonix-subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-viewer.py": ["/ultramisc/ebutils.py"], "/scripts/file-management/subsetter.py": ["/ultramisc/ebutils.py"], "/scripts/video/flap-video-writer.py": ["/ultramisc/ebutils.py"]} |
55,759 | mfaytak/ultramisc | refs/heads/master | /scripts/dim-reduction/suzhou-pca-lda-1ld.py | '''
suzhou-pca-lda-1ld: PCA-LDA method, Suzhou project, simple (1LD) model.
'''
import argparse
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import sys
from hashlib import sha1
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# Word lists used to label each test token's coarticulatory status:
# items with no adjacent fricative, and apical-vowel items (the latter
# are a third level, not useful for comparisons).
test_no_coart_words = ["IZ", "BIZX", "YZ"]
apical_words = ["SZ", "SZW"]

def coart_class(row):
    '''Label a metadata row as "no_fric", "apical", or "fric" by its 'pron'.'''
    # TODO: figure out how to make training words just be "training"
    pron = row['pron']
    if pron in test_no_coart_words:
        return "no_fric"
    if pron in apical_words:
        return "apical"
    return "fric"
# read in arguments
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="Experiment directory containing all subjects")
parser.add_argument("--pca_dim", "-p", help="Number of principal components to retain")
parser.add_argument("--lda_dim", "-l", help="Number of linear discriminants to use")
#parser.add_argument("-v", "--visualize", help="Produce plots of PC loadings on fan",action="store_true")
args = parser.parse_args()
try:
    expdir = args.directory
except IndexError:
    # NOTE(review): this handler looks dead -- attribute access on args does
    # not raise IndexError, and the two ArgumentParser lines below reference
    # methods without calling them (ArgumentParser is also undefined here).
    print("\tDirectory provided doesn't exist")
    ArgumentParser.print_usage
    ArgumentParser.print_help
    sys.exit(2)
n_components = int(args.pca_dim)
n_lds = int(args.lda_dim)
# classification-rate output file: write the header row now, then append
# one line per subject/coart-class/training-label in the main loop below
pct_out = "percent_classified_1ld.txt"
pct_out_path = os.path.join(expdir,pct_out)
pct_out_head = "\t".join(["subj", "test", "coart", "classified_as", "pct_class"])
with open(pct_out_path, "w") as out:
    out.write(pct_out_head + "\n")
# one PCA-LDA analysis per (non-hidden) subject directory under expdir
for root,directories,files in os.walk(expdir):
    for d in directories:
        if d.startswith("."):
            continue
        # subject ID is the numeric part of the directory name
        subject = re.sub("[^0-9]","",d)
        # load this subject's cached (filtered) frames and metadata
        data_in = os.path.join(root,d,"frames_proc.npy")
        data = np.load(data_in)
        metadata_in = os.path.join(root,d,'frames_proc_metadata.pickle')
        md_pre = pd.read_pickle(metadata_in)
        # some sanity checks on data checksums
        assert(len(md_pre) == data.shape[0]) # make sure one md row for each frame
        assert(md_pre.loc[0, 'sha1_filt'] == sha1(data[0].ravel()).hexdigest()) # checksums
        assert(md_pre.loc[len(md_pre)-1,'sha1_filt'] == sha1(data[-1].ravel()).hexdigest())
        # get rid of hash-related columns after checking
        md = md_pre.iloc[:,0:11].copy()
        # subset data again to remove unneeded data:
        # vowel frames from the listed words, plus SH frames from SH-words
        vow_mask = (md['pron'].isin(["BIY", "IY", "XIY", "SIY", "BIZX", "IZ", "SIZ", "XIZ", "YZ", "XYZ"])) & (md['phone'] != "SH") & (md['phone'] != "S")
        sh_mask = (md['pron'].isin(["XIZ", "XYZ", "XIY", "XIEX", "XEU"])) & (md['phone'] == "SH")
        #s_mask = (md['pron'].isin(["SAAE", "SEI", "SUW", "SIEX", "SOOW", "SZ", "SZW"])) & (md['phone'] == "S")
        mask = vow_mask | sh_mask
        mask = mask.values
        pca_data = data[mask]
        pca_md = md[mask]
        image_shape = pca_data[0].shape
        # reshape 2D frame data into 1D vectors and fit PCA
        frames_reshaped = pca_data.reshape([
            pca_data.shape[0],
            pca_data.shape[1] * pca_data.shape[2]
            ])
        pca = PCA(n_components=n_components)
        pca.fit(frames_reshaped)
        total_var_exp = sum(pca.explained_variance_ratio_)
        pcvar = pca.explained_variance_ratio_
        # output PC loading plots, with different names from full LDA
        if n_components < 6:
            n_output_pcs = n_components
        else:
            n_output_pcs = 6
        # save scree plots (cumulative % variance explained per PC)
        plt.title("Scree plot, subj. {:}".format(subject))
        plt.plot(np.cumsum(pcvar) * 100)
        scree_ending = "subj{:}-scree-1ld.pdf".format(subject)
        screepath = os.path.join(root,d,scree_ending)
        plt.savefig(screepath)
        # eigentongue images: each PC's loadings rescaled to 0-255 greyscale
        for n in range(0,n_output_pcs):
            dd = pca.components_[n].reshape(image_shape)
            mag = np.max(dd) - np.min(dd)
            pc_load = (dd-np.min(dd))/mag*255
            # conversion would happen here if images weren't converted already
            plt.title("PC{:} min/max loadings, subj {:}".format((n+1),subject))
            plt.imshow(pc_load, cmap="Greys_r")
            file_ending = "subj{:}-pc{:}-filt-1ld.pdf".format(subject, (n+1))
            savepath = os.path.join(root,d,file_ending)
            plt.savefig(savepath)
        # print some info
        print("\tSubj.{}: PCA with {} PCs explains {} of variation".format(subject, str(n_components),
            round(total_var_exp,4)
            ))
        pca_out = pca.transform(frames_reshaped)
        # output PC scores
        pc_filestring = "suzh{:}_pcs_1ld.csv".format(subject)
        pc_savepath = os.path.join(root,d,pc_filestring)
        pc_headers = ["pc"+str(i+1) for i in range(0,n_components)]
        meta_headers = md.columns.values
        headers = list(meta_headers) + pc_headers
        metadata = pca_md.values # md.as_matrix(columns = md.columns[0:11])
        out_df = np.row_stack((headers,
            np.column_stack((metadata, pca_out))
            ))
        np.savetxt(pc_savepath, out_df, fmt="%s", delimiter = ',')
        # subset PCA'ed data into training and testing sets
        training_list = ["IY1", "SH"]
        # encode as a factor whether there's a fricative or not
        # TODO: check this (why SIZ?)
        test_list = ["IZ1", "YZ1"]
        test_coart_words = ["XIZ", "SIZ", "XYZ"] # mark as "coart"
        test_no_coart_words = ["IZ", "BIZX", "YZ"] # mark as "no_coart"
        # apical_words = ["SZ", "SZW"] # third level "apical"; useless for comparisons
        training_mask = pca_md['phone'].isin(training_list)
        training_mask = training_mask.values
        training_md = pca_md[training_mask].copy()
        training_data = pca_out[training_mask]
        test_mask = pca_md['phone'].isin(test_list)
        test_mask = test_mask.values
        test_md = pca_md[test_mask].copy()
        test_data = pca_out[test_mask]
        # train LDA on training data (in PC space)
        labs = np.array(training_md.phone) # expand dims?
        train_lda = LDA(n_components = int(n_lds))
        train_lda.fit(training_data, labs) # train the model on the data
        train_lda_out = train_lda.transform(training_data)
        # score and/or categorize test data according to trained LDA model
        test_lda_out = train_lda.transform(test_data)
        # LDA data for csv: training on top of test
        ld = pd.DataFrame(np.vstack([train_lda_out, test_lda_out]))
        ld = ld.rename(columns = {0:'LD1', 1:'LD2'})
        # a subject column for csv
        subject_lab = [subject] * ld.shape[0]
        subject_column = pd.DataFrame(subject_lab)
        subject_column = subject_column.rename(columns = {0:'subj'})
        # training rows are labelled "training"; test rows get coart_class()
        training_md["coart_class"] = ["training"] * training_md.shape[0]
        test_md["coart_class"] = test_md.apply(lambda row: coart_class (row),axis=1)
        # metadata that was read in earlier for csv: training on top of test
        md = pd.concat([training_md, test_md], axis=0, ignore_index=True)
        # classification results: training on top of test
        cls = pd.concat(
            [pd.DataFrame(train_lda.predict(training_data)),
            pd.DataFrame(train_lda.predict(test_data))],
            axis=0,
            ignore_index=True
            )
        cls = cls.rename(columns = {0:'cls'})
        # combine all of the above into a DataFrame object
        ld_md = pd.concat([subject_column, ld, cls, md], axis=1)
        # TODO the below is not quite how you were calculating the score
        # add range-normalized linear discriminant values to DataFrame
        # ld_range = max(ld_md.LD) - min(ld_md.LD)
        # ld_md = ld_md.assign(normLD = (ld_md.LD - min(ld_md.LD)) / ld_range )
        # save analysis data for the current subject as csv
        lda_savepath = os.path.join(root,"suzh_{:}_ldas_1ld.csv".format(subject))
        ld_md.to_csv(lda_savepath, index=False)
        # output classification results
        laminal_list = ["IZ1", "YZ1"]
        apical_list = ["ZZ1", "ZW1"]
        train_labels = list(np.unique(training_md.phone))
        test_labels = list(np.unique(test_md.phone))
        coart_types = list(np.unique(ld_md.coart_class))
        # fricative vowel classification by training category and coarticulatory class
        rows_laminal = ld_md.loc[(ld_md.phone == "IZ1") | (ld_md.phone == "YZ1")]
        for c in coart_types:
            if c not in ["fric", "no_fric"]:
                continue
            rows_by_co = rows_laminal.loc[rows_laminal.coart_class == c]
            for t in train_labels:
                # proportion of this coart class classified as training label t
                rows_by_clco = rows_by_co.loc[rows_by_co.cls == t]
                prop_class = round(rows_by_clco.shape[0]/rows_by_co.shape[0], 4)
                print("\t{}, coart {} \t classified as {} -- {}".format("laminal",c,t,prop_class))
                with open(pct_out_path, "a") as out:
                    out.write("\t".join([subject,"laminal",c,t,str(prop_class)]) + "\n")
            print("\t---")
# gather every per-subject csv in the experiment directory and concatenate
# each set into one experiment-wide csv file
def _concat_subject_csvs(suffix, out_name):
    """Concatenate all csvs under expdir whose names end in *suffix* and
    write the combined table to expdir/out_name (no index column).

    The aggregate file's own name also ends in *suffix*, so it is skipped
    explicitly: otherwise re-running the script would fold the previous
    run's combined output back into the new one.
    """
    per_subject = []
    for walk_root, walk_dirs, walk_files in os.walk(expdir):
        for fname in walk_files:
            if fname == out_name:
                continue  # skip the aggregate left by a previous run
            if fname.endswith(suffix):
                per_subject.append(pd.read_csv(os.path.join(walk_root, fname)))
    combined = pd.concat(per_subject, axis=0)
    combined.to_csv(os.path.join(expdir, out_name), index=False)

# one csv of LDA scores and one of PC scores across all subjects
_concat_subject_csvs("ldas_1ld.csv", "suzhou_all_subj_ldas_1ld.csv")
_concat_subject_csvs("pcs_1ld.csv", "suzhou_all_subj_pcs_1ld.csv")
55,763 | ToxicWar/learn-backbone.js | refs/heads/master | /backbone_and_django/app/views.py | from django.views.generic import TemplateView
from rest_framework import viewsets
from .serializers import TaskSerializer
from .models import Task
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for Task records, serialized via TaskSerializer."""
    # An explicit queryset replaces the deprecated `model = Task` shortcut
    # (removed in DRF 3.x); the router also reads it to derive the basename
    # when registering this viewset.
    queryset = Task.objects.all()
    serializer_class = TaskSerializer
class Home(TemplateView):
    """Serve the single-page application shell (app/index.html)."""
    template_name = 'app/index.html'

# Module-level view callable referenced by the URLconf as 'app.views.home'.
home = Home.as_view()
| {"/backbone_and_django/app/views.py": ["/backbone_and_django/app/models.py"]} |
55,764 | ToxicWar/learn-backbone.js | refs/heads/master | /backbone_and_django/app/models.py | from django.db import models
class Task(models.Model):
    """A single to-do item exposed through the REST API."""
    title = models.CharField('Title', max_length=1000)
    # completion flag; new tasks start out not completed
    is_completed = models.BooleanField('Is completed', default=False)
| {"/backbone_and_django/app/views.py": ["/backbone_and_django/app/models.py"]} |
55,765 | ToxicWar/learn-backbone.js | refs/heads/master | /backbone_and_django/app/urls.py | from django.conf.urls import patterns, url, include
from rest_framework import routers
import views
router = routers.DefaultRouter()
router.register(r'tasks', views.TaskViewSet)
urlpatterns = patterns('',
url(r'^$', 'app.views.home', name='Home'),
url(r'', include(router.urls)),
)
| {"/backbone_and_django/app/views.py": ["/backbone_and_django/app/models.py"]} |
55,767 | diogommartins/reloadable | refs/heads/master | /reloadable/decorators.py | from functools import wraps, partial
from time import sleep
from reloadable import config
def reloadable(exception_callback=None,
               sleep_time=0,
               stop_condition_exception=None,
               max_reloads=None,
               return_on_success=False):
    """Decorator factory that keeps re-running the wrapped callable.

    exception_callback: called with each non-stop exception before retrying.
    sleep_time: seconds to wait between retries.
    stop_condition_exception: exception type that aborts the loop; falls
        back to config.STOP_CONDITION_EXCEPTION when not given.
    max_reloads: retry budget (None means unlimited); when exhausted, the
        last failure is re-raised.
    return_on_success: return the callable's result on the first success
        instead of looping again.

    When config.ENABLED is false the callable runs exactly once, unwrapped.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if not config.ENABLED:
                return func(*args, **kwargs)
            attempts = 0
            pending_error = None
            while attempts != max_reloads:
                try:
                    outcome = func(*args, **kwargs)
                    pending_error = None
                    if return_on_success:
                        return outcome
                # stop-condition is resolved lazily so configure() calls
                # made after decoration still take effect
                except (stop_condition_exception or config.STOP_CONDITION_EXCEPTION) as exc:
                    raise exc
                except Exception as exc:
                    if exception_callback:
                        exception_callback(exc)
                    sleep(sleep_time)
                    attempts += 1
                    pending_error = exc
            if pending_error:
                raise pending_error
        return wrapper
    return decorator

# Convenience variant: identical to reloadable() but hands back the result
# as soon as the wrapped callable succeeds.
retry_on_error = partial(reloadable, return_on_success=True)
| {"/reloadable/decorators.py": ["/reloadable/__init__.py"], "/tests/test_decorators.py": ["/reloadable/__init__.py", "/reloadable/decorators.py", "/reloadable/config.py"], "/tests/test_configure.py": ["/reloadable/__init__.py"], "/reloadable/__init__.py": ["/reloadable/decorators.py"]} |
55,768 | diogommartins/reloadable | refs/heads/master | /tests/test_decorators.py | from unittest import TestCase
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from reloadable import configure
from reloadable.decorators import reloadable, retry_on_error
from reloadable.config import STOP_CONDITION_EXCEPTION
class ReloadableDecoratorTests(TestCase):
    """Behavioral tests for the reloadable() decorator factory."""
    def setUp(self):
        self.exception_callback = Mock()
    def test_it_recovers_from_exception_until_KeyboardInterrupt(self):
        # ordinary exceptions are retried; the stop-condition one propagates
        func = Mock(side_effect=[ValueError, STOP_CONDITION_EXCEPTION], __name__='func')
        reloadable_func = reloadable()(func)
        with self.assertRaises(STOP_CONDITION_EXCEPTION):
            reloadable_func()
        self.assertEqual(func.call_count, 2)
    def test_it_recovers_from_multiple_exceptions_until_KeyboardInterrupt(self):
        # every non-stop exception type is swallowed and retried in turn
        exceptions = [ValueError,
                      TypeError,
                      IndexError,
                      KeyError,
                      IOError,
                      AttributeError,
                      Exception,
                      STOP_CONDITION_EXCEPTION]
        func = Mock(side_effect=exceptions, __name__='func')
        reloadable_func = reloadable()(func)
        with self.assertRaises(STOP_CONDITION_EXCEPTION):
            reloadable_func()
        self.assertEqual(func.call_count, len(exceptions))
    def test_it_calls_the_exception_callback(self):
        # the callback receives each raised exception instance, in order
        exceptions = [ValueError,
                      TypeError,
                      IndexError,
                      KeyError,
                      IOError,
                      AttributeError,
                      Exception,
                      STOP_CONDITION_EXCEPTION]
        func = Mock(side_effect=exceptions, __name__='func')
        mock_callback = Mock()
        reloadable_func = reloadable(exception_callback=mock_callback)(func)
        with self.assertRaises(STOP_CONDITION_EXCEPTION):
            reloadable_func()
        for index, exception_cls in enumerate(exceptions[:-1]):
            self.assertIsInstance(mock_callback.call_args_list[index][0][0],
                                  exception_cls)
    def test_disable_reloadable(self):
        # with ENABLED False the wrapped function runs once, unprotected
        configure(enabled=False)
        @reloadable()
        def not_reloadable():
            raise Exception('Oops')
        with self.assertRaises(Exception) as ex:
            not_reloadable()
        self.assertEqual('Oops', str(ex.exception))
        configure(enabled=True)
    def test_disable_reloadable_works_after_decorator_has_been_applied(self):
        # the ENABLED flag is consulted at call time, not decoration time
        @reloadable()
        def not_reloadable():
            raise Exception('Oops')
        configure(enabled=False)
        with self.assertRaises(Exception) as ex:
            not_reloadable()
        self.assertEqual('Oops', str(ex.exception))
        configure(enabled=True)
    def test_stops_on_custom_stop_condition(self):
        # a globally configured stop-condition exception also aborts the loop
        configure(stop_condition_exception=IOError)
        @reloadable()
        def not_reloadable():
            raise IOError('Oops')
        with self.assertRaises(IOError) as ex:
            not_reloadable()
        self.assertEqual('Oops', str(ex.exception))
        configure(stop_condition_exception=KeyboardInterrupt)
    def test_local_stop_condition_preceeds_global_config(self):
        # the decorator argument wins over the global configuration
        @reloadable(stop_condition_exception=ValueError)
        def not_reloadable():
            raise ValueError('Oops')
        configure(stop_condition_exception=IOError)
        self.assertRaises(ValueError, not_reloadable)
        configure(stop_condition_exception=KeyboardInterrupt)
    def test_it_reloads_function_until_it_reaches_max_reloads(self):
        func = Mock(side_effect=[IOError, IOError, Mock()], __name__='func')
        decorated_func = reloadable(max_reloads=3, return_on_success=True)(func)
        decorated_func()
        self.assertEqual(func.call_count, 3)
    def test_it_raises_an_error_if_it_reaches_max_reloads_without_success(self):
        # the last captured exception is re-raised once the budget runs out
        func = Mock(side_effect=IOError, __name__='func')
        decorated_func = reloadable(max_reloads=3)(func)
        with self.assertRaises(IOError):
            decorated_func()
        self.assertEqual(func.call_count, 3)
    def test_it_returns_on_sucess(self):
        expected_result = Mock()
        func = Mock(side_effect=[Exception, expected_result], __name__='func')
        decorated_func = reloadable(max_reloads=3, return_on_success=True)(func)
        result = decorated_func()
        self.assertEqual(func.call_count, 2)
        self.assertEqual(expected_result, result)
class RetryOnErrorDecoratorTests(TestCase):
    """retry_on_error is reloadable(..., return_on_success=True)."""
    def test_it_returns_on_sucess(self):
        expected_result = Mock()
        func = Mock(side_effect=[Exception, expected_result], __name__='func')
        decorated_func = retry_on_error(max_reloads=3)(func)
        result = decorated_func()
        self.assertEqual(func.call_count, 2)
        self.assertEqual(expected_result, result)
| {"/reloadable/decorators.py": ["/reloadable/__init__.py"], "/tests/test_decorators.py": ["/reloadable/__init__.py", "/reloadable/decorators.py", "/reloadable/config.py"], "/tests/test_configure.py": ["/reloadable/__init__.py"], "/reloadable/__init__.py": ["/reloadable/decorators.py"]} |
55,769 | diogommartins/reloadable | refs/heads/master | /reloadable/config.py | ENABLED = True
STOP_CONDITION_EXCEPTION = KeyboardInterrupt | {"/reloadable/decorators.py": ["/reloadable/__init__.py"], "/tests/test_decorators.py": ["/reloadable/__init__.py", "/reloadable/decorators.py", "/reloadable/config.py"], "/tests/test_configure.py": ["/reloadable/__init__.py"], "/reloadable/__init__.py": ["/reloadable/decorators.py"]} |
55,770 | diogommartins/reloadable | refs/heads/master | /setup.py | # coding=utf-8
from os import path
from setuptools import setup, find_packages
VERSION = '0.1.5'
here = path.abspath(path.dirname(__file__))
# the long description is taken verbatim from the README
# NOTE(review): 'file' shadows the Python 2 builtin of the same name
with open(path.join(here, 'README.rst')) as file:
    long_description = file.read()
setup(
    name='reloadable',
    version=VERSION,
    description='Rerun a function upon failure',
    long_description=long_description,
    author='Diogo Magalhães Martins',
    author_email='magalhaesmartins@icloud.com',
    maintainer='www.sieve.com.br',
    maintainer_email='ti@sieve.com.br',
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Topic :: Utilities',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6'
    ],
    url='https://bitbucket.org/sievetech/reloadable',
    keywords='reloadable recover decorator loop cli sieve',
    packages=find_packages(exclude=['tests']),
)
| {"/reloadable/decorators.py": ["/reloadable/__init__.py"], "/tests/test_decorators.py": ["/reloadable/__init__.py", "/reloadable/decorators.py", "/reloadable/config.py"], "/tests/test_configure.py": ["/reloadable/__init__.py"], "/reloadable/__init__.py": ["/reloadable/decorators.py"]} |
55,771 | diogommartins/reloadable | refs/heads/master | /tests/test_configure.py | from unittest import TestCase
from reloadable import configure
from reloadable import config
class ConfigureTest(TestCase):
    """Tests for configure(), snapshotting/restoring the config module state."""
    def setUp(self):
        self.save_old_config()
    def tearDown(self):
        self.restore_old_config()
    def test_changes_config(self):
        setattr(config, 'WTF', True)
        configure(wtf=False)
        self.assertEqual(False, config.WTF)
    def test_searches_only_for_uppercase_configs(self):
        # a lowercase attribute must not be found: configure() upper-cases keys
        setattr(config, 'wtf', True)
        with self.assertRaises(ValueError) as ex:
            configure(wtf=False)
        self.assertEqual("Option 'WTF' doesn't exist for reloadable",
                         str(ex.exception))
    def test_raises_error_if_option_doesnt_exist(self):
        self.assertRaises(ValueError, configure, spam=1)
    def save_old_config(self):
        self.old_config = self.get_configs()
    def restore_old_config(self):
        # delete all configs
        # NOTE(review): this also deletes module dunders (e.g. __name__)
        # before restoring them -- works, but fragile
        for config_name, _ in self.get_configs().items():
            delattr(config, config_name)
        # restore old configs
        for config_name, config_value in self.old_config.items():
            setattr(config, config_name, config_value)
    def get_configs(self):
        # snapshot every attribute currently set on the config module
        return {config_name: getattr(config, config_name)
                for config_name in dir(config)}
| {"/reloadable/decorators.py": ["/reloadable/__init__.py"], "/tests/test_decorators.py": ["/reloadable/__init__.py", "/reloadable/decorators.py", "/reloadable/config.py"], "/tests/test_configure.py": ["/reloadable/__init__.py"], "/reloadable/__init__.py": ["/reloadable/decorators.py"]} |
55,772 | diogommartins/reloadable | refs/heads/master | /reloadable/__init__.py | from .decorators import *
from . import config
def configure(**options):
    """Override reloadable's global settings on the config module.

    Keyword names are upper-cased before lookup; a ValueError is raised
    for any name that is not an existing config attribute.
    """
    for name, new_value in options.items():
        attr = name.upper()
        if not hasattr(config, attr):
            raise ValueError("Option '%s' doesn't exist for reloadable" % attr)
        setattr(config, attr, new_value)
| {"/reloadable/decorators.py": ["/reloadable/__init__.py"], "/tests/test_decorators.py": ["/reloadable/__init__.py", "/reloadable/decorators.py", "/reloadable/config.py"], "/tests/test_configure.py": ["/reloadable/__init__.py"], "/reloadable/__init__.py": ["/reloadable/decorators.py"]} |
55,774 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/monitor/game_ascription.py | #!/usr/bin/env python
#ecoding:utf-8
#用途:
#用于做游戏归属查询
from app.monitor import monitor
from flask import render_template, request, jsonify, flash
from models import Game_Ascritption
from ..scripts.zabbix_manage import manage_zabbix
from flask_login import login_required, current_user
from app.scripts.tools import get_memcached_value, save_db, save_memcache_value, delete_db,dict_sorted, del_memcache_key
from app import csrf
from ..decorators import user_required
import sys
sys.path.append('../..')
import config
#全局变量
zabbix = manage_zabbix()
#全局
@monitor.route('/monitor/ascription', methods=['GET','POST'])
@login_required
@user_required
def ascription():
    """Render the game-ownership overview page with all ownership records."""
    # Refresh the cached game host-group names once per page load, covering
    # every centre prefix (first two characters of each centre name).
    zabbix.find_hostgroup_names([ '%s_' %n[0:2] for n in config.efun_centers.values() ])
    data_html = {
        'name':u'游戏负责人',
        'datas':Game_Ascritption.query.all()
    }
    return render_template('monitor/game_ascription.html', **data_html)
# Callback that renders the create/edit/delete modal dialog.
@monitor.route('/monitor/ascription/<action>')
@login_required
def ascription_action(action):
    """Serve the create/edit modal for an ownership record, or delete one.

    'create' clears the per-user cached record id; 'edit' caches the id so
    the follow-up option-list request can pre-select the right game;
    'del' removes the record and answers with JSON instead of HTML.
    """
    # refresh the cached op user-group names
    zabbix.find_op_users()
    id = request.args.get('id')
    data_html = {
        'centers': config.efun_centers,
        'efun_op':get_memcached_value('op_users').keys(),
        'action':action
    }
    if action == 'create':
        data_html.update({'button_name':u'创建'})
        del_memcache_key('%s_id' %current_user.is_me())
    elif action == 'edit':
        data_html.update({'button_name':u'修改', 'edit_data':Game_Ascritption.query.get(id)})
        # remember which record is being edited for this user (24h TTL)
        save_memcache_value('%s_id' %current_user.is_me(), id, 60*24)
    elif action == 'del':
        find_db = Game_Ascritption.query.get(id)
        try:
            delete_db(find_db)
            flash({'type':'ok','message':u'删除成功'})
        except:
            flash({'type':'error','message':u'删除错误'})
        return jsonify({'code':201})
    return render_template('monitor/game_ascription_alert.html', **data_html)
# Callback returning the game (host-group) option list.
@monitor.route('/monitor/hostgrou')
@login_required
def return_option():
    """Return an HTML <option> list of games for the selected centre (AJAX).

    When the user is editing an existing record (cached id present), the
    matching game's option is rendered pre-selected.
    """
    if request.is_xhr and request.method == 'GET':
        id = get_memcached_value('%s_id' %current_user.is_me())
        # NOTE(review): 'ceneter' looks like a typo for 'center', but it is a
        # runtime request-arg key that must match the front-end -- verify there
        center_name = request.args.get('ceneter')
        games = dict_sorted(get_memcached_value('center_hostgroup_name')[center_name])
        try:
            check_groupid = Game_Ascritption.query.get(id).game_name
        except:
            # no cached id / no record: nothing to pre-select
            check_groupid = False
        html = ""
        for line in games:
            # line is (groupid, group name)
            if check_groupid:
                if int(line[0]) == int(check_groupid):
                    html += "<option selected=\"selected\" value=%s>%s</option>" %(line[0], line[1])
                else:
                    html += "<option value=%s>%s</option>" %(line[0], line[1])
            else:
                html += "<option value=%s>%s</option>" %(line[0], line[1])
        return html
# Create / update records.
# Approval applies only to zabbix permission changes.
# After a record is added, zabbix permissions are refreshed automatically:
# an administrator's change takes effect immediately, while an ordinary
# user's change must pass approval before the refresh.
@monitor.route('/monitor/ascription/action.json', methods=['POST'])
@login_required
@csrf.exempt
def data_action():
    """Create or update a game-ownership record from the modal form (AJAX
    POST), then rebuild the cached owner lookup table."""
    def is_true(check):
        # the form posts booleans as the strings 'true'/'false'
        if check == 'true':
            return True
        else:
            return False
    if request.is_xhr and request.method == 'POST':
        action = request.form['action']
        business = request.form['business']
        ganmes = request.form['ganmes']
        op_one = request.form['op_one']
        op_two = request.form['op_two']
        operate = request.form['operate']
        factory = request.form['factory']
        autonmoy = is_true(request.form['autonmoy'])
        online = is_true(request.form['online'])
        if action == 'create':
            games = Game_Ascritption(
                center_name = business, game_name = ganmes, game_one = op_one, game_two= op_two,
                game_factory = factory, game_autonomy = autonmoy, game_operate = operate, game_online = online)
            try:
                save_db(games)
                flash({'type':'ok','message':u'添加成功'})
            except BaseException,e:
                flash({'type':'error','message':u'添加失败'})
        elif action == 'edit':
            id = request.form['id']
            find_db = Game_Ascritption.query.get(id)
            find_db.center_name = business
            find_db.game_name = ganmes
            find_db.game_one = op_one
            find_db.game_two = op_two
            find_db.game_factory = factory
            find_db.game_autonomy = autonmoy
            find_db.game_operate = operate
            find_db.game_online = online
            try:
                save_db(find_db)
                flash({'type':'ok','message':u'更新成功'})
            except BaseException,e:
                flash({'type':'error','message':u'更新失败'})
        # after each change, rebuild the memcached owner table
        # (game_name -> "owner1 owner2"), cached for one hour
        ascription_data = { int(name.game_name):'%s %s' %(name.game_one, name.game_two) for name in Game_Ascritption.query.all() }
        save_memcache_value('ascription_data', ascription_data, 60*60*1)
    return jsonify({'code':200})
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,775 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/zabbix_manage.py | #!/usr/bin/env python
#ecoding:utf-8
from zabbix import Efun_Zabbix
from ..scripts.tools import save_memcache_value,get_memcached_value
from ..business.models import Manager_business
import string, json, re, urllib, urllib2, cookielib, sys
from ..models import Sections,db
from redis_manage import Efun_Redis
from multiprocessing import Process
sys.path.append('../..')
import config, time
class zabbix_tools():
    """Small query helpers shared by the zabbix management views."""

    @classmethod
    def return_sections(cls):
        """Collect section links for every centre and its member URLs.

        Returns a dict mapping each member section id to a "name href"
        string, with key '0' holding a centre name (the last centre
        iterated wins, matching the original behavior).
        """
        section_map = {}
        centers = db.session.query(Sections).filter(Sections.name.like('%中心')).all()
        for center in centers:
            member_urls = Sections.query.filter_by(membership=center.id).all()
            section_map['0'] = center.name
            for entry in member_urls:
                section_map[entry.id] = '%s %s' % (entry.name, entry.href)
        return section_map

    @classmethod
    def return_db_items(cls, ip):
        """Return the first Manager_business row matching *ip*, or None."""
        return Manager_business.query.filter_by(hostip=ip).first()

    @classmethod
    def return_sort(cls):
        """Return {0: 'a', ..., 25: 'z', 26: 'A', ..., 51: 'Z'}."""
        return dict(enumerate(string.ascii_letters))
class manage_zabbix(Efun_Zabbix):
# Decide whether a checkbox is in the checked state.
def is_checked(self, item, items):
if int(item['itemid']) in items:
item['type'] = True
else:
item['type'] = False
return item
# If any checkbox under a nav application is checked, tag that application's name.
def show_is_checked(self, new_items, application_name, new_datas):
if True in [ b['type'] for b in new_items ]:
if 'is_check' not in application_name:
new_name = '%s (is_check)' %application_name
else:
new_name = application_name
else:
new_name = application_name
new_datas[new_name] = new_items
return new_datas
# Normalize zabbix item names so unresolved macros don't make them display oddly.
def get_zabbix_host_infos(self, ip, check_items):
new_datas = {}
host_macros = { i['macro']:i['value'] for i in self.change_macro_name(ip) }
for application in self.get_application_items(ip):
items, new_items = application['items'], []
if items:
for item in items:
item['name'] = self.change_zone_name(item['key_'], item['name'])
find_macro = re.findall(r'\{\$.*\}', item['name'])
item = self.is_checked(item, check_items)
if find_macro:
for macro_key in host_macros.keys():
if macro_key in '.'.join(find_macro):
item['name'] = item['name'].replace(macro_key, host_macros[macro_key])
new_items.append(item)
if application['items']:
new_datas = self.show_is_checked(new_items, application['name'], new_datas)
new_datas['checked_items'] = check_items
return new_datas
#处从memcached中读取的数据。用于对checkbox做选中状态
def checkbox_checked(self, all_data, items):
new_datas = {}
for application_name, host_items in all_data.items():
if application_name != 'checked_items':
new_items = []
for item in host_items:
new_items.append(self.is_checked(item, items))
new_datas = self.show_is_checked(new_items, application_name, new_datas)
else:
new_datas['checked_items'] = items
return new_datas
#整理zabbix信息。该整理的结果能够自动选中以及以及目录上做标识
#1、从memcached中查找返回的字符串是否存在。
#2、如果存在则读取memcached中的信息
#3、如果不存在则运行get_zabbix_host_infos() 方法,并将返回信息存放在memcached中
#4、存放要求,key以ip命名,存储时间10分钟 60*10
def return_views_info(self, ip, check_items=[], select=False):
if ip:
if get_memcached_value(ip):
new_datas = get_memcached_value(ip)
else:
new_datas = self.get_zabbix_host_infos(ip, check_items)
save_memcache_value(ip, new_datas, 10*60)
if select:
#用于判断数据库中的items与memcached中记录的items是否相同。如果不同则进行处理更新memcached
memcached_data = new_datas['checked_items']
memcached_data.sort()
if memcached_data != check_items:
new_datas = self.checkbox_checked(new_datas, check_items)
return new_datas
else:
return {}
#缓存itemid对应的名称,通过开启对应的页面做第一次缓存。缓存以后不再更改
#缓存到redis,永久保存。判断方式。redis的key名,当前页面的名称。
#通过判断item的数量。有没有变动。确认是否更新缓存。如果不相等则更新缓存
def zabbix_items_names(self,value):
new_items = {}
for line in self.get_items_names(value):
new_items[line['itemid']] = self.change_zone_name(line['key_'], line['name'])
return json.dumps(new_items)
def items_names(self, key, items):
get_redis_datas = Efun_Redis.redis_get(key)
if get_redis_datas:
get_redis_datas = json.loads(Efun_Redis.redis_get(key))
if len(get_redis_datas.keys()) != len(items):
Efun_Redis.redis_set(key, self.zabbix_items_names(items), 0)
else:
Efun_Redis.redis_set(key, self.zabbix_items_names(items), 0)
return json.loads(Efun_Redis.redis_get(key))
#通过历遍的方式获取制定名称的主机组信息,并返回成一个完整的列表格式
#从memcached中读取,如果读取失败则通过api刷新获取。
#刷新成功则再次保存在API中,缓存失效1个小时
def find_hostgroup_names(self, names):
get_info = get_memcached_value('center_hostgroup_name')
if get_info:
return get_info
else:
all_datas = {}
for name in names:
search = {'name':name}
all_datas[name.strip('_')] = { int(i['groupid']):i['name'] for i in self.get_hostgroup_name(search) }
save_memcache_value('center_hostgroup_name', all_datas, 60*60*12)
return all_datas
#返回游戏负责人名称列表
def find_op_users(self):
filter = {"name":u'游戏运维'}
get_user_info = get_memcached_value('op_users')
if get_user_info:
return get_user_info
else:
save_mem = { line['surname'].split(":")[0]:line['surname'].split(":")[1] for line in self.get_ures_names(filter)}
save_memcache_value('op_users', save_mem, 60*60*12)
return save_mem
#返回当前报警trigger是否被关闭报警
def return_trigger_ack_info(self, triggerid, auth):
from ..scripts.time_manage import strftime_to_datetime
try:
all_ack = [ ack['acknowledges'] for ack in self.is_ack(triggerid, auth) ]
if all_ack[0]:
line = all_ack[0][0]
ack_message = "%s|%s|%s" %(line['alias'], strftime_to_datetime(line['clock']), line['message'])
save_memcache_value(triggerid, ack_message, 60*10)
except BaseException,e:
pass
#通过多进程方式快速的调用返回当前项目是否已经被关闭报警
def multi_get_ack(self, triggerids, auth):
process = []
for triggerid in triggerids:
process.append(Process(target=self.return_trigger_ack_info, args=(triggerid, auth)))
for p in process:
p.start()
#做zabbix的cooice的缓存
def download_zabbix_image(self, user_info):
#模拟登陆生成cookle生成cookle
login_opt = urllib.urlencode({"name":user_info['user'],"password":user_info['password'],"autologin":1,"enter":"Sign in"})
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
login_url = r"%s/index.php" % config.zabbix_server
opener.open(login_url,login_opt).read()
return opener
#通过itemid的方式获取图片的二进制代码
def return_item_graph(self, auth, itemid):
user_info = get_memcached_value(auth)
#下载图片信息
get_graph_opt = urllib.urlencode({"sid":user_info['sid'],"period":"3600", "action":'showgraph',"itemids[]":itemid})
save_graph_url = r"%s/chart.php" % config.zabbix_server
data = self.download_zabbix_image(user_info).open(save_graph_url,get_graph_opt).read()
return data
#通过graphid的方式过去汇总的graph信息
def return_graphid_graph(self, auth, graphid):
user_info = get_memcached_value(auth)
stime = int(time.time()) - 3600
get_graph_opt = urllib.urlencode({"graphid":graphid, "period":"3600", "stime":stime})
save_graph_url = r"%s/chart2.php" % config.zabbix_server
data = self.download_zabbix_image(user_info).open(save_graph_url,get_graph_opt).read()
return data
#用于修改当前主机中的macro以及位置变量的信息
def change_all_macro_name(self, itemid, ip):
#目前已经将位置变量名字更新
now_item_info = self.get_items_names(itemid)[0]
new_name = self.change_zone_name(now_item_info['key_'], now_item_info['name'])
#更新macro的名字
macros_host = { m['macro']:m['value'] for m in self.change_macro_name(ip) }
if macros_host:
for macro in re.findall(r'\{\$.*\}', now_item_info['name']):
new_name = new_name.replace(macro, macros_host[macro])
print new_name
return new_name
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,776 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/monitor/interface.py | #ecoding:utf-8
from flask import request
from flask_login import current_user
from app.monitor import monitor
from app import db
from app.models import Monitor_host, Login_pwd, Login_ssh
import json
import sys, time
from app.scripts.tools import urldecode, return_id, select_and_create, save_db, return_ips, save_memcached_list
from app.scripts.socket_client import Run_Socket_Client
sys.path.append('../..')
import config
conn_memcache = config.conn_memcached()
#将接收的json数据做转换
def insert_db(data):
    """Add one model instance to the session and commit it.

    Returns True on success. On failure the session is rolled back (so it
    remains usable for subsequent requests) and False is returned.
    """
    db.session.add(data)
    try:
        db.session.commit()
        return True
    except Exception:
        # BUGFIX: the bare ``except:`` also swallowed SystemExit /
        # KeyboardInterrupt, and a failed commit left the session dirty;
        # roll back so later operations on the shared session still work.
        db.session.rollback()
        return False
#读取日志文件
def read_log(filename):
    """Read the whole log file at *filename* and return return_ips() of it."""
    with open(filename, 'r') as log_file:
        contents = log_file.read()
    return return_ips(contents)
#通过接收post信息,将该信息存储到数据库中
@monitor.route('/monitor/message_interface', methods=['POST','GET'])
def message_interface():
    """Callback endpoint hit by install agents to report their status.

    The payload must carry a ``token`` matching a Monitor_host row;
    otherwise the request is answered with 'error' and nothing is stored.
    """
    # NOTE(review): eval() on raw request data executes arbitrary Python —
    # dangerous if this endpoint is reachable by untrusted clients;
    # consider json.loads with the urldecode fallback instead.
    try:datas = eval(request.get_data())
    except:datas = urldecode(request.get_data())
    if Monitor_host.query.filter_by(token=datas['token']).all():
        # Only hosts whose token matches a Monitor_host record are accepted.
        if datas.has_key('time'):  # Python 2 dict API
            now_time = datas['time']
        else:
            now_time = time.strftime('%H:%M:%S',time.localtime(time.time()))
        if int(datas['plan']) > 0 :
            # progress 1%..100%: append to the user's message list and
            # record the percentage under the 'plan_' prefix (1 h TTL)
            message_data = {'host':datas['host'], 'plan':datas['plan'], 'message':datas['message'], 'time':now_time, 'code':datas['code'], 'token':datas['token']}
            save_memcached_list(config.memcached_key_name(datas['userid'])[1], message_data)
            conn_memcache.set_multi({datas['host']: datas['plan']}, 3600, key_prefix="plan_")
        else:
            # non-positive plan (-1): store the post-install check result
            # under the 'check_' prefix instead
            conn_memcache.set_multi({datas['host']:datas['message']}, 3600, key_prefix='check_')
        return u'%s %s\n' %(datas['host'], datas['message'])
    else:
        return 'error'
#monitor_history数据库中读取信息到前端口页面
@monitor.route('/monitor/message_info', methods=['GET','POST'])
def message_info():
    """Polling endpoint feeding the install page.

    Returns a JSON payload with 'plan' (progress-bar HTML), 'message'
    (live status table HTML), 'except' (comma-joined unreachable ips)
    and 'check' (post-install check table HTML).
    """
    # locals
    return_json, check_info, dont_conn= {},{},[]
    plan_text, monitor_check = "",""
    html_text, userid = '<table class=\"table\">',current_user.id
    if request.method == "POST":
        # progress-related data cached per user in memcached
        recv_datas = conn_memcache.get(config.memcached_key_name(userid)[0])
        try:
            ips = recv_datas['ips'].split(',')
            plan_item = conn_memcache.get_multi(ips, key_prefix='plan_')
            # render one progress bar per host
            for ip,plan in plan_item.items():
                if plan:
                    plan_text += '<span class=\"c_ip\">%s</span><div class=\"progress\"><div class=\"progress-bar progress-bar-striped active\" role=\"progressbar\" aria-valuenow=\"20\" aria-valuemin=\"0\" aria-valuemax=\"100\" style=\"width: %s%%;\"></div></div>' %(ip, plan)
            # render the live status rows
            for line in conn_memcache.get(config.memcached_key_name(userid)[1]):
                check_info[line['host']] = line['token']
                html_text += "<tr><td style=\"color:green\">%s</td><td style=\"color:red\">%s</td><td>%s</td><td>%s%%</td>" %(line['host'], line['time'], line['message'], line['plan'])
            html_text += "</table>"
            # decide whether the post-install check still needs to run
            find_cache_check = conn_memcache.get_multi(plan_item.keys(),key_prefix='check_')
            rar_list = list(set(plan_item.values()))
            if len(rar_list) == 1 and '100' in rar_list:
                # every host reached 100% — trigger the check exactly once
                if not find_cache_check:
                    check_data = {'conn_pwd':'0new0rd','proxy_ip':recv_datas['proxy'], 'host':check_info,
                                'monitor_host':config.monitor_url, "userid":userid, "action":"check"}
                    Run_Socket_Client(check_data, config.monitor_check_host)
            if find_cache_check:
                # render the check-result table; red cells for abnormal items
                monitor_check = "<table class=\"table\">"
                for ip,message in find_cache_check.items():
                    monitor_check += "<tr><td>%s</td>" %ip
                    for mess in message.split('+'):
                        if u'异常' in mess:
                            monitor_check += "<td style=\"color:red;\">%s</td>" %mess
                        else:
                            monitor_check += "<td style=\"color:green;\">%s</td>" %mess
                    monitor_check += "</tr>"
                monitor_check += "</table>"
            # work out which ips could not be connected to
            find_ok_ips = conn_memcache.get(config.memcached_key_name(userid)[2])
            if find_ok_ips:
                ok_ips = []
                for ok_dict in find_ok_ips:
                    ok_ips.append(ok_dict.keys()[0])
                    # persist the username/password that actually worked
                    find_db = Monitor_host.query.filter_by(ipaddr = ok_dict.keys()[0], user=current_user.username).first()
                    # create the password/port rows if missing, then point the
                    # host record at them (a true/false refresh-redis switch
                    # is reserved here for later use)
                    new_pwd, new_port = ok_dict.values()[0][0], int(ok_dict.values()[0][1])
                    select_and_create(Login_pwd, new_pwd, 'pwd')
                    select_and_create(Login_ssh, new_port, 'port')
                    find_db.login_pwd_id = return_id(Login_pwd, 'pwd', 'r_id', new_pwd)
                    find_db.login_ssh_id = int(return_id(Login_ssh, 'port', 'r_id', new_port))
                    save_db(find_db)
                if len(ok_ips) != len(ips):
                    [ ips.remove(o_ip) for o_ip in ok_ips ]
                    dont_conn = ips
                # drop the temporary ip/password/port cache
                conn_memcache.delete(config.memcached_key_name(userid)[2])
        except StandardError,e:
            # best effort: a missing cache entry just yields empty fields.
            # NOTE(review): Python 2 syntax; StandardError does not exist in Py3.
            pass
        return_json['plan'],return_json['message'],return_json['except'],return_json['check'] = plan_text,html_text,','.join(dont_conn),monitor_check
        return json.dumps(return_json)
    else:
        return json.dumps({'code':'404'})
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,777 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/monitor/scan_page.py | #!/usr/bin/env python
#ecoding:utf-8
from app.monitor import monitor
from flask import render_template, request, jsonify, url_for, redirect, Response
from flask_login import login_required, current_user
from ..decorators import user_required, zabbix_login_required
from ..scripts.zabbix import zabbix_API
from ..scripts.zabbix_manage import manage_zabbix
from ..scripts.tools import save_memcache_value,get_memcached_value
from .models import Game_Ascritption
from ..scripts.xtcaptcha import Captcha
from app import csrf
import sys
from PIL import Image
from io import BytesIO
sys.path.append('../..')
import config
zabbix = manage_zabbix()
#用于做zabbix页面刷新工作
#此处主要先验证当前输入的zabbix账号是否正确。
#如果正确则存储在memcached中。
#默认保存周期为7天。7天以后会自动的删除,届时需要重新的登录
#通过装饰器,做了zabbix的登录验证,如果没有该用户的登录信息。则跳转到登录
#验证通过后保存在缓存中,下次登录装饰器会检查是否能找到,如果找不到则需要重新的登录
@monitor.route('/monitor/zabbix_login', methods=['GET','POST'])
@login_required
@csrf.exempt
def zabbix_login():
    """Bind the current user to a zabbix account.

    On a valid AJAX POST the credentials are checked against the zabbix
    API; the auth token and the credentials are then cached in memcached
    for 7 days, after which the user must log in again.
    """
    html_data = {'name': u'zabbix绑定'}
    if not (request.is_xhr and request.method == 'POST'):
        return render_template('monitor/zabbix_login.html', **html_data)
    username = request.form['user']
    secret = request.form['password']
    api = zabbix_API(
        action='auth',
        zabbix_user=username,
        zabbix_password=secret,
        zabbix_server=config.zabbix_server)
    token = api.login()
    session_id = Captcha.gene_text(16).lower()
    if not token:
        return jsonify({'code':400, 'message':u'验证错误'})
    one_week = 60*60*24*7
    save_memcache_value(current_user.zabbix_user_key(), token, one_week)
    save_memcache_value(token, {'user':username, 'password':secret, 'sid':session_id}, one_week)
    return jsonify({'code':200, 'href': url_for('monitor.zabbix_scan')})
#显示zabbix的页面报警信息
@monitor.route('/monitor/scan', methods=['GET','POST'])
@login_required
@user_required
@zabbix_login_required
def zabbix_scan():
    """Render the zabbix alert overview page."""
    # Seed memcached with the game-owner lookup (1 hour TTL) on first hit.
    if not get_memcached_value("ascription_data"):
        owners = {}
        for row in Game_Ascritption.query.all():
            owners[int(row.game_name)] = '%s %s' %(row.game_one, row.game_two)
        save_memcache_value('ascription_data', owners, 60*60*1)
    # Cached zabbix auth token for the current user (valid for 7 days).
    auth = get_memcached_value(current_user.zabbix_user_key())
    return render_template('monitor/zabbix_scan.html', name=u'监控查看', auth=auth)
#用于调用zabbix异常的信息
@monitor.route('/monitor/zabbix.json', methods=['POST'])
@login_required
@zabbix_login_required
@csrf.exempt
def zabbix_police_infos():
    """Fetch active zabbix triggers and render them as the alert table.

    POST form fields: ``auth`` (zabbix session token) and ``fun``
    (u'维护' for hosts in maintenance, u'故障' for faulted hosts).
    Non-AJAX requests are redirected to the zabbix binding page.
    """
    problem_triggerids,show_infos = [],[]
    if request.is_xhr and request.method == 'POST':
        auth,fun = request.form['auth'],request.form['fun']
        try:
            # hostid -> [groupids] map, cached for 30 minutes
            all_hostids = get_memcached_value('all_hostids')
            if not all_hostids:
                # BUGFIX: the fresh dict was previously cached but the local
                # stayed None, so the first request after cache expiry always
                # raised (silently) and rendered an empty table. Cache and
                # use the same dict.
                all_hostids = { h['hostid']:[ g['groupid'] for g in h['groups']] for h in zabbix.get_hostid(auth=auth) }
                save_memcache_value('all_hostids', all_hostids, 60*30)
            triggers = []
            if fun.encode('utf8') == '维护':
                triggers = zabbix.get_hostids_trigger(all_hostids.keys(), auth=auth, maintenance=True)
            elif fun.encode('utf8') == '故障':
                triggers = zabbix.get_hostids_trigger(all_hostids.keys(), auth=auth, maintenance=False)
            if triggers:
                for i in triggers:
                    # item status 0 == enabled; only those count as problems
                    if int(i['items'][0]['status']) == 0:
                        problem_triggerids.append(i['triggerid'])
                        show_infos.append(i)
            # resolve acknowledged state for the problem triggers in parallel
            if problem_triggerids:
                zabbix.multi_get_ack(problem_triggerids, auth=auth)
        except BaseException:
            # best effort: any zabbix/API failure degrades to an empty table
            pass
        return render_template('monitor/monitor_scan_table.html', show_infos=show_infos)
    else:
        return redirect(url_for('monitor.zabbix_login'))
#图片已数据流的方式返回
@monitor.route('/monitor/graph.png')
@login_required
@zabbix_login_required
@csrf.exempt
def zabbix_police_graph():
    """Stream a zabbix chart (selected by itemid or graphid) as a PNG."""
    if request.method == 'GET':
        item_id = request.args.get('itemid')
        graph_id = request.args.get('graphid')
        auth = get_memcached_value(current_user.zabbix_user_key())
        if item_id:
            png_bytes = zabbix.return_item_graph(auth, item_id)
        elif graph_id:
            png_bytes = zabbix.return_graphid_graph(auth, graph_id)
        # Hand the raw PNG bytes straight back to the client.
        return Response(png_bytes, content_type='image/png')
#点击方式创建一个zabbix页面的url地址
@monitor.route('/monitor/to_zabbix')
@login_required
@zabbix_login_required
def to_zabbix():
    """Return a zabbix frontend URL for the clicked item or graph.

    Query args: ``itemid`` + ``boole`` ('True' -> graph view, 'False' ->
    values view), or ``graphid`` + ``hostid`` for the charts page.
    Returns '' when no recognised combination is supplied.
    """
    if request.is_xhr and request.method == 'GET':
        itemid,graphid = request.args.get('itemid'), request.args.get('graphid')
        boole = request.args.get('boole')
        # BUGFIX: default to '' so a request without itemid/graphid (or with
        # an unknown ``boole`` value) no longer raises UnboundLocalError.
        to_zabbix_url = ''
        if itemid:
            if boole == 'True':
                to_zabbix_url = '%s/history.php?action=showgraph&itemids[]=%s' %(config.zabbix_server, itemid)
            elif boole == 'False':
                to_zabbix_url = '%s/history.php?action=showvalues&itemids[]=%s' %(config.zabbix_server, itemid)
        elif graphid:
            hostid = request.args.get('hostid')
            to_zabbix_url = '%s/charts.php?hostid=%s&graphid=%s' %(config.zabbix_server, hostid, graphid)
        return to_zabbix_url
#############################################################
#用于爬去zabbix信息在前端显示
#############################################################
#zabbix grouph图web页面
@monitor.route('/monitor/showgraph')
@login_required
@zabbix_login_required
@csrf.exempt
def show_item_graphs():
    """Render the graph page for a comma-separated list of itemids."""
    if request.is_xhr and request.method == 'GET':
        context = {
            'itemids': request.args.get('itemids').split(','),
            'ip': request.args.get('ip'),
            'boole': True,
        }
        return render_template('/temp/showgraph.html', **context)
#通过传入的hostid地址,返回其下方有多少个graph信息
@monitor.route('/monitor/selectgraph')
@login_required
@zabbix_login_required
@csrf.exempt
def show_graph_graphs():
    """List the graphids defined under a host, caching each graph's name."""
    if request.is_xhr and request.method == 'GET':
        hostid = request.args.get('hostid')
        graphids = []
        try:
            for graph in zabbix.hostid_to_graphids(hostid):
                graphids.append(graph['graphid'])
                # cache the graph name for one hour, keyed by graphid
                save_memcache_value('graphid_%s' % graph['graphid'], graph['name'], 60*60)
        except:
            # best effort: an API failure just yields an empty list
            pass
        return render_template('/temp/showgraph.html', graphids=graphids, boole=True)
#通过传入itemid返回一小时的历史记录
@monitor.route('/monitor/showvalue')
@login_required
@zabbix_login_required
@csrf.exempt
def show_item_value():
    """Show the last hour of history values for one item, newest first."""
    if request.is_xhr and request.method == 'GET':
        itemid = request.args.get('itemid')
        history = []
        try:
            for record in zabbix.itemid_to_history(itemid):
                # skip empty values; prepend so the newest entry comes first
                if record['value']:
                    history.insert(0, record)
        except:
            pass
        return render_template('/temp/showgraph.html', data=history, boole=False)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,778 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/decorators.py | #!/usr/bin/env python
#ecoding:utf-8
from functools import wraps
from flask import abort, redirect, url_for
from flask_login import current_user
from .models import Permission
from app.scripts.tools import get_memcached_value
#1 先判断用户是否为管理员。
#2 如果是管理员直接返回不做检测
#3 如果非管理员则检测路径是否具有访问权限
def permission_required(permission):
    """Decorator factory guarding views by permission.

    Admins holding *permission* pass straight through; any other user
    passes only when they have at least one visible section, otherwise
    the request is aborted with 403.
    """
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            allowed = current_user.is_admin() and current_user.can(permission)
            if not allowed and not current_user.show_sections():
                abort(403)
            return f(*args, **kwargs)
        return decorated_function
    return decorator
#限定普通用户的
def user_required(f):
    """Shortcut for ``permission_required(Permission.user)``."""
    user_decorator = permission_required(Permission.user)
    return user_decorator(f)
#限定超级管理员的
def admin_required(f):
    """Shortcut for ``permission_required(Permission.administrator)``."""
    admin_decorator = permission_required(Permission.administrator)
    return admin_decorator(f)
#判断当前用户有没有绑定zabbix用户,如果没有绑定则跳转页面
def zabbix_login_required(f):
    """Require a cached zabbix auth token; otherwise redirect to binding."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # The token is cached under the user's zabbix key at login time;
        # absence means the binding expired (or never happened).
        if not get_memcached_value(current_user.zabbix_user_key()):
            return redirect(url_for('monitor.zabbix_login'))
        return f(*args, **kwargs)
    return wrapper
55,779 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/monitor/views_monitor.py | #ecoding:utf-8
from flask import render_template, request, redirect, url_for
from flask_login import login_required, current_user
from app.monitor import monitor
from app.models import Login_pwd, Proxy, System, Login_ssh, Monitor_host
import sys, json, os
from app.scripts.socket_client import Run_Socket_Client
from app.scripts.tools import urldecode, flush_token, delete_dbs, save_list_db, return_id, return_user, return_input_value, check_tcp, write_log, return_ips, write_file
from app import csrf
from app.decorators import admin_required
#导入配置文件
sys.path.append('../..')
import config
# Python 2 only: force the default string encoding to utf-8 so the mixed
# str/unicode formatting below does not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding( "utf-8" )
# Working directory (used as the log path) and the shared memcached client.
path = os.getcwd()
conn_memcache = config.conn_memcached()
#安装监控页面
@monitor.route('/monitor', methods=['POST','GET'])
@login_required
@admin_required
@csrf.exempt
def monitor_install():
    """Monitor-agent installation page.

    GET: render the install form, pre-filled with this user's previously
    submitted ips/proxy (from memcached).
    POST: clear the user's per-install caches, dispatch the install job to
    the Windows or Linux jump host over a socket, and record each target
    host in the Monitor_host table.
    """
    userid = current_user.id
    save_data = {'ips':"",'proxy':""}
    return_dicts = conn_memcache.get(config.memcached_key_name(userid)[0])
    if not return_dicts:
        return_dicts = save_data
    html_datas = {
        'name' : u'监控安装',
        'pwds' : return_id(Login_pwd, 'pwd', "list"),
        'sshs' : return_id(Login_ssh, 'port', "list"),
        'proxys' : return_id(Proxy, 'proxy_ip', "list"),
        'systems' : return_id(System, 'sort_name', "list"),
        'ips': json.dumps(return_dicts),
        'cwp': check_tcp(config.w_install_socket_server, config.w_install_socket_port),
    }
    if request.method == 'POST':
        # Example form payloads returned by the front end:
        # mode=false&login_ip=&login_port=22&login_pwd=0new0rd&conn_proxy=103.227.128.16&install_system=c
        #{'login_port': '22', 'install_system': 'c', 'login_pwd': '0new0rd', 'conn_proxy': '103.227.128.16', 'login_ip': '172.16.5.240+172.16.5.241', 'mode': 'false'}
        # Install_Start expects: ips, password, port, kwargs where
        # kwargs = {'filename':'install-agent.V2.0.sh', 'system':'c', 'proxy':'103.227.128.16'}
        # In auto mode the returned string is matched automatically:
        #{'login_port': '22', 'install_system': 'c', 'login_pwd': '0new0rd', 'conn_proxy': '103.227.128.16', 'login_ip': '172.16.5.240+172.16.5.241', 'mode': 'true'}
        datas = urldecode(request.get_data())
        kwargs = {'filename':'install-agent.V2.0.sh', 'system':datas['install_system'], 'proxy':datas['conn_proxy'], 'user_id':int(current_user.id)}
        ips = return_ips(datas['login_ip'])
        insert_dbs = []
        # drop this user's per-user memcached keys...
        for mem_name in config.memcached_key_name(userid):
            try:conn_memcache.delete(mem_name)
            except:pass
        # ...the previous check results...
        try:conn_memcache.delete_multi(ips, key_prefix='check_')
        except:pass
        # ...and the previous progress entries
        try:conn_memcache.delete_multi(ips, key_prefix='plan_')
        except:pass
        ssh_password = return_input_value(datas, 'pwd')
        ssh_tcp_port = return_input_value(datas, 'port')
        # #(disabled) Before installing, delete all of this user's install records:
        # find_user = Monitor_host.query.filter_by(user=current_user.username).all()
        # delete_dbs(find_user)
        if datas['install_system'] == 'w':
            #Windows jump-host install: begin
            install_datas = {'conn_pwd': config.conn_pwd, 'monitor_url':config.monitor_url, 'userid':userid}
            ip_info,new_dict = [], {}
            for ip in ips:
                # RDP port 3389 is assumed for Windows targets
                new_dict[ip] = [ssh_password, 3389]
                now_token = flush_token()
                ip_info += [[ip, datas['login_pwd'], datas['conn_proxy'], now_token]]
                insert_dbs.append(Monitor_host(ipaddr = ip,
                                    login_user = return_user(datas),
                                    login_pwd_id = return_id(Login_pwd, 'pwd', 'r_id', ssh_password),
                                    proxy_id = return_id(Proxy, 'proxy_ip', 'r_id', datas['conn_proxy']),
                                    system_id = return_id(System, 'sort_name', 'r_id', datas['install_system']),
                                    user = current_user.username,
                                    token = now_token ))
            install_datas['datas'] = ip_info
            conn_memcache.set(config.memcached_key_name(userid)[2], new_dict, 3600)
            try:
                Run_Socket_Client(install_datas, config.w_install_socket_server)
            except BaseException,e:
                write_log(path, current_user.username, e)
            #Windows jump-host install: end
        else:
            #Linux jump-host install: begin
            # generate one token per target ip
            for ip in ips:
                now_token = flush_token()
                kwargs[ip] = now_token
                insert_dbs.append(Monitor_host(ipaddr = ip,
                                    login_user = return_user(datas),
                                    proxy_id = return_id(Proxy, 'proxy_ip', 'r_id', datas['conn_proxy']),
                                    system_id = return_id(System, 'sort_name', 'r_id', datas['install_system']),
                                    user = current_user.username,
                                    token = now_token))
            kwargs['memcached_key_name'] = config.memcached_key_name(userid)[2]
            print kwargs
            # mode 'false': use exactly the password/port entered in the
            # form; otherwise hand over every known password/port to try
            if datas['mode'] == 'false':
                infos = [[ ip, ssh_password, ssh_tcp_port, kwargs ] for ip in ips ]
            else:
                infos = [html_datas['pwds'], html_datas['sshs'], kwargs]
            # send the install command to the jump host
            try:
                # assembled job shipped to the linux jump host to perform
                # the relayed install
                install_infos = {'conn_pwd':config.conn_pwd,'mode':datas['mode'],
                                'ips':ips,'login_id':int(userid),
                                'action':'install', 'infos':infos,
                                'path':config.monitor_url}
                Run_Socket_Client(install_infos, config.monitor_check_host)
            except BaseException,e:
                write_log(path, current_user.username, e)
            #Linux jump-host install: end
        # record the install targets in the database
        for ip in ips:
            # replace any existing row for this ip before re-inserting
            find_ipaddr = Monitor_host.query.filter_by(ipaddr = ip).all()
            if find_ipaddr:
                delete_dbs(find_ipaddr)
        # after the cleanup, write the fresh rows in one go
        save_list_db(insert_dbs)
        # remember the submitted ips/proxy for this user (1 hour TTL)
        save_data['ips'],save_data['proxy'] = ','.join(ips), datas['conn_proxy']
        conn_memcache.set(config.memcached_key_name(userid)[0], save_data, 3600)
        return redirect(url_for('main.monitor_install'))
    return render_template('monitor/monitor.html', **html_datas)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,780 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/report/export_excel.py | #coding:utf-8
import sys,os,re
from openpyxl import Workbook
from openpyxl.styles import Color, PatternFill, Font, Border,Side, Alignment, Protection
from openpyxl.styles import colors
from openpyxl.cell import Cell
from openpyxl.comments import Comment
from .. models import User,Trouble_repo,Trouble_repo_add,Month_trouble_repo,Month_trouble_log,Anomaly_log
from config import basedir
from openpyxl.chart import (
PieChart,
ProjectedPieChart,
Reference
)
from openpyxl.chart.series import DataPoint
def trouble(trouble_times,stab_per,trouble_times_1,stab_per_1,trouble_list,title,head):
    """Export the periodic trouble report to an Excel file.

    Sheet layout: row 1 holds the summary figures, row 2 the styled column
    headers (with explanatory pop-up comments), rows 3+ one line per record.

    Args:
        trouble_times: core-service downtime in minutes (written to B1).
        stab_per: core-service stability percentage (written to C1).
        trouble_times_1: non-core-service downtime in minutes (written to F1).
        stab_per_1: non-core-service stability percentage (written to G1).
        trouble_list: iterable of row sequences, 20 columns each, matching
            the header row order.
        title: output file path handed to ``Workbook.save``.
        head: unused; retained so existing call sites keep working (the line
            that wrote it to A1 was already commented out in the original).
    """
    wb = Workbook()
    # Shared style objects (openpyxl styles are immutable value objects, so
    # one instance can safely be assigned to many cells).
    headFill = PatternFill(start_color='357ebd', end_color='357ebd', fill_type='solid')
    headtext = Alignment(horizontal='center', vertical='center')
    bodytext = Alignment(horizontal='center', vertical='center', wrap_text=True)
    # Left-aligned wrapping style for the long free-text columns Q..T;
    # hoisted out of the row loop — it is loop-invariant.
    freetext = Alignment(horizontal='general', vertical='center', wrap_text=True)
    ft = Font(color='ffffff', size=14, bold=True)
    ft_black = Font(color='000000', size=14, bold=True)
    thin = Side(border_style='thin', color='FF000000')
    border = Border(left=thin, right=thin, top=thin, bottom=thin,
                    diagonal=thin, diagonal_direction=20,
                    outline=thin, vertical=thin, horizontal=thin)
    protection = Protection(locked=False, hidden=True)
    ws = wb.create_sheet(u'故障报告', 0)
    # Taller rows for the summary line and the header line.
    ws.row_dimensions[1].height = 40.0
    ws.row_dimensions[2].height = 40.0
    ws['B1'] = u'核心服务故障时间:%s分钟 ' % trouble_times
    ws['C1'] = u'稳定性:%s%%' % stab_per
    ws['F1'] = u'非核心服务故障时间:%s分钟 ' % trouble_times_1
    ws['G1'] = u'稳定性:%s%%' % stab_per_1
    head_text = [u"日期",u"运营中心",u"业务模块",u"事件",u"影响范围",u"是否内部故障",u"影响时长(分钟)",u"是否影响用户体验",u"影响用户",u"直接经济损失(美元)",u"数据来源",u"是否核心服务",u"故障类型",u"处理负责人",u"归属",u"状态",u"故障原因",u"处理过程",u"教训总结",u"改进"]
    ws.append(head_text)
    # Explanatory pop-up comments on selected header cells.
    ws['C2'].comment = Comment(text="游戏明\n储值\n登陆\n后台\n所有", author="业务模块")
    ws['D2'].comment = Comment(text="描素事件现象", author="事件")
    ws['F2'].comment = Comment(text="如网络、CDN、原厂、渠道等外部因素引起的故障都不是内部故障", author="是否内部故障")
    ws['I2'].comment = Comment(text="1、昨天在线数据-今天储值数据;\n2、如数据值为负,表示故障时段数据比昨天上升,则故障影响不大;\n3、若数据为0,则表示无影响。", author="影响用户")
    ws['J2'].comment = Comment(text="1、昨天储值数据-今天储值数据;\n2、如数据值为负,表示故障时段数据比昨天上升,则故障影响不大 \n3、若数据为0,则表示无影响", author="经济损失")
    # Data rows start at row 3 (row 1 = summary, row 2 = header).
    # Loop variable renamed: the original `for trouble in trouble_list`
    # shadowed this function's own name.
    row = 3
    for record in trouble_list:
        ws.append(record)
        for col in 'ABCDEFGHIJKLMNOPQRST':
            cell = ws['%s%d' % (col, row)]
            cell.border = border
            cell.alignment = bodytext
            cell.protection = protection
        # The long free-text columns wrap left-aligned instead of centred.
        for col in 'QRST':
            ws['%s%d' % (col, row)].alignment = freetext
        ws.row_dimensions[row].height = 30.0
        row += 1
    # Header-row styling and default column widths.
    for col in 'ABCDEFGHIJKLMNOPQRST':
        ws['%s2' % col].fill = headFill
        ws['%s2' % col].border = border
        ws['%s1' % col].font = ft_black
        ws['%s2' % col].font = ft
        ws['%s1' % col].alignment = headtext
        ws['%s2' % col].alignment = headtext
        ws['%s1' % col].protection = protection
        ws.column_dimensions[col].width = 20.0
    # Wider columns for the cells that carry long content.
    ws.column_dimensions['B'].width = 28.0
    ws.column_dimensions['F'].width = 30.0
    ws.column_dimensions['Q'].width = 35.0
    ws.column_dimensions['R'].width = 40.0
    wb.save(title)
def anomaly(anomaly_list, title):
    """Write the anomaly log sheet (异常记录) to an Excel file at *title*.

    Row 1 carries the styled 23-column header; each element of
    *anomaly_list* is a row sequence appended beneath it.
    """
    COLUMNS = 'ABCDEFGHIJKLMNOPQRSTUVW'
    book = Workbook()
    # Style palette shared by every cell it applies to.
    redFill = PatternFill(start_color='A52A2A', end_color='A52A2A', fill_type='solid')
    headFill = PatternFill(start_color='357ebd', end_color='357ebd', fill_type='solid')
    centred = Alignment(horizontal='center', vertical='center')
    wrapped = Alignment(horizontal='center', vertical='center', wrap_text=True)
    header_font = Font(color='ffffff', size=14, bold=True)
    edge = Side(border_style='thin', color='FF000000')
    boxed = Border(left=edge, right=edge, top=edge, bottom=edge,
                   diagonal=edge, diagonal_direction=20,
                   outline=edge, vertical=edge, horizontal=edge)
    unlocked = Protection(locked=False, hidden=True)
    sheet = book.create_sheet(u'异常记录', 0)
    # Taller header row.
    sheet.row_dimensions[1].height = 35.0
    head_text = [u"异常事件", u"运营中心", u"异常反馈来源",u"异常反馈类型",u"业务模块",u"异常级别", u"是否误报", u"是否回收/维护", u"影响范围", u"发生时间", u"报错时间",
                 u"开始处理时间", u"处理结束时间",u"异常归属", u"处理人", u"处理结果", u"5分钟反馈", u"15分钟反馈", u"30分钟反馈", u"1小时反馈", u"2小时反馈",
                 u"监控评价", u"监控跟进人"]
    sheet.append(head_text)
    # Data rows start at row 2, directly under the header.
    for offset, record in enumerate(anomaly_list):
        row = offset + 2
        sheet.row_dimensions[row].height = 25.0
        sheet.append(record)
        for col in COLUMNS:
            cell = sheet['{0}{1}'.format(col, row)]
            cell.border = boxed
            cell.alignment = wrapped
            cell.protection = unlocked
        # Free-text feedback columns P..U wrap with left alignment.
        for col in 'PQRSTU':
            sheet['{0}{1}'.format(col, row)].alignment = Alignment(
                horizontal='general', vertical='center', wrap_text=True)
    # Header styling plus a uniform column width.
    for col in COLUMNS:
        head_cell = sheet['{0}1'.format(col)]
        head_cell.fill = headFill
        head_cell.border = boxed
        head_cell.font = header_font
        head_cell.alignment = centred
        head_cell.protection = unlocked
        sheet.column_dimensions[col].width = 20.0
    book.save(title)
def monthrepo(**data):
wb = Workbook()
# 背景颜色
titleFill = PatternFill(start_color='A52A2A', end_color='A52A2A', fill_type='solid')
headFill = PatternFill(start_color='357ebd', end_color='357ebd', fill_type='solid')
# 单元格对齐样式
headtext = Alignment(horizontal='center', vertical='center')
bodytext = Alignment(horizontal='center', vertical='center', wrap_text=True)
# 字体样式
ft = Font(name=u'微软雅黑',color='ffffff', size=14, bold=False)
ft_ps = Font(color='FF0000', size=12, bold=True)
#ft_pm = Font(color='e26b0a', size=12, bold=True)
ft_pm = Font(name=u'微软雅黑',color='FFC000', size=12, bold=True)
# 边框样式
border = Border(left=Side(border_style='thin', color='FF000000'),
right=Side(border_style='thin', color='FF000000'),
top=Side(border_style='thin', color='FF000000'),
bottom=Side(border_style='thin', color='FF000000'),
diagonal=Side(border_style='thin', color='FF000000'),
diagonal_direction=20,
outline=Side(border_style='thin', color='FF000000'),
vertical=Side(border_style='thin', color='FF000000'),
horizontal=Side(border_style='thin', color='FF000000')
)
protection = Protection(locked=False, hidden=True)
ws = wb.create_sheet(u'月故障分析', 0)
# 设置行高
ws.row_dimensions[1].height = 35.0
###############################表格一###################################
ws.merge_cells('A1:E1')
ws.merge_cells('A2:E2')
ws['A1'] = u'各类型故障信息统计'
ws['A2'] = u'月度可用率基准值暂定为99%,各项指标可用率均超过99%,处于可接受范围'
ws['A2'].font = ft_ps
thead_1 = [u'故障指标',u'运营中心',u'业务模块',u'故障时间',u'月可用率']
ws.append(thead_1)
AE_N =data['AE_row']
AE_N += 3
ws.merge_cells('B4:B%d' % AE_N)
ws['B4'] = u'亚欧'
HT_N = data['HT_row']
HT_N += AE_N
AE_N += 1
ws.merge_cells('B%d:B%d' % (AE_N,HT_N))
ws['B%d' % AE_N] = u'港台'
KR_N = data['KR_row']
KR_N += HT_N
HT_N += 1
ws.merge_cells('B%d:B%d' % (HT_N,KR_N))
ws['B%d' % HT_N] = u'韩国'
CN_N = data['CN_row']
CN_N += KR_N
KR_N +=1
ws.merge_cells('B%d:B%d' % (KR_N,CN_N))
ws['B%d' % KR_N] = u'国内'
GB_N = data['GB_row']
GB_N += CN_N
CN_N +=1
ws.merge_cells('B%d:B%d' % (CN_N,GB_N))
ws['B%d' % CN_N] = u'全球'
ALL_N = data['ALL_row']
if ALL_N >0:
ALL_N += GB_N
GB_N+=1
ws.merge_cells('B%d:B%d' % (GB_N,ALL_N))
ws['B%d' % GB_N] = u'所有地区'
else:
pass
#####################################################
N = 4
#亚欧一级指标
ws['C%d' % N] = u'登陆'
ws['D%d' % N] = data['trouble_time_AE_login']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_AE_login']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'储值'
ws['D%d' % N] = data['trouble_time_AE_store']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_AE_store']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'注册'
ws['D%d' % N] = data['trouble_time_AE_register']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_AE_register']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'游戏故障'
ws['D%d' % N] = data['trouble_time_AE_game']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_AE_game']) / data['month_time']) * 100)
if data['trouble_time_AE_all'] >0:
N +=1
ws['C%d' % N] = u'ALL'
ws['D%d' % N] = data['trouble_time_AE_all']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_AE_all']) / data['month_time']) * 100)
#####################################################
# 港台一级指标
N += 1
ws['C%d' % N] = u'登陆'
ws['D%d' % N] = data['trouble_time_HT_login']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_HT_login']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'储值'
ws['D%d' % N] = data['trouble_time_HT_store']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_HT_store']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'注册'
ws['D%d' % N] = data['trouble_time_HT_register']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_HT_register']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'游戏故障'
ws['D%d' % N] = data['trouble_time_HT_game']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_HT_game']) / data['month_time']) * 100)
if data['trouble_time_HT_all'] > 0:
N += 1
ws['C%d' % N] = u'ALL'
ws['D%d' % N] = data['trouble_time_HT_all']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_HT_all']) / data['month_time']) * 100)
#####################################################
# 韩国一级指标
N += 1
ws['C%d' % N] = u'登陆'
ws['D%d' % N] = data['trouble_time_KR_login']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_KR_login']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'储值'
ws['D%d' % N] = data['trouble_time_KR_store']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_KR_store']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'注册'
ws['D%d' % N] = data['trouble_time_KR_register']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_KR_register']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'游戏故障'
ws['D%d' % N] = data['trouble_time_KR_game']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_KR_game']) / data['month_time']) * 100)
if data['trouble_time_KR_all'] > 0:
N += 1
ws['C%d' % N] = u'ALL'
ws['D%d' % N] = data['trouble_time_KR_all']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_KR_all']) / data['month_time']) * 100)
#####################################################
# 国内一级指标
N += 1
ws['C%d' % N] = u'登陆'
ws['D%d' % N] = data['trouble_time_CN_login']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_CN_login']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'储值'
ws['D%d' % N] = data['trouble_time_CN_store']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_CN_store']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'注册'
ws['D%d' % N] = data['trouble_time_CN_register']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_CN_register']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'游戏故障'
ws['D%d' % N] = data['trouble_time_CN_game']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_CN_game']) / data['month_time']) * 100)
if data['trouble_time_CN_all'] > 0:
N += 1
ws['C%d' % N] = u'ALL'
ws['D%d' % N] = data['trouble_time_CN_all']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_CN_all']) / data['month_time']) * 100)
#####################################################
# 全球一级指标
N += 1
ws['C%d' % N] = u'登陆'
ws['D%d' % N] = data['trouble_time_GB_login']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_GB_login']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'储值'
ws['D%d' % N] = data['trouble_time_GB_store']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_GB_store']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'注册'
ws['D%d' % N] = data['trouble_time_GB_register']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_GB_register']) / data['month_time']) * 100)
N += 1
ws['C%d' % N] = u'游戏故障'
ws['D%d' % N] = data['trouble_time_GB_game']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_GB_game']) / data['month_time']) * 100)
if data['trouble_time_GB_all'] > 0:
N += 1
ws['C%d' % N] = u'ALL'
ws['D%d' % N] = data['trouble_time_GB_all']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_GB_all']) / data['month_time']) * 100)
if ALL_N >0:
if data['trouble_time_ALL_login'] >0:
N += 1
ws['C%d' % N] = u'登陆'
ws['D%d' % N] = data['trouble_time_ALL_login']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_ALL_login']) / data['month_time']) * 100)
if data['trouble_time_ALL_store'] > 0:
N += 1
ws['C%d' % N] = u'储值'
ws['D%d' % N] = data['trouble_time_ALL_store']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_ALL_store']) / data['month_time']) * 100)
if data['trouble_time_ALL_register'] > 0:
N += 1
ws['C%d' % N] = u'注册'
ws['D%d' % N] = data['trouble_time_ALL_register']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_ALL_register']) / data['month_time']) * 100)
if data['trouble_time_ALL_game'] > 0:
N += 1
ws['C%d' % N] = u'游戏故障'
ws['D%d' % N] = data['trouble_time_ALL_game']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_ALL_game']) / data['month_time']) * 100)
if data['trouble_time_ALL_all'] > 0:
N += 1
ws['C%d' % N] = u'ALL'
ws['D%d' % N] = data['trouble_time_ALL_all']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_ALL_all']) / data['month_time']) * 100)
N += 1
ws.merge_cells('A4:A%d' % N)
ws['A4'] = u'一级指标'
ws.merge_cells('B%d:C%d' % (N,N))
ws['B%d' % N] = u'合计(一级指标)'
ws['D%d' % N] = data['trouble_time_is_core']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_is_core']) / data['month_time']) * 100)
ws['D%d' % N].font = ft_pm
ws['E%d' % N].font = ft_pm
#################################
two_N = N + 1
N += 1
if data['list_AE'] and data['list_AE'][0][1] >0:
AE_n = N
for i in data['list_AE']:
if i[1]>0:
ws['C%d' % N] = i[0]
ws['D%d' % N] = i[1]
ws['E%d' % N] = "%.2f%%" % ((1 - float(i[1]) / data['month_time']) * 100)
N += 1
ws.merge_cells('B%d:B%d' % (AE_n,(N-1)))
ws['B%d' % AE_n] = u'亚欧'
if data['list_HT'] and data['list_HT'][0][1] > 0:
HT_n = N
for i in data['list_HT']:
if i[1] >0:
ws['C%d' % N] = i[0]
ws['D%d' % N] = i[1]
ws['E%d' % N] = "%.2f%%" % ((1 - float(i[1]) / data['month_time']) * 100)
N += 1
ws.merge_cells('B%d:B%d' % (HT_n,(N-1)))
ws['B%d' % HT_n] = u'港台'
if data['list_KR'] and data['list_KR'][0][1] > 0:
KR_n = N
for i in data['list_KR']:
if i[1] > 0:
ws['C%d' % N] = i[0]
ws['D%d' % N] = i[1]
ws['E%d' % N] = "%.2f%%" % ((1 - float(i[1]) / data['month_time']) * 100)
N += 1
ws.merge_cells('B%d:B%d' % (KR_n,(N-1)))
ws['B%d' % KR_n] = u'韩国'
if data['list_CN'] and data['list_CN'][0][1] > 0:
CN_n = N
for i in data['list_CN']:
if i[1] >0:
ws['C%d' % N] = i[0]
ws['D%d' % N] = i[1]
ws['E%d' % N] = "%.2f%%" % ((1 - float(i[1]) / data['month_time']) * 100)
N += 1
ws.merge_cells('B%d:B%d' % (CN_n,(N-1)))
ws['B%d' % CN_n] = u'国内'
if data['list_GB'] and data['list_GB'][0][1] > 0:
GB_n = N
for i in data['list_GB']:
if i[1] >0:
ws['C%d' % N] = i[0]
ws['D%d' % N] = i[1]
ws['E%d' % N] = "%.2f%%" % ((1 - float(i[1]) / data['month_time']) * 100)
N += 1
ws.merge_cells('B%d:B%d' % (GB_n,(N-1)))
ws['B%d' % GB_n] = u'全球'
ws.merge_cells('B%d:C%d' % (N,N))
ws['B%d' % N]= u'合计(二级指标)'
ws['D%d' % N] = data['trouble_time_not_core']
ws['E%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_not_core']) / data['month_time']) * 100)
ws.merge_cells('A%d:A%d' % (two_N,N))
ws['A%d' % two_N] = u'二级指标'
N += 1
ws.merge_cells('A%d:C%d' % (N, N))
ws['A%d' % N] = u'合计(总)'
ws['D%d' % N] = data['trouble_time_not_core'] + data['trouble_time_is_core']
ws['E%d' % N] = "%.2f%%" % ((1 - float((data['trouble_time_not_core'] + data['trouble_time_is_core'])) / data['month_time']) * 100)
for i in "ABCDE":
ws.column_dimensions[i].width = 20.0
ws['%s1' % i].fill = titleFill
ws['%s1' % i].alignment = headtext
ws['%s1' % i].protection = protection
ws['%s1' % i].border = border
ws['%s2' % i].border = border
ws['%s1' % i].font = ft
for j in range(3,N+1):
ws['%s%d' % (i, j)].fill = headFill
ws['%s%d' % (i, j)].alignment = headtext
ws['%s%d' % (i, j)].protection = protection
ws['%s%d' % (i, j)].border = border
ws['%s%d' % (i, j)].font = ft
ws['A2'].font = ft_ps
###############################表格二###################################
last_month = Month_trouble_log.query.filter_by(trouble_month=data['last_month_date']).first()
ws.merge_cells('G1:K1')
ws['G1'] = u'本月与上月故障信息对比'
ws['G2'] = u'业务指标'
ws['H2'] = u'运营中心'
ws['I2'] = u'业务模块'
ws['J2'] = u'本月'
ws['K2'] = u'上月'
N = 3
n = 4
if data['trouble_time_AE_all'] > 0 or last_month.trouble_time_AE_all_core > 0:
n = 5
AE = 3
N += n
ws.merge_cells('H%d:H%d' % (AE,(N-1)))
ws['H%d' % AE] = u'亚欧'
n = 4
if data['trouble_time_HT_all'] > 0 or last_month.trouble_time_HT_all_core > 0:
n = 5
HT = N
N += n
ws.merge_cells('H%d:H%d' % (HT,(N-1)))
ws['H%d' % HT] = u'港台'
n = 4
if data['trouble_time_KR_all'] > 0 or last_month.trouble_time_KR_all_core > 0:
n = 5
KR = N
N += n
ws.merge_cells('H%d:H%d' % (KR,(N-1)))
ws['H%d' % KR] = u'韩国'
n = 4
if data['trouble_time_CN_all'] > 0 or last_month.trouble_time_CN_all_core > 0:
n = 5
CN = N
N += n
ws.merge_cells('H%d:H%d' % (CN, (N-1)))
ws['H%d' % CN] = u'国内'
n = 4
if data['trouble_time_GB_all'] > 0 or last_month.trouble_time_GB_all_core > 0:
n = 5
GB = N
N += n
ws.merge_cells('H%d:H%d' % (GB, (N-1)))
ws['H%d' % GB] = u'全球'
if data['ALL_row_pk'] > 0:
n = data['ALL_row_pk']
ALL = N
N += n
ws.merge_cells('H%d:H%d' % (ALL, (N-1)))
ws['H%d' % ALL] = u'所有地区'
ws.merge_cells('H%d:I%d' % (N,N))
N += 1
ws.merge_cells('H%d:I%d' % (N,N))
ws.merge_cells('G3:G%d' % N)
ws['G3'] = u'核心业务'
N = 3
#亚欧一级指标
ws['I%d' % N] = u'登陆'
ws['J%d' % N] = data['trouble_time_AE_login']
ws['K%d' % N] = last_month.trouble_time_AE_login_core
N += 1
ws['I%d' % N] = u'储值'
ws['J%d' % N] = data['trouble_time_AE_store']
ws['K%d' % N] = last_month.trouble_time_AE_store_core
N += 1
ws['I%d' % N] = u'注册'
ws['J%d' % N] = data['trouble_time_AE_register']
ws['K%d' % N] = last_month.trouble_time_AE_register_core
N += 1
ws['I%d' % N] = u'游戏故障'
ws['J%d' % N] = data['trouble_time_AE_game']
ws['K%d' % N] = last_month.trouble_time_AE_game_core
if data['trouble_time_AE_all'] > 0 or last_month.trouble_time_AE_all_core > 0:
N += 1
ws['I%d' % N] = u'ALL'
ws['J%d' % N] = data['trouble_time_AE_all']
ws['K%d' % N] = last_month.trouble_time_AE_all_core
# 港台一级指标
N += 1
ws['I%d' % N] = u'登陆'
ws['J%d' % N] = data['trouble_time_HT_login']
ws['K%d' % N] = last_month.trouble_time_HT_login_core
N += 1
ws['I%d' % N] = u'储值'
ws['J%d' % N] = data['trouble_time_HT_store']
ws['K%d' % N] = last_month.trouble_time_HT_store_core
N += 1
ws['I%d' % N] = u'注册'
ws['J%d' % N] = data['trouble_time_HT_register']
ws['K%d' % N] = last_month.trouble_time_HT_register_core
N += 1
ws['I%d' % N] = u'游戏故障'
ws['J%d' % N] = data['trouble_time_HT_game']
ws['K%d' % N] = last_month.trouble_time_HT_game_core
if data['trouble_time_HT_all'] > 0 or last_month.trouble_time_HT_all_core > 0:
N += 1
ws['I%d' % N] = u'ALL'
ws['J%d' % N] = data['trouble_time_HT_all']
ws['K%d' % N] = last_month.trouble_time_HT_all_core
# 韩国一级指标
N += 1
ws['I%d' % N] = u'登陆'
ws['J%d' % N] = data['trouble_time_KR_login']
ws['K%d' % N] = last_month.trouble_time_KR_login_core
N += 1
ws['I%d' % N] = u'储值'
ws['J%d' % N] = data['trouble_time_KR_store']
ws['K%d' % N] = last_month.trouble_time_KR_store_core
N += 1
ws['I%d' % N] = u'注册'
ws['J%d' % N] = data['trouble_time_KR_register']
ws['K%d' % N] = last_month.trouble_time_KR_register_core
N += 1
ws['I%d' % N] = u'游戏故障'
ws['J%d' % N] = data['trouble_time_KR_game']
ws['K%d' % N] = last_month.trouble_time_KR_game_core
if data['trouble_time_KR_all'] > 0 or last_month.trouble_time_KR_all_core > 0:
N += 1
ws['I%d' % N] = u'ALL'
ws['J%d' % N] = data['trouble_time_KR_all']
ws['K%d' % N] = last_month.trouble_time_KR_all_core
# 国内一级指标
N += 1
ws['I%d' % N] = u'登陆'
ws['J%d' % N] = data['trouble_time_CN_login']
ws['K%d' % N] = last_month.trouble_time_CN_login_core
N += 1
ws['I%d' % N] = u'储值'
ws['J%d' % N] = data['trouble_time_CN_store']
ws['K16'] = last_month.trouble_time_CN_store_core
N += 1
ws['I%d' % N] = u'注册'
ws['J%d' % N] = data['trouble_time_CN_register']
ws['K%d' % N] = last_month.trouble_time_CN_register_core
N += 1
ws['I%d' % N] = u'游戏故障'
ws['J%d' % N] = data['trouble_time_CN_game']
ws['K%d' % N] = last_month.trouble_time_CN_game_core
if data['trouble_time_CN_all'] > 0 or last_month.trouble_time_CN_all_core > 0:
N += 1
ws['I%d' % N] = u'ALL'
ws['J%d' % N] = data['trouble_time_CN_all']
ws['K%d' % N] = last_month.trouble_time_CN_all_core
# 全球一级指标
N += 1
ws['I%d' % N] = u'登陆'
ws['J%d' % N] = data['trouble_time_GB_login']
ws['K%d' % N] = last_month.trouble_time_GB_login_core
N += 1
ws['I%d' % N] = u'储值'
ws['J%d' % N] = data['trouble_time_GB_store']
ws['K%d' % N] = last_month.trouble_time_GB_store_core
N += 1
ws['I%d' % N] = u'注册'
ws['J%d' % N] = data['trouble_time_GB_register']
ws['K%d' % N] = last_month.trouble_time_GB_register_core
N += 1
ws['I%d' % N] = u'游戏故障'
ws['J%d' % N] = data['trouble_time_GB_game']
ws['K%d' % N] = last_month.trouble_time_GB_game_core
if data['trouble_time_GB_all'] > 0 or last_month.trouble_time_GB_all_core > 0:
N += 1
ws['I%d' % N] = u'ALL'
ws['J%d' % N] = data['trouble_time_GB_all']
ws['K%d' % N] = last_month.trouble_time_GB_all_core
if data['ALL_row_pk'] >0:
if data['trouble_time_ALL_login'] > 0 or last_month.trouble_time_ALL_login_core > 0:
N += 1
ws['I%d' % N] = u'登陆'
ws['J%d' % N] = data['trouble_time_ALL_login']
ws['K%d' % N] = last_month.trouble_time_ALL_login_core
if data['trouble_time_ALL_store'] > 0 or last_month.trouble_time_ALL_store_core > 0:
N += 1
ws['I%d' % N] = u'储值'
ws['J%d' % N] = data['trouble_time_ALL_store']
ws['K%d' % N] = last_month.trouble_time_ALL_store_core
if data['trouble_time_ALL_register'] > 0 or last_month.trouble_time_ALL_register_core > 0:
N += 1
ws['I%d' % N] = u'注册'
ws['J%d' % N] = data['trouble_time_ALL_register']
ws['K%d' % N] = last_month.trouble_time_ALL_register_core
if data['trouble_time_ALL_game'] > 0 or last_month.trouble_time_ALL_game_core > 0:
N += 1
ws['I%d' % N] = u'游戏故障'
ws['J%d' % N] = data['trouble_time_ALL_game']
ws['K%d' % N] = last_month.trouble_time_ALL_game_core
if data['trouble_time_ALL_all'] > 0 or last_month.trouble_time_ALL_all_core > 0:
N += 1
ws['I%d' % N] = u'ALL'
ws['J%d' % N] = data['trouble_time_ALL_all']
ws['K%d' % N] = last_month.trouble_time_ALL_all_core
N += 1
ws['H%d' % N] = u'合计(核心业务)'
ws['J%d' % N] = data['trouble_time_is_core']
ws['K%d' % N] = last_month.trouble_time_is_core
N += 1
ws['H%d' % N] = u'时长占比'
ws['J%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_is_core']) / data['month_time']) * 100)
ws['K%d' % N] = "%.2f%%" % ((1 - float(last_month.trouble_time_is_core) / data['month_time']) * 100)
two_n = N +1
N += 1
if data['list_AE']:
AE_n = N
for i in data['list_AE']:
ws['I%d' % N] = i[0]
ws['J%d' % N] = i[1]
ws['K%d' % N] = i[2]
N += 1
ws.merge_cells('H%d:H%d' % (AE_n,(N-1)))
ws['H%d' % AE_n] = u'亚欧'
if data['list_HT']:
HT_n = N
for i in data['list_HT']:
ws['I%d' % N] = i[0]
ws['J%d' % N] = i[1]
ws['K%d' % N] = i[2]
N += 1
ws.merge_cells('H%d:H%d' % (HT_n,(N-1)))
ws['H%d' % HT_n] = u'港台'
if data['list_KR']:
KR_n = N
for i in data['list_KR']:
ws['I%d' % N] = i[0]
ws['J%d' % N] = i[1]
ws['K%d' % N] = i[2]
N += 1
ws.merge_cells('H%d:H%d' % (KR_n,(N-1)))
ws['H%d' % KR_n] = u'韩国'
if data['list_CN']:
CN_n = N
for i in data['list_CN']:
ws['I%d' % N] = i[0]
ws['J%d' % N] = i[1]
ws['K%d' % N] = i[2]
N += 1
ws.merge_cells('H%d:H%d' % (CN_n,(N-1)))
ws['H%d' % CN_n] = u'国内'
if data['list_GB']:
GB_n = N
for i in data['list_GB']:
ws['I%d' % N] = i[0]
ws['J%d' % N] = i[1]
ws['K%d' % N] = i[2]
N += 1
ws.merge_cells('H%d:H%d' % (GB_n,(N-1)))
ws['H%d' % GB_n] = u'全球'
ws.merge_cells('H%d:I%d' % (N,N))
ws['H%d' % N]= u'合计(非核心业务)'
ws['J%d' % N] = data['trouble_time_not_core']
ws['K%d' % N] = last_month.trouble_time_not_core
N += 1
ws.merge_cells('H%d:I%d' % (N,N))
ws['H%d' % N] = u'时长占比'
ws['J%d' % N] = "%.2f%%" % ((1 - float(data['trouble_time_not_core']) / data['month_time']) * 100)
ws['K%d' % N] = "%.2f%%" % ((1 - float(last_month.trouble_time_not_core) / data['month_time']) * 100)
ws.merge_cells('G%d:G%d' % (two_n, N))
ws['G%d' % two_n] = '非核心业务'
N += 1
ws.merge_cells('G%d:I%d' % (N,N))
ws['G%d' % N] = u'合计(总)'
ws['J%d' % N] = data['trouble_time_is_core'] + data['trouble_time_not_core']
ws['K%d' % N] = int(last_month.trouble_time_is_core) + int(last_month.trouble_time_not_core)
N += 1
ws.merge_cells('G%d:I%d' % (N, N))
ws['G%d' % N] = u'总时长占比'
ws['J%d' % N] = "%.2f%%" % ((1 - float((data['trouble_time_is_core'] + data['trouble_time_not_core'])) / data['month_time']) * 100)
ws['K%d' % N] = "%.2f%%" % ((1 - float((int(last_month.trouble_time_is_core) + int(last_month.trouble_time_not_core))) / data['month_time']) * 100)
for i in "GHIJK":
ws.column_dimensions[i].width = 20.0
ws['%s1' % i].fill = titleFill
ws['%s1' % i].alignment = headtext
ws['%s1' % i].protection = protection
ws['%s1' % i].border = border
ws['%s2' % i].border = border
ws['%s3' % i].border = border
ws['%s1' % i].font = ft
for n in range(2,N+1):
ws['%s%d' % (i,n)].fill = headFill
ws['%s%d' % (i, n)].alignment = headtext
ws['%s%d' % (i, n)].protection = protection
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].font = ft
###############################表格三###################################
inner_N= N + 7
ws.merge_cells('A%d:K%d' % (inner_N, inner_N))
ws['A%d' % inner_N] = '内外故障信息统计'
ws.merge_cells('A%d:B%d' % (inner_N+1, inner_N+1))
ws['A%d' % (inner_N +1)] = '归属'
ws['C%d' % (inner_N + 1)] = '故障时间'
ws['D%d' % (inner_N + 1)] = '占比'
ws.merge_cells('A%d:A%d' % (inner_N + 2, inner_N + 4))
ws['A%d' % (inner_N + 2)] = '内部故障'
ws['B%d' % (inner_N + 2)] = '运维'
ws['C%d' % (inner_N + 2)] = data['trouble_time_yw_inner']
ws['D%d' % (inner_N + 2)] = "%.2f%%" % ((float(data['trouble_time_yw_inner']) / data['trouble_time_all']) * 100)
ws['B%d' % (inner_N + 3)] = '业务开发'
ws['C%d' % (inner_N + 3)] = data['trouble_time_ywkf_inner']
ws['D%d' % (inner_N + 3)] = "%.2f%%" % ((float(data['trouble_time_ywkf_inner']) / data['trouble_time_all']) * 100)
ws['B%d' % (inner_N + 4)] = '基础开发'
ws['C%d' % (inner_N + 4)] = data['trouble_time_jckf_inner']
ws['D%d' % (inner_N + 4)] = "%.2f%%" % ((float(data['trouble_time_jckf_inner']) / data['trouble_time_all']) * 100)
ws['B%d' % (inner_N + 5)] = '运营'
ws['C%d' % (inner_N + 5)] = data['trouble_time_yy']
ws['D%d' % (inner_N + 5)] = "%.2f%%" % ((float(data['trouble_time_yy']) / data['trouble_time_all']) * 100)
ws['A%d' % (inner_N + 6)] = '外部故障'
ws['B%d' % (inner_N + 6)] = '第三方'
ws['C%d' % (inner_N + 6)] = data['trouble_time_out']
ws['D%d' % (inner_N + 6)] = "%.2f%%" % ((float(data['trouble_time_out']) / data['trouble_time_all']) * 100)
ws.merge_cells('A%d:B%d' % (inner_N+7, inner_N+7))
ws['A%d' % (inner_N +7)] = '合计'
ws['C%d' % (inner_N + 7)] = data['trouble_time_all']
ws['D%d' % (inner_N + 7)] = "%.2f%%" % ((float(data['trouble_time_all']) / data['trouble_time_all']) * 100)
for i in "ABCD":
for n in range(inner_N,inner_N+8):
ws['%s%d' % (i, n)].fill = headFill
ws['%s%d' % (i, n)].alignment = headtext
ws['%s%d' % (i, n)].protection = protection
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].font = ft
for i in "ABCDEFGHIJK":
ws['%s%d' % (i,inner_N)].fill = titleFill
###############################表格四###################################
branch_N = inner_N + 16
ws.merge_cells('A%d:K%d' % (branch_N, branch_N))
ws['A%d' % branch_N] = u'各部门故障信息统计'
ws['A%d' % (branch_N + 1)] = u'归属'
ws['B%d' % (branch_N + 1)] = u'负责人'
ws['C%d' % (branch_N + 1)] = u'核心服务'
ws['D%d' % (branch_N + 1)] = u'非核心服务'
ws['E%d' % (branch_N + 1)] = u'小计'
ws['A%d' % (branch_N + 2)] = '运维'
ws['B%d' % (branch_N + 2)] = '钱建峰'
ws['C%d' % (branch_N + 2)] = data['trouble_time_yw_core']
ws['D%d' % (branch_N + 2)] = data['trouble_time_yw_ncore']
ws['E%d' % (branch_N + 2)] = data['trouble_time_yw']
ws['A%d' % (branch_N + 3)] = '业务开发'
ws['B%d' % (branch_N + 3)] = '肖朋'
ws['C%d' % (branch_N + 3)] = data['trouble_time_ywkf_core']
ws['D%d' % (branch_N + 3)] = data['trouble_time_ywkf_ncore']
ws['E%d' % (branch_N + 3)] = data['trouble_time_ywkf']
ws['A%d' % (branch_N + 4)] = '基础开发'
ws['B%d' % (branch_N + 4)] = '黄谦'
ws['C%d' % (branch_N + 4)] = data['trouble_time_jckf_core']
ws['D%d' % (branch_N + 4)] = data['trouble_time_jckf_ncore']
ws['E%d' % (branch_N + 4)] = data['trouble_time_jckf']
ws['A%d' % (branch_N + 5)] = '运营'
ws['B%d' % (branch_N + 5)] = '运营'
ws['C%d' % (branch_N + 5)] = data['trouble_time_yy_core']
ws['D%d' % (branch_N + 5)] = data['trouble_time_yy_ncore']
ws['E%d' % (branch_N + 5)] = data['trouble_time_yy']
ws['A%d' % (branch_N + 6)] = '第三方'
ws['B%d' % (branch_N + 6)] = '第三方'
ws['C%d' % (branch_N + 6)] = data['trouble_time_dsf_core']
ws['D%d' % (branch_N + 6)] = data['trouble_time_dsf_ncore']
ws['E%d' % (branch_N + 6)] = data['trouble_time_dsf']
ws.merge_cells('A%d:B%d' % (branch_N + 7, branch_N + 7))
ws['A%d' % (branch_N + 7)] = '合计'
ws['C%d' % (branch_N + 7)] = data['trouble_time_core']
ws['D%d' % (branch_N + 7)] = data['trouble_time_ncore']
ws['E%d' % (branch_N + 7)] = data['trouble_time_all']
for i in "ABCDE":
for n in range(branch_N, branch_N + 8):
ws['%s%d' % (i, n)].fill = headFill
ws['%s%d' % (i, n)].alignment = headtext
ws['%s%d' % (i, n)].protection = protection
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].font = ft
for i in "ABCDEFGHIJK":
ws['%s%d' % (i, branch_N)].fill = titleFill
###############################表格五###################################
type_N= branch_N + 16
ws.merge_cells('A%d:K%d' % (type_N, type_N))
ws['A%d' % type_N] = '各类型故障信息统计'
ws['A%d' % (type_N + 1)] = '故障类型'
ws['B%d' % (type_N + 1)] = '故障时间'
ws['C%d' % (type_N + 1)] = '占比'
ws['A%d' % (type_N + 2)] = '服务器类型故障'
ws['B%d' % (type_N + 2)] = data['trouble_time_server']
ws['C%d' % (type_N + 2)] = "%.2f%%" % ((float(data['trouble_time_server']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 3)] = '人为错误'
ws['B%d' % (type_N + 3)] = data['trouble_time_perple']
ws['C%d' % (type_N + 3)] = "%.2f%%" % ((float(data['trouble_time_perple']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 4)] = 'BUG类型故障'
ws['B%d' % (type_N + 4)] = data['trouble_time_bug']
ws['C%d' % (type_N + 4)] = "%.2f%%" % ((float(data['trouble_time_bug']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 5)] = '安全类型故障'
ws['B%d' % (type_N + 5)] = data['trouble_time_safe']
ws['C%d' % (type_N + 5)] = "%.2f%%" % ((float(data['trouble_time_safe']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 6)] = '偶然性故障'
ws['B%d' % (type_N + 6)] = data['trouble_time_once']
ws['C%d' % (type_N + 6)] = "%.2f%%" % ((float(data['trouble_time_once']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 7)] = '网络故障'
ws['B%d' % (type_N + 7)] = data['trouble_time_net']
ws['C%d' % (type_N + 7)] = "%.2f%%" % ((float(data['trouble_time_net']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 8)] = '第三方故障'
ws['B%d' % (type_N + 8)] = data['trouble_time_dsf_t']
ws['C%d' % (type_N + 8)] = "%.2f%%" % ((float(data['trouble_time_dsf_t']) / data['trouble_time_all']) * 100)
ws['A%d' % (type_N + 9)] = '合计'
ws['B%d' % (type_N + 9)] = data['trouble_time_all']
ws['C%d' % (type_N + 9)] = "%d%%" % 100
for i in "ABC":
for n in range(type_N,type_N+10):
ws['%s%d' % (i,n)].fill = headFill
ws['%s%d' % (i, n)].alignment = headtext
ws['%s%d' % (i, n)].protection = protection
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].font = ft
for i in "ABCDEFGHIJK":
ws['%s%d' % (i, type_N)].fill = titleFill
#画饼图
pie = PieChart()
labels = Reference(ws, min_col=2, min_row=inner_N + 2, max_row=inner_N + 6)
data_1 = Reference(ws, min_col=3, min_row=inner_N + 2, max_row=inner_N + 6)
pie.add_data(data_1, titles_from_data=False)
pie.set_categories(labels)
pie.title = "内外故障信息统计"
ws.add_chart(pie, "G%d" % (inner_N + 1))
pie = PieChart()
labels = Reference(ws, min_col=2, min_row=branch_N + 2, max_row=branch_N + 6)
data_1 = Reference(ws, min_col=5, min_row=branch_N + 2, max_row=branch_N + 6)
pie.add_data(data_1, titles_from_data=False)
pie.set_categories(labels)
pie.title = "各部门故障信息统计"
ws.add_chart(pie, "G%d" % (branch_N + 1))
pie = PieChart()
labels = Reference(ws, min_col=1, min_row=type_N+2, max_row=type_N+8)
data_1 = Reference(ws, min_col=2, min_row=type_N+2, max_row=type_N+8)
pie.add_data(data_1, titles_from_data=False)
pie.set_categories(labels)
pie.title = "各类型故障信息统计"
ws.add_chart(pie, "G%d" % (type_N+1))
###################################################################################
ws = wb.create_sheet(u'故障详细列表', 1)
# 设置行高
ws.row_dimensions[1].height = 40.0
ws.row_dimensions[2].height = 40.0
# ws['A1']=head
trouble_infos = Trouble_repo.query.filter(Trouble_repo.trouble_date.ilike("%s%%" % data['this_month']),Trouble_repo.trouble_status==u'完成').order_by(Trouble_repo.trouble_date)
trouble_list = []
trouble_times_core = 0
trouble_times_ncore = 0
for i in trouble_infos:
if i.isnot_core == '是':
try:
affect_time = int(i.affect_time)
except:
affect_time = 0
trouble_times_core += affect_time
else:
try:
affect_time = int(i.affect_time)
except:
affect_time = 0
trouble_times_ncore += affect_time
List = [i.trouble_date, i.operating_center, i.business_module, i.trouble_affair, i.affect_scope, i.isnot_inner,
i.affect_time, i.isnot_experience, i.affect_user, i.affect_money,
i.data_source, i.isnot_core, i.trouble_type, i.heading_user, i.trouble_attr, i.trouble_status,
i.trouble_cause, i.whith_process, i.lesson_course, i.improve]
trouble_list.append(List)
stab_per_core = 0
stab_per_ncore = 0
stab_per_core = "%.2f" % (100 - float(trouble_times_core)/data['month_time']*100)
stab_per_ncore = "%.2f" % (100 - float(trouble_times_ncore) / data['month_time'] * 100)
ws['B1'] = u'核心服务故障时间:%s分钟 ' % trouble_times_core
ws['C1'] = u'稳定性:%s%%' % stab_per_core
ws['F1'] = u'非核心服务故障时间:%s分钟 ' % trouble_times_ncore
ws['G1'] = u'稳定性:%s%%' % stab_per_ncore
head_text = [u"日期", u"运营中心", u"业务模块", u"事件", u"影响范围", u"是否内部故障", u"影响时长(分钟)", u"是否影响用户体验", u"影响用户", u"直接经济损失(美元)",
u"数据来源", u"是否核心服务", u"故障类型", u"处理负责人", u"归属", u"状态", u"故障原因", u"处理过程", u"教训总结", u"改进"]
ws.append(head_text)
ws['C2'].comment = Comment(text="游戏明\n储值\n登陆\n后台\n所有", author="业务模块")
ws['D2'].comment = Comment(text="描素事件现象", author="事件")
ws['F2'].comment = Comment(text="如网络、CDN、原厂、渠道等外部因素引起的故障都不是内部故障", author="是否内部故障")
ws['I2'].comment = Comment(text="1、昨天在线数据-今天储值数据;\n2、如数据值为负,表示故障时段数据比昨天上升,则故障影响不大;\n3、若数据为0,则表示无影响。", author="影响用户")
ws['J2'].comment = Comment(text="1、昨天储值数据-今天储值数据;\n2、如数据值为负,表示故障时段数据比昨天上升,则故障影响不大 \n3、若数据为0,则表示无影响", author="经济损失")
n = 3
for trouble in trouble_list:
ws.append(trouble)
for i in 'ABCDEFGHIJKLMNOPQRST':
ws['%s%d' % (i, n)].border = border
ws['%s%d' % (i, n)].alignment = bodytext
ws['%s%d' % (i, n)].protection = protection
ws['Q%d' % n].alignment = Alignment(horizontal='general', vertical='center', wrap_text=True)
ws['R%d' % n].alignment = Alignment(horizontal='general', vertical='center', wrap_text=True)
ws['S%d' % n].alignment = Alignment(horizontal='general', vertical='center', wrap_text=True)
ws['T%d' % n].alignment = Alignment(horizontal='general', vertical='center', wrap_text=True)
n += 1
for i in 'ABCDEFGHIJKLMNOPQRST':
ws['%s1' % i].fill = titleFill
ws['%s2' % i].fill = headFill
ws['%s2' % i].border = border
ws['%s1' % i].font = ft
ws['%s2' % i].font = ft
ws['%s1' % i].alignment = headtext
ws['%s2' % i].alignment = headtext
ws['%s1' % i].protection = protection
# 设置宽度
ws.column_dimensions[i].width = 30.0
ws.column_dimensions['I'].width = 32.0
ws['F2'].fill = PatternFill(start_color='e26b0a', end_color='e26b0a', fill_type='solid')
ws['G2'].fill = PatternFill(start_color='e26b0a', end_color='e26b0a', fill_type='solid')
# 正式环境
# title = u'/opt/Flask/app/static/file/%s月故障分析.xlsx' % data['this_month']
# 测试环境
title = u'%s/app/static/files/report/%s月故障分析.xlsx' % (basedir,data['this_month'])
wb.save(title)
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,781 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/main/system_manage.py | #coding: utf-8
import time,datetime,json,re,calendar,sys
from sqlalchemy import or_
from app.main import main
from app import db, csrf
from flask_login import login_required
from app.decorators import permission_required
from flask import render_template, flash, redirect, session, url_for, request,Response
from app.models import Sections, Permission_Model, Permission,Maintenance,Zabbix_group
from app.scripts.zabbix import zabbix_login,get_api_data
from app.scripts.tools import delete_dbs
sys.path.append('../..')
import config
@main.route('/sysmg', methods=['POST','GET'])
@permission_required(Permission.user, path='/sysmg', app='main')
@login_required
@csrf.exempt
# Fault-report display page (translated from: 故障报告展示)
def sysmg():
    """Render the system-management page.

    Access requires login plus the ``Permission.user`` permission for
    path ``/sysmg`` (enforced by the decorators above); CSRF checking is
    disabled for this endpoint via ``csrf.exempt``.

    NOTE(review): ``**locals()`` passes whatever local names exist at the
    ``return`` — currently none — so the template receives no variables;
    confirm whether context variables were intended here.
    """
    return render_template('system_manage.html', **locals())
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,782 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /app/scripts/zabbix.py | #!/usr/bin/env python
#ecoding:utf-8
import json, sys, urllib2, re, time
import config
reload(sys)
sys.setdefaultencoding('utf-8')
class zabbix_API():
    """Minimal JSON-RPC client for a Zabbix server.

    Credentials come from the project-level ``config`` module for the
    ``'test'`` and ``'application'`` actions, or from the explicit keyword
    arguments when ``action='auth'``.
    """

    def __init__(self, action='application', zabbix_user=None, zabbix_password=None, zabbix_server=None):
        # 'test' and 'application' used to be two byte-identical branches;
        # both read the shared credentials from config, so they are merged.
        if action in ('test', 'application'):
            self.zabbix_user = config.zabbix_user
            self.zabbix_password = config.zabbix_pwd
            self.zabbix_server = config.zabbix_server
        elif action == 'auth' and zabbix_user and zabbix_password and zabbix_server:
            # NOTE(review): any other action (or 'auth' with missing
            # credentials) leaves the instance unconfigured, exactly as
            # the original did.
            self.zabbix_user = zabbix_user
            self.zabbix_password = zabbix_password
            self.zabbix_server = zabbix_server

    def login(self):
        """Authenticate; return the API auth token, or False when no result is returned."""
        user_info = {'user': self.zabbix_user, 'password': self.zabbix_password}
        obj = {"jsonrpc": "2.0", "method": 'user.login', "params": user_info, "id": 0}
        json_obj = json.dumps(obj)
        content = self.postRequest(json_obj)
        try:
            return content['result']
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; failure semantics unchanged.
            return False

    def postRequest(self, json_obj):
        """POST *json_obj* to ``<server>/api_jsonrpc.php`` and return the decoded JSON reply."""
        header = {'Content-Type': 'application/json-rpc', 'User-Agent': 'python/zabbix_api'}
        url = '%s/api_jsonrpc.php' % self.zabbix_server
        request = urllib2.Request(url, json_obj, header)
        response = urllib2.urlopen(request)
        try:
            return json.loads(response.read())
        finally:
            # Close the HTTP response explicitly; the original leaked it.
            response.close()

    def get_json_obj(self, method, params, auth=None):
        """Serialize one JSON-RPC request; logs in implicitly when *auth* is not supplied."""
        if auth:
            get_obj = {"jsonrpc": "2.0", "method": method, "params": params, "auth": auth, "id": 1}
        else:
            get_obj = {"jsonrpc": "2.0", "method": method, "params": params, "auth": self.login(), "id": 1}
        return json.dumps(get_obj)

    def get_api_data(self, params, method, auth=None):
        """Run one API call and return its ``result``, or None on any failure.

        Best-effort semantics are kept from the original (``except: pass``
        fell off the end and returned None); the except is merely narrowed
        and made explicit.
        """
        get_obje = self.get_json_obj(method, params, auth)
        try:
            return self.postRequest(get_obje)['result']
        except Exception:
            return None
class Efun_Zabbix(zabbix_API):
    """Site-specific convenience wrappers over the raw Zabbix JSON-RPC client."""

    def change_zone_name(self, key, name):
        """Expand ``$N`` placeholders in an item *name* from the item *key*.

        ``$1`` maps to the first comma-separated argument inside the key's
        bracketed ``[...]`` section, ``$2`` to the second, and so on.  A
        name without placeholders is returned unchanged.
        """
        find_macro = re.findall(r'\$\d+', name)
        if not find_macro:
            return name
        change_num = [int(mac.strip('$')) for mac in find_macro]
        # assumes the key carries a [...] argument list whenever the name
        # has placeholders -- TODO confirm against real item keys
        change_list_key = re.findall(r'\[.*\]', key)[0].strip('[]').split(',')
        for zone in change_num:
            name = name.replace('$%s' % zone, change_list_key[zone - 1])
        return name

    def change_macro_name(self, ip):
        """Return the user macros (macro, value) defined on the host at *ip*."""
        params = {"output": ["macro", "value"], "hostids": self.get_interface_hostid(ip)}
        method = "usermacro.get"
        return self.get_api_data(params, method)

    def get_interface_hostid(self, ip):
        """Resolve *ip* to a hostid via the host-interface table (first match; raises if none)."""
        method = "hostinterface.get"
        params = {"output": "extend", "filter": {"ip": ip}}
        return self.get_api_data(params, method)[0]['hostid']

    def get_hostid(self, ip=None, auth=None):
        """List hosts.

        With *ip*: the host whose technical name equals *ip*.  Without it:
        every enabled host (status=0) together with its groups.
        """
        if ip:
            params = {"output": ['hostid', 'name', 'host'], "filter": {'host': ip}}
        else:
            params = {"output": ['hostid', 'name', "status", "groups"], "selectGroups": ["groupid", "name"], "filter": {'status': 0}}
        method = "host.get"
        return self.get_api_data(params, method, auth)

    def get_items(self, ip):
        """Return active, error-free items for the host at *ip*, or False if it cannot be resolved."""
        try:
            # Only the host lookup can raise (IndexError/TypeError on no
            # match); the try block is kept minimal accordingly.
            hostid = self.get_hostid(ip)[0]['hostid']
        except Exception:
            return False
        params = {
            "output": ['itemid', 'key_', 'name', 'description'],
            "hostids": hostid, "filter": {'status': 0, 'error': ''}
        }
        method = "item.get"
        return self.get_api_data(params, method)

    def get_items_names(self, items):
        """Map itemids to their (itemid, key_, name) records."""
        params = {
            "output": ["itemid", "key_", "name"],
            "itemids": items
        }
        method = 'item.get'
        return self.get_api_data(params, method)

    def get_application_items(self, ip):
        """Return the host's applications together with their active, error-free items."""
        params = {
            "output": "extend", "hostids": self.get_interface_hostid(ip),
            "selectItems": ['itemid', 'key_', 'name'],
            "filter": {'status': 0, 'error': ''}
        }
        method = "application.get"
        return self.get_api_data(params, method)

    def get_items_value(self, items):
        """Return the last value of each given itemid (normal-state items only)."""
        params = {
            "output": ["itemid", "lastvalue", "key_", "name"], "itemids": items,
            "filter": {"state": 0}
        }
        method = 'item.get'
        return self.get_api_data(params, method)

    def get_items_trigger(self, items):
        """Return enabled triggers currently in PROBLEM state (value=1) for the given itemids."""
        params = {
            "output": "extend",
            "itemids": items,
            "filter": {"value": 1, "status": 0},
            "selectItems": ["itemid", "status"],
            "expandDescription": ""
        }
        method = 'trigger.get'
        return self.get_api_data(params, method)

    def get_hostids_trigger(self, hostids, auth=None, maintenance=False):
        """Return enabled PROBLEM triggers for *hostids*.

        ``value=1`` selects triggers in the problem state, ``status=0``
        restricts to enabled triggers, and *maintenance* controls whether
        hosts inside a maintenance window are included.
        """
        params = {"output": ["triggerid", "description", "priority", "value", "status", "lastchange"],
                  "hostids": hostids,
                  "maintenance": maintenance,
                  "selectHosts": ["hostid", "status", "error", "name"],
                  "selectItems": ["itemid", "lastns", "lastclock", "units", "hostid", "lifetime", "error", "history", "status", "name", "key_", "prevvalue"],
                  "selectGroups": "groupid",
                  "expandDescription": "",
                  "filter": {"value": 1, "status": 0}}
        method = 'trigger.get'
        return self.get_api_data(params, method, auth)

    def is_ack(self, triggerids, auth=None):
        """Fetch events (newest first) with their acknowledgements, to tell whether an alarm was acked."""
        params = {
            "output": "extend",
            "objectids": triggerids,
            "select_acknowledges": "extend",
            "sortfield": ["clock", "eventid"],
            "sortorder": "DESC"
        }
        method = 'event.get'
        return self.get_api_data(params, method, auth)

    def get_hostgroup_name(self, search):
        """Search host groups and return matching (groupid, name) records."""
        params = {"output": ["groupid", "name"], "search": search}
        method = 'hostgroup.get'
        return self.get_api_data(params, method)

    def get_ures_names(self, filter):
        """Return user details matching *filter*.

        The parameter name shadows the ``filter`` builtin but is kept for
        caller compatibility.
        """
        params = {"output": ["surname", "name", "alias", "userid"], "filter": filter}
        method = "user.get"
        return self.get_api_data(params, method)

    def get_usergroup_info(self, username):
        """Return user details for the given login name.

        BUGFIX: this method previously passed the ``filter`` *builtin*
        (a copy-paste from ``get_ures_names``) as the API filter, which is
        not JSON-serializable, so every call failed.  It now filters on
        the Zabbix login name (``alias``).
        NOTE(review): confirm ``alias`` is the intended lookup field.
        """
        params = {"output": ["surname", "name", "alias", "userid"], "filter": {"alias": username}}
        method = "user.get"
        return self.get_api_data(params, method)

    def hostid_to_graphids(self, hostid):
        """Return every graph defined on the given host."""
        params = {"output": "extend", "hostids": hostid}
        method = "graph.get"
        return self.get_api_data(params, method)

    def userid_get_groupid(self, userid):
        """Return the enabled user groups (with member users) that *userid* belongs to."""
        params = {"output": "extend", "status": 0, "selectUsers": "extend", "userids": userid}
        method = "usergroup.get"
        return self.get_api_data(params, method)

    def itemid_to_history(self, itemid):
        """Return up to 10 recent history rows (newest first) for *itemid*.

        Covers the last 30 minutes (the original Chinese comment said one
        hour, but ``time_from`` is now - 60*30 seconds).  Zabbix stores
        history per value type, so types 0-4 are tried in turn until one
        yields data; returns None implicitly when none do.
        """
        now_time = int(time.time())
        params = {"output": "extend",
                  "itemids": itemid,
                  "time_from": now_time - 60 * 30,
                  "time_till": now_time,
                  "sortfield": "clock",
                  "sortorder": "DESC",
                  "limit": 10}
        method = "history.get"
        for hi in [0, 1, 2, 3, 4]:
            params['history'] = hi
            data = self.get_api_data(params, method)
            if data:
                return data
# Ad-hoc manual smoke test (Python 2 print syntax): run this module
# directly to query recent history for one hard-coded item id against
# the Zabbix server configured in ``config``.
if __name__=='__main__':
    a = Efun_Zabbix()
    print 'x'*40
print a.itemid_to_history('27650') | {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
55,783 | wenyongxin/moniotr_system_v0.2 | refs/heads/master | /test/aa.py | a = {
'103.227.128.16':
{
'47.91.163.77':[
'login.efun.com',
'ptlogin.efun.com',
'loginsea.efunsea.com',
'ptlogin.efunsea.com',
'https://sdksea-file-download.efunsea.com/policy/all/en_US/efun.html'
],
'47.91.163.76':[
'award.efun.com',
'awardip.efun.com',
'exchange.efun.com',
'exchangeip.efun.com',
'pay.efun.com',
'payip.efun.com',
'ptpay.efun.com',
'paysea.efunsea.com'
],
'47.90.109.102':[
'login.aseugame.com',
'loginsea.aseugame.com',
'ptlogin.aseugame.com',
'ptloginsea.aseugame.com',
'https://sdksea-file-download.aseugame.com/policy/all/en_US/efun.html'
],
'47.90.109.103':[
'award.aseugame.com',
'pay.aseugame.com',
'paysea.aseugame.com'
],
'47.90.109.104':[
'activity.aseugame.com',
'e.aseugame.com',
'fb.aseugame.com',
'fbsea.aseugame.com',
'game.aseugame.com',
'gamesea.aseugame.com',
'pf.aseugame.com',
'pfid.aseugame.com',
'pfnvn.aseugame.com'
],
'47.90.120.12':[
'pf-assist.efunen.com',
'push.efun.com'
],
'47.91.159.181':[
'activity-assist.efun.com'
],
'103.227.130.141':[
'ad-h5.eflygame.com',
'h5.eflygame.com',
'www.eflygame.com',
'login-h5.eflygame.com',
'pay-h5.eflygame.com',
'user-h5.eflygame.com',
'pf-h5.eflygame.com',
'lua-h5.eflygame.com'
]
},
'50.18.119.120':
{
'52.53.74.206':[
'activity.app4gamer.com',
'activity-assist.app4gamer.com',
'activity-assist.efunsa.com',
'activity.efunsa.com',
'ad.efunsa.com',
'adus.app4gamer.com',
'assist.efunsa.com',
'exchange.efunsa.com',
'fb.app4gamer.com',
'fb.efunsa.com',
'game.app4gamer.com',
'game.efunsa.com',
'login.app4gamer.com',
'login.efunsa.com',
'm.app4gamer.com',
'm.efunsa.com',
'pay.app4gamer.com',
'pay.efunsa.com',
'pf.app4gamer.com',
'pf-assist.app4gamer.com',
'pf-assist.efunsa.com',
'pf.efunsa.com',
'https://sdksa-file-download.app4gamer.com/policy/all/en_US/efun.html',
'https://sdksa-file-download.efunsa.com/policy/all/en_US/efun.html',
'toosl.efunsa.com',
'www.app4gamer.com',
'www.efunsa.com'
],
'52.52.168.218':[
'adsa.app4gamer.com',
'adsa.aseugame.com',
'assistsa.app4gamer.com',
'assistsa.aseugame.com',
'exchangesa.app4gamer.com',
'exchangesa.aseugame.com',
'fbsa.app4gamer.com',
'fbsa.aseugame.com',
'gamesa.app4gamer.com',
'gamesa.aseugame.com',
'loginsa.app4gamer.com',
'loginsa.aseugame.com',
'paysa.app4gamer.com',
'paysa.aseugame.com',
'https://sdksa-file-download.aseugame.com/policy/all/en_US/efun.html',
'toolssa.app4gamer.com',
'toolssa.aseugame.com'
]
},
'203.69.109.117':{
'52.197.36.117':[
'ad.efunjp.com',
'ad.reginaet.jp',
'assist.efunjp.com',
'assist.reginaet.jp',
'exchange.efunjp.com',
'exchange.reginaet.jp',
'fb.efunjp.com',
'fb.reginaet.jp',
'game.efunjp.com',
'game.reginaet.jp',
'login.efunjp.com',
'login.reginaet.jp',
'pay.efunjp.com',
'pay.reginaet.jp',
'sdk-file-download.efunjp.com',
'sdk-file-download.reginaet.jp',
'tools.efunjp.com'
]
},
'58.229.180.29':{
'52.79.191.172':[
'activity-assist.yh666kr.com',
'activity.yh666kr.com',
'ad.yh666kr.com',
'assist.yh666kr.com',
'esou.yh666kr.com',
'exchange.yh666kr.com',
'fb.yh666kr.com',
'game.yh666kr.com',
'login.yh666kr.com',
'pay.yh666kr.com',
'pf.yh666kr.com',
'tools.yh666kr.com',
'www.yh666kr.com'
],
'58.229.184.41':[
'activity.chaseol.com',
'ad.chaseol.com',
'assist.chaseol.com',
'exchange.chaseol.com',
'fb.chaseol.com',
'game.chaseol.com',
'log.chaseol.com',
'login.chaseol.com',
'pay.chaseol.com',
'pf-assist.chaseol.com',
'pf.chaseol.com',
'sdk-file-download.chaseol.com',
'www.chaseol.com',
'activity-assist.chaseol.com'
]
}
}
| {"/app/decorators.py": ["/app/models.py", "/app/scripts/tools.py"], "/app/report/export_excel.py": ["/app/models.py", "/config.py"], "/app/main/system_manage.py": ["/app/__init__.py", "/app/decorators.py", "/app/models.py", "/app/scripts/zabbix.py", "/app/scripts/tools.py", "/config.py"], "/app/auth/func.py": ["/app/__init__.py", "/app/models.py", "/app/business/models.py", "/app/decorators.py"], "/app/filter.py": ["/config.py", "/app/scripts/redis_manage.py", "/app/scripts/tools.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/auth/login.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py", "/config.py"], "/app/report/views.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/auth/permission.py": ["/app/__init__.py", "/app/models.py", "/app/scripts/tools.py", "/app/decorators.py"], "/app/report/trouble_repo.py": ["/app/report/__init__.py", "/app/__init__.py", "/app/decorators.py", "/app/models.py", "/config.py"], "/app/models.py": ["/app/__init__.py", "/config.py"], "/tasks.py": ["/manage.py", "/app/business/models.py", "/config.py", "/app/scripts/tools.py", "/app/scripts/redis_manage.py", "/app/scripts/zabbix_manage.py", "/app/scripts/time_manage.py"], "/app/business/models.py": ["/app/__init__.py"], "/manage.py": ["/app/__init__.py", "/app/models.py"], "/app/main/views.py": ["/app/__init__.py", "/app/models.py"], "/app/__init__.py": ["/config.py", "/app/report/__init__.py", "/app/business/__init__.py", "/app/monitor/__init__.py"], "/app/scripts/tools.py": ["/config.py", "/app/__init__.py", "/app/models.py"], "/app/monitor/models.py": ["/app/__init__.py"], "/app/scripts/redis_manage.py": ["/config.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.