index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
15,917
|
prudhvikumar22/proper_modern_webui_automation
|
refs/heads/master
|
/tests/conftest.py
|
from webui.webui import create_driver
from pytest import fixture, mark
from webui.webui import WebUI, create_driver
@fixture(scope='session')
def driver():
    """Session-scoped Selenium driver fixture.

    Creates one Chrome driver for the whole test session and quits it
    during teardown (the original never closed it, leaking the browser
    process after the session ended).
    """
    driver = create_driver('CHROME')
    yield driver
    # Teardown: assumes the object returned by create_driver exposes
    # .quit(), standard for Selenium drivers — TODO confirm in webui.
    driver.quit()
@fixture(scope='session')
def browser(driver):
    """Session-scoped WebUI wrapper around the shared driver fixture."""
    ui = WebUI(driver)
    yield ui
|
{"/tests/test_youtube_site.py": ["/tests/conftest.py"], "/tests/conftest.py": ["/webui/webui.py"]}
|
15,984
|
dotJPG/dotJPG.github.io
|
refs/heads/master
|
/successFail.py
|
import bs4, csv, pandas
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
class successFail(object):
    """Track FAILURE/ERROR rows from a results CSV and publish them.

    The object counts failing rows in a test-results CSV, updates a
    status button in an HTML page, appends a daily count to a rolling
    statistics CSV, and renders the counts as a bar-graph image.
    """

    # Class-level defaults; every instance overwrites them in __init__
    # (kept for backward compatibility with code reading the class).
    resultsCSV = ''
    indexHTML = ''
    statisticsCSV = ''
    imagePath = ''
    failCount = 0

    def __init__(self, resultsCSV, indexHTML, statisticsCSV, imagePath):
        """Store the paths of the files this object reads and writes."""
        self.resultsCSV = resultsCSV
        self.indexHTML = indexHTML
        self.statisticsCSV = statisticsCSV
        self.imagePath = imagePath

    def checkFailures(self):
        """Count rows whose third column is FAILURE or ERROR."""
        with open(self.resultsCSV) as resultsFile:
            reader = csv.reader(resultsFile, delimiter=',')
            for row in reader:
                # Guard against blank/short rows, which previously
                # raised IndexError on row[2].
                if len(row) > 2 and row[2] in ('FAILURE', 'ERROR'):
                    self.failCount += 1

    def areFails(self):
        """Return True if any failures or errors have been counted."""
        return self.failCount > 0

    def count(self):
        """Return the number of failures/errors counted so far."""
        return self.failCount

    def setButton(self, button):
        """Set the CSS class of the first <a> tag in the index page."""
        with open(self.indexHTML, "r") as index:
            txt = index.read()
        # Explicit parser: the original relied on bs4's default, which
        # depends on what is installed and emits a warning.
        soup = bs4.BeautifulSoup(txt, 'html.parser')
        failButton = soup.find('a')
        failButton['class'] = button
        with open(self.indexHTML, "w") as out:
            out.write(str(soup))

    def updateStatistics(self):
        """Drop the oldest data row and append today's failure count.

        Row 0 is assumed to be the header, so the rolling window pops
        index 1 (the oldest data row). Leftover debug prints from the
        original were removed.
        """
        with open(self.statisticsCSV, "r") as stats:
            lines = list(csv.reader(stats, delimiter=','))
        lines.pop(1)
        lines.append([datetime.strftime(datetime.now(), '%b %d'),
                      str(self.count())])
        with open(self.statisticsCSV, "w") as statsout:
            writer = csv.writer(statsout, dialect='excel',
                                lineterminator='\n')
            writer.writerows(lines)

    def drawGraph(self):
        """Render the statistics CSV as a bar-graph image."""
        colnames = ['Date', 'Errors']
        data = pandas.read_csv(self.statisticsCSV, header=None,
                               names=colnames)
        # The CSV's header line is read as data row 0, so skip it; the
        # remaining counts come back as strings and must be cast so the
        # bar heights are numeric (the original passed raw strings).
        dates = data.Date.tolist()[1:]
        errors = [float(e) for e in data.Errors.tolist()[1:]]
        y_pos = range(len(dates))
        plt.bar(y_pos, errors, align='center', color='#f09100')
        plt.xticks(y_pos, dates)
        plt.ylim([0, 5])
        plt.xlabel('Day')
        plt.ylabel('Errors & Failures')
        plt.title('Site Link Performance')
        plt.savefig(self.imagePath)
|
{"/updatePage.py": ["/successFail.py"]}
|
15,985
|
dotJPG/dotJPG.github.io
|
refs/heads/master
|
/updatePage.py
|
import successFail
# Build the organizer over the Serenity results and the site's assets.
organizer = successFail.successFail(
    'Serenity/results.csv',
    'index.html',
    'statistics/weekPerformance.csv',
    'img/barGraph.png',
)
organizer.checkFailures()
# Red button when anything failed, green otherwise.
style = ('btn btn-danger btn-lg' if organizer.areFails()
         else 'btn btn-success btn-lg')
organizer.setButton(style)
organizer.updateStatistics()
organizer.drawGraph()
|
{"/updatePage.py": ["/successFail.py"]}
|
16,019
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/out_to_list.py
|
#! /usr/bin/env python3
########################################################################
# #
# #
# #
# Known issues: #
# Methods which are declared with non-alphanumeric characters #
# #
# #
# #
# This script was written by Thomas Heavey in 2015. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2015, 2019 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
import argparse # For parsing commandline arguments
import glob # Allows referencing file system/file names
import re # RegEx package for sorting data
from . import geomRegex
def outtolist(base_name: str, method: str,
              singlefile: bool = False, geometries: int = 0,
              interactive: bool = False):
    """Extract energies (and optionally geometries) from QM output files.

    Globs for files matching ``base_name + '*out'`` and writes one line
    per file to ``base_name + '_energies'``. With ``geometries`` > 1,
    all geometries are also written to ``base_name + '_allgeoms'`` in an
    XYZ-like format.

    :param str base_name: base name of the output file(s) to read
    :param str method: calculation method; selects how files are parsed.
        If None or unrecognized, the method is guessed from file content.
    :param bool singlefile: treat the match as one multi-geometry file
    :param int geometries: 0 = none, 1 = stationary point only (not
        implemented), >1 = all geometries
    :param bool interactive: allow prompting when several files match a
        single-file request
    :raises SyntaxError: several files matched but multi-file processing
        was declined
    :raises NotImplementedError: if ``geometries == 1``
    """
    in_name_list = glob.glob(base_name + '*out')
    num_files = len(in_name_list)
    in_name_list.sort()         # sort files alphanumerically
    in_name_list.sort(key=len)  # then by length so 1,...,9,10 ordering
    # instead of 1,10,11,...; if numbered 01,02,... lengths are equal
    # and this second sort is a no-op.
    yes = ['y', 'yes', '1']
    # process in case of single file
    if singlefile:
        if num_files == 0:
            in_name_list = glob.glob(base_name)
            num_files = len(in_name_list)
            print('number of files is {}'.format(num_files))
        if num_files == 1:
            in_name = in_name_list[0]
            print('(single) file name is {}'.format(in_name))
        if num_files > 1:
            print('More than one file matched input file name.')
            if (interactive and
                    input('Get energies from multiple output files?') in yes):
                singlefile = False
            else:
                raise SyntaxError('Okay, well files that matched your input '
                                  'are {}'.format(in_name_list))
    # Print some stuff if not a single file. Not "else" because
    # singlefile may have been flipped to False just above.
    if not singlefile:
        print('base file name is {}'.format(base_name))
        print('number of files is {:,}'.format(num_files))
    # todo add energies to comment line of geoms file
    if geometries == 1:
        raise NotImplementedError('Not able to pull out single geom yet.')
    # Collect all geometries from files for -gg (or more). The two
    # branches of the original duplicated the same loop; merged here.
    if geometries > 1:
        geoms_out_name = base_name + '_allgeoms'
        with open(geoms_out_name, 'w') as geom_file:
            # NOTE(review): as in the original, singlefile relies on
            # in_name bound above; singlefile with num_files != 1 would
            # hit an unbound local here.
            names = [in_name] if singlefile else in_name_list
            for file_name in names:
                for geom in geomRegex.findallgeoms(file_name):
                    geom_file.write(str(len(geom)) + '\n')
                    geom_file.write('geometry from {}\n'.format(base_name))
                    for atom in geom:
                        geom_file.write(atom + '\n')
    out_name = base_name + '_energies'
    # Map method names to parsing modes.
    methods = {'wb97xd': 'dft', 'mp2': 'mp2', 'pbe50': 'sfdft',
               'eom-ccsd': 'sf', 'hf': 'hf', 'pbe1pbe': 'gdft',
               'cis': 'gcis'}
    i = 0
    # Parsing mode; detected from file content when not given/known.
    _method = None
    if method is not None:
        try:
            _method = methods[method.lower()]
        except KeyError:
            print('method {} unrecognized. Going to '.format(method) +
                  'try to find method based on output file.')
    # Gaussian method regex: non-whitespace before a '/' preceded by
    # whitespace, i.e. route lines of the form (method)/(basis set).
    gregex = re.compile(r'\s+\S+/')
    # QChem SF methods energy regex:
    sfenergy = re.compile(r'[=:]\s*-\d*\.\d*')
    # Gaussian DFT ground state energy regex:
    gdftenergy = re.compile(r'=\s*-\d*\.\d*')
    # Gaussian TD-DFT excited state energies regex:
    tdgdftenergy = re.compile(r'\s-*\d+\.\d+\s+ev')
    # Gaussian CIS ground state energy regex (scientific notation, so
    # capture to end of line):
    gcisenergy = re.compile(r'eump2\s*=\s*-.+')
    # FIX: the original compared strings with 'is' (e.g. _method is
    # 'dft'), which only works because CPython interns short literals
    # and is a SyntaxWarning on 3.8+; all comparisons now use '=='.
    with open(out_name, 'w') as out_file:
        for name in in_name_list:
            energy_list = []
            with open(name, 'r') as in_file:
                for line in in_file:
                    line = line.lower().strip()
                    if _method is None:
                        if 'method' in line or 'exchange' in line:
                            # make the line into a list split by spaces
                            linelist = re.split(' +', line)
                            if linelist[-1] in methods:
                                _method = methods[linelist[-1]]
                            else:
                                print('Unknown method {} used'.format(
                                    linelist[-1]))
                                print('Assuming output formatted as HF')
                                _method = 'hf'
                        if 'entering gaussian sys' in line:
                            # Gaussian output file, method undetermined
                            _method = 'gaugen'
                        continue
                    if _method == 'gaugen':
                        if line.startswith('#'):
                            gmethodmatch = gregex.search(line)
                            # todo do this with only one regex using
                            # groups and non-capturing groups
                            if gmethodmatch:
                                gmethod = re.search(r'\w+',
                                                    gmethodmatch.group())
                                if gmethod:
                                    try:
                                        _method = methods[gmethod.group()]
                                    except KeyError:
                                        print('unknown Gaussian method. ' +
                                              'Assuming (g)dft.')
                                        _method = 'gdft'
                            if re.search(r'\btd\s*[(=]', line):
                                _method = 'td' + _method
                                # Will misbehave if TD is declared on a
                                # line before the functional/method.
                        continue
                    if _method == 'dft':
                        if line.startswith('total energy'):
                            linelist = re.split(' +', line)
                            out_file.write(linelist[-1])
                            out_file.write('\n')
                        continue
                    if _method == 'mp2':
                        if 'total energy' in line:
                            linelist = re.split(' +', line)
                            out_file.write(linelist[-2])
                            out_file.write('\n')
                        continue
                    if _method == 'sf':
                        if 'otal energy' in line:
                            if 'basis set' in line:
                                # Ignore HF energy
                                continue
                            match = sfenergy.search(line)
                            energy_list.append(match.group()[2:])
                        continue
                    if _method == 'sfdft':
                        if 'otal energy' in line:
                            match = sfenergy.search(line)
                            energy_list.append(match.group()[2:])
                        continue
                    if _method.endswith('gdft'):
                        # Ground state energy for (TD)DFT with Gaussian;
                        # no continue on non-matching lines so tdgdft
                        # can also see its excited-state lines below.
                        if 'scf done' in line:
                            match = gdftenergy.search(line)
                            energy_list.append(match.group()[2:])
                            continue
                    if _method == 'tdgdft':
                        if line.startswith('excited state'):
                            match = tdgdftenergy.search(line)
                            if match:
                                energy_list.append(match.group()[:-3])
                        continue
                    if _method == 'gcis':
                        if 'eump2' in line:
                            match = gcisenergy.search(line)
                            energy_list.append(match.group()[8:])
                            continue
                        if line.startswith('excited state'):
                            match = tdgdftenergy.search(line)
                            if match:
                                energy_list.append(match.group()[:-3])
                        continue
            # Write one line per file even when no energies were found,
            # so blank/error files still get a visible entry (this is
            # the intent behind the original's "if True:" placeholder).
            out_file.write(str(energy_list) + '\n')
            i += 1
    # todo save files with desired information
    print("Opened {0} files, and wrote data to {1}".format(i, out_name))
    print('Files processed for {} method.'.format(_method))
    try:
        # geoms_out_name is only bound when geometries > 1
        print('geometries written to {}'.format(geoms_out_name))
    except NameError:
        print('No geometries saved')
    # todo write statements about files that were saved
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run outtolist
    # interactively.
    descrip = ('This function takes a base file name for output files and makes'
               ' a file basename_energies that is a list of the energies from '
               'the the read output files.')
    parser = argparse.ArgumentParser(description=descrip)
    parser.add_argument('base_name', help='base name of files to read')
    # todo use argparse to check for multiple positional arguments?
    # If I pass it as list set it all as in_name_list?
    parser.add_argument('-m', '--method',
                        help=('calculation method (changes how files '
                              'are interpreted)'))
    parser.add_argument('-s', '--singlefile', action='store_true',
                        help=('use if output is single file with '
                              'multiple geometries'))
    parser.add_argument('-g', '--geometries', action='count', default=0,
                        help=('Flag for creating file of geometries in '
                              'XYZ style format.'
                              '\n-g for stationary points, -gg for all.'))
    # todo add option for saving all energies -e?
    # todo add flag for gaussian vs. qchem?
    args = parser.parse_args()
    # FIX: argparse stores the positional as 'base_name'; the original
    # accessed args.basename, which raised AttributeError at runtime.
    outtolist(base_name=args.base_name, method=args.method,
              singlefile=args.singlefile, geometries=args.geometries,
              interactive=True)
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,020
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/submit_gaussian.py
|
#! /usr/bin/env python3
########################################################################
# #
# This script was written by Thomas Heavey in 2015. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2015 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# This is written to work with python 3 because it should be good to
# be working on the newest version of python.
from __future__ import print_function
import argparse # For parsing commandline arguments
import datetime
import glob # Allows referencing file system/file names
import os
import re
import readline # Allows easier file input (with tab completion?)
import subprocess # Allows for submitting commands to the shell
from warnings import warn
from thtools import cd, make_obj_dir, save_obj, resolve_path
yes = ['y', 'yes', '1']
# An input function that can prefill in the text entry
# Not sure if this works in 3.5+ because raw_input is gone
def rlinput(prompt, prefill=''):
    """Prompt for input with *prefill* already typed into the line.

    Installs a readline startup hook that injects the prefill text just
    before input() reads; the hook is always cleared afterwards (in the
    finally) even if input() raises, so later prompts are unaffected.
    """
    readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        return input(prompt)
    finally:
        readline.set_startup_hook()
def _dir_and_file(path):
warn('_dir_and_file is deprecated. Use os.path.split instead',
DeprecationWarning)
if '/' in path:
rel_dir, f_name = path.rsplit('/', 1)
rel_dir = rel_dir + '/'
else:
rel_dir = ''
f_name = path
return rel_dir, f_name
def create_gau_input(coord_name, template, verbose=True):
    """
    Make a Gaussian input file by combining a header and coordinates.

    Copies *template* verbatim (ensuring every line ends with a
    newline), then appends each line of *coord_name* except the first
    two (the atom count and title/comment of XYZ-style files), and
    finishes with the trailing blank lines Gaussian expects. The result
    is written next to the coordinates as '<coord base>.com'.

    :param str coord_name: name of file with coordinates in a format
        Gaussian can read
    :param str template: name of file with header for Gaussian
        calculation (up to and including the charge and multiplicity)
    :param bool verbose: If True, some status messages will be printed
        (including file names)
    :return: name of the written file
    :rtype: str
    """
    if verbose:
        print('Creating Gaussian input file...')
    _out_name = coord_name.rsplit('.', 1)[0] + '.com'
    with open(_out_name, 'w') as com_file:
        with open(template, 'r') as header:
            if verbose:
                print('opened {}'.format(template))
            for text in header:
                com_file.write(text if '\n' in text else text + '\n')
        with open(coord_name, 'r') as coords:
            if verbose:
                print('opened {}'.format(coord_name))
            for line_no, text in enumerate(coords):
                # Skip the atom-count and title/comment lines.
                if line_no >= 2:
                    com_file.write(text)
        com_file.write('\n\n\n')
    if verbose:
        print('created Gaussian input file {}'.format(_out_name))
    return _out_name
def get_input_files(base_name, batch):
    """Glob for input files starting with *base_name*.

    Files are sorted alphanumerically and then by length so numbered
    files order 1,...,9,10 rather than 1,10,11,... If several files
    match and batch was not requested, the user is offered a switch to
    batch mode or asked for a single file name.

    :return: (list of matching file names, possibly-updated batch flag)
    """
    matches = glob.glob(base_name + '*')
    matches.sort()          # alphanumeric order first
    matches.sort(key=len)   # then length; no-op when lengths are equal
    if not batch and len(matches) > 1:
        print('Multiple files starting with {}'.format(base_name))
        if input('Did you mean to execute a batch job? ') in yes:
            batch = True
        else:
            print('What file name shall I use?')
            matches = [rlinput('file name: ', base_name)]
    return matches, batch
def use_template(template, in_names, verbose):
    """Create a Gaussian input file from each coordinates file.

    Runs create_gau_input over *in_names* with the given *template*
    and returns the generated '.com' names, sorted alphanumerically
    and then by length (same convention as get_input_files).
    """
    generated = []
    for coord_file in in_names:
        com_name = create_gau_input(coord_file, template, verbose=verbose)
        generated.append(com_name)
        if verbose:
            print('Added {} to files to possibly submit.'.format(com_name))
    generated.sort()
    generated.sort(key=len)
    return generated
def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False,
                     mem='125', executable='g09',
                     chk_file=None, copy_chk=False,
                     ln_running=None,
                     hold_jid=None, xyz=None, make_xyz=None, make_input=False,
                     ugt_dict=None):
    """
    Write submission script for (Gaussian) jobs for submission to queue

    If make_xyz is not None, the file make_xyz will be checked to exist
    first to make sure to not waste time when missing a necessary input file.

    :param str input_name: Name of the file to use as input
    :param int num_cores: Number of cores to request
    :param str time: Amount of time to request in the format 'hh:mm:ss'
    :param bool verbose: If True, print out some status messages and such
    :type mem: int or str
    :param mem: Minimum amount of memory to request
    :param str executable: Executable file to use for the job.
        Example, 'g09', 'g16'
    :param str chk_file: If not None, this file will be copied back after
        the job has completed. If this is not None and make_input is True,
        this will also be passed to use_gen_template.
    :param bool copy_chk: If this is True, the script will attempt to copy
        what should be an existing checkpoint file to the scratch directory
        before running the job. `chk_file` must be not None as well.
    :param str ln_running: If not None, this will be the base name for
        linking the output file to the current directory. If chk_file is
        not None, it will also be linked with the same base name.
    :param str hold_jid: Job on which this job should depend.
        This should be the name of another job in the queuing system.
    :param str xyz: Name of an xyz file to use as input to use_gen_template
        (if make_input is True).
    :param str make_xyz: The name of a file to pass to obabel to be used to
        create an xyz file to pass to use_gen_template.
    :param bool make_input: If True, use_gen_template will be used to create
        input for the Gaussian calculation.
    :param dict ugt_dict: dict of arguments to pass to use_gen_template.
        This should not include out_file, xyz, nproc, mem, or checkpoint
        because those will all be used from other arguments to this
        function.
    :return: The name of the script file
    :rtype: str
    :raises SyntaxError: the input file name cannot be interpreted
    """
    rel_dir, file_name = os.path.split(input_name)
    if file_name.endswith('.com'):
        short_name = os.path.splitext(file_name)[0]
        if not short_name + '.com' == file_name:
            raise SyntaxError('problem interpreting file name. ' +
                              'Period in file name?')
        out_name = short_name + '.out'
    elif '.' in file_name:
        short_name, input_extension = os.path.splitext(file_name)
        # FIX: os.path.splitext returns the extension *with* its leading
        # dot; the original compared short_name + '.' + extension, which
        # never matched and raised SyntaxError for every non-.com input.
        if not short_name + input_extension == file_name:
            raise SyntaxError('problem interpreting file name. ' +
                              'Period in file name?')
        out_name = short_name + '.out'
    else:
        short_name = file_name
        file_name = short_name + '.com'
        print('Assuming input file is {}'.format(file_name))
        out_name = short_name + '.out'
    # Queue job names must start with a letter; strip leading non-letters.
    job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1)
    if len(job_name) == 0:
        job_name = 'default'
    _script_name = os.path.join(rel_dir, 'submit' + short_name + '.sh')
    # Time-stamped temporary name so concurrent invocations don't collide.
    temp_xyz = os.path.abspath('.temp' +
                               datetime.datetime.now().strftime('%H%M%S%f') +
                               '.xyz')
    if xyz is None or make_xyz is not None:
        n_xyz = temp_xyz
    else:
        n_xyz = resolve_path(xyz)
    temp_pkl = temp_xyz[:-4]
    if ugt_dict is not None:
        make_obj_dir()
        pkl_path = save_obj(ugt_dict, temp_pkl)
    if chk_file is not None:
        chk_line = 'checkpoint=\'{}\','.format(chk_file)
    else:
        chk_line = ''
    with open(_script_name, 'w') as script_file:
        sfw = script_file.write
        sfw('#!/bin/bash -l\n\n')
        sfw('#$ -pe omp {}\n'.format(num_cores))
        sfw('#$ -M theavey@bu.edu\n')
        sfw('#$ -m eas\n')
        sfw('#$ -l h_rt={}\n'.format(time))
        sfw('#$ -l mem_total={}G\n'.format(mem))
        sfw('#$ -N {}\n'.format(job_name))
        sfw('#$ -j y\n')
        sfw('#$ -o {}.log\n\n'.format(short_name))
        if hold_jid is not None:
            sfw('#$ -hold_jid {}\n\n'.format(hold_jid))
        if make_xyz is not None:
            # Bail out (exit 17) if the converter's input is missing.
            sfw('if [ ! -f {} ]; then\n'.format(
                os.path.abspath(make_xyz)) +
                ' exit 17\n'
                'fi\n\n')
            sfw('module load wxwidgets/3.0.2\n')
            sfw('module load openbabel/2.4.1\n\n')
            sfw('obabel {} -O {}\n\n'.format(os.path.abspath(
                make_xyz), os.path.abspath(n_xyz)))
        if make_input:
            # Build the Gaussian input on the compute node with
            # use_gen_template, loading the pickled kwargs saved above.
            sfw('python -c "from gautools.tools import '
                'use_gen_template as ugt;\n'
                'from thtools import load_obj, get_node_mem;\n'
                'm = get_node_mem();\n'
                'd = load_obj(\'{}\');\n'.format(
                    os.path.abspath(pkl_path)) +
                'ugt(\'{}\',\'{}\','.format(
                    file_name, os.path.abspath(n_xyz)) +
                'nproc=$NSLOTS,mem=m,{}'.format(chk_line) +
                '**d)"\n\n')
        sfw('INPUTFILE={}\n'.format(file_name))
        sfw('OUTPUTFILE={}\n'.format(out_name))
        if chk_file is not None:
            sfw('CHECKFILE={}\n\n'.format(chk_file))
        else:
            sfw('\n')
        if ln_running is not None:
            sfw('WORKINGOUT={}.out\n'.format(ln_running))
            if chk_file is not None:
                sfw('WORKINGCHK={}.chk\n\n'.format(ln_running))
            else:
                sfw('\n')
        sfw('CURRENTDIR=`pwd`\n')
        sfw('SCRATCHDIR=/scratch/$USER\n')
        sfw('mkdir -p $SCRATCHDIR\n\n')
        sfw('cd $SCRATCHDIR\n\n')
        sfw('cp $CURRENTDIR/$INPUTFILE .\n')
        if chk_file is not None:
            if not copy_chk:
                # Leave the checkpoint-copy line commented out unless
                # the caller asked for an existing chk to be copied in.
                sfw('# ')
            sfw('cp $CURRENTDIR/$CHECKFILE .\n\n')
        else:
            sfw('\n')
        if ln_running is not None:
            sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE '
                '$CURRENTDIR/$WORKINGOUT\n')
            if chk_file is not None:
                sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE '
                    '$CURRENTDIR/$WORKINGCHK\n\n')
            else:
                sfw('\n')
        sfw('echo About to run {} in /net/`'.format(executable) +
            'hostname -s`$SCRATCHDIR\n\n')
        sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable))
        sfw('\n\n')
        if ln_running is not None:
            sfw('rm $CURRENTDIR/$WORKINGOUT')
            if chk_file is not None:
                sfw(' $CURRENTDIR/$WORKINGCHK\n\n')
            else:
                sfw('\n\n')
        sfw('cp $OUTPUTFILE $CURRENTDIR/.\n')
        if chk_file is not None:
            sfw('cp $CHECKFILE $CURRENTDIR/.\n\n')
        else:
            sfw('\n')
        sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\n')
        sfw('echo output was copied to $CURRENTDIR\n\n')
    if verbose:
        print('script written to {}'.format(_script_name))
    return _script_name
def submit_scripts(scripts, batch=False, submit=False, verbose=False):
    """Submit generated job scripts to the queue with qsub.

    In batch mode every script is submitted; otherwise only the first.
    Either way, submission happens only when *submit* is True or the
    user confirms interactively. The original duplicated the whole
    qsub call in both branches; it is factored into one helper here.

    :param list scripts: paths of the script files to submit
    :param bool batch: submit every script rather than just the first
    :param bool submit: skip the interactive confirmation prompt
    :param bool verbose: print qsub output and status messages
    :return: list of 'job-id job-name' strings parsed from qsub output
    """
    def _qsub(script):
        # Run qsub from the script's own directory and return its stdout.
        rd, f = _dir_and_file(script)
        with cd(rd, ignore_blank=True):
            process = subprocess.Popen(['qsub', f],
                                       stdout=subprocess.PIPE,
                                       universal_newlines=True)
            output = process.communicate()[0]
            if verbose:
                print(output)
            return output

    outputs = []
    if batch:
        if submit or input('submit all jobs? ') in yes:
            outputs = [_qsub(script) for script in scripts]
        elif verbose:
            print('No jobs submitted, but scripts created')
    else:
        if submit or input('submit job {}? '.format(scripts[0])) in yes:
            outputs.append(_qsub(scripts[0]))
        elif verbose:
            print('{} not submitted'.format(scripts))
    # qsub replies like 'Your job 12345 ("name") has been submitted';
    # fields 2:4 are the id and the quoted name.
    _job_info = [' '.join(output.split(' ')[2:4]) for output in outputs]
    return _job_info
if __name__ == '__main__':
    # Command-line entry point: build one submission script per input
    # file (optionally generating the inputs from a template first) and
    # hand them to qsub.
    description = 'Create and submit a script to run a Gaussian job on SCC'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('in_name',
                        help='Name of Gaussian input file')
    parser.add_argument('-c', '--numcores', type=int, default=16,
                        help='Number of cores for job')
    # I should probably check validity of this time request
    # Maybe it doesn't matter so much because it just won't
    # submit the job and it will give quick feedback about that?
    parser.add_argument('-t', '--time',
                        help='Time required as "hh:mm:ss"',
                        default='12:00:00')
    parser.add_argument('-e', '--executable', type=str, default='g09',
                        help='name of executable to run')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='create multiple scripts (batch job)')
    parser.add_argument('-x', '--template', default=None,
                        help='template file for creating input from coords')
    parser.add_argument('-s', '--submit', action='store_true',
                        help='Automatically submit jobs?')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='make program more verbose')
    # store_false: nojobinfo defaults to True; passing -j suppresses the
    # job-info printout at the end.
    parser.add_argument('-j', '--nojobinfo', action='store_false',
                        help='Do not return the submitted job information')
    parser.add_argument('-k', '--chk_file', default=None,
                        help='checkpoint file to be written and copied back')
    parser.add_argument('--copy_chk', action='store_true',
                        help='Copy check file to the scratch directory')
    parser.add_argument('-l', '--ln_running', type=str, default=None,
                        help='base name for linking output to cwd while '
                             'running')
    parser.add_argument('-d', '--hold_jid', default=None,
                        help='job on which this job should depend')
    args = parser.parse_args()
    # get_input_files may flip batch on when several files match.
    in_name_list, args.batch = get_input_files(args.in_name, args.batch)
    if args.template:
        in_name_list = use_template(args.template, in_name_list, args.verbose)
    script_list = []
    for in_name in in_name_list:
        script_name = write_sub_script(input_name=in_name,
                                       num_cores=args.numcores,
                                       time=args.time,
                                       verbose=args.verbose,
                                       executable=args.executable,
                                       chk_file=args.chk_file,
                                       copy_chk=args.copy_chk,
                                       ln_running=args.ln_running,
                                       hold_jid=args.hold_jid)
        script_list.append(script_name)
    if not len(script_list) == len(in_name_list):
        # This should never be the case as far as I know, but I would
        # like to make sure everything input gets a script and all the
        # script names are there to be submitted.
        raise IOError('num scripts dif. from num names given')
    job_info = submit_scripts(script_list, args.batch, args.submit,
                              args.verbose)
    if job_info and args.nojobinfo:
        for job in job_info:
            print(job)
    if args.verbose:
        print('Done. Completed normally.')
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,021
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/geomRegex.py
|
__author__ = 'Thomas Heavey'
import re
filename = "testg.out"
def findallgeoms(filename):
    """Return every geometry found in *filename* as XYZ-style strings.

    Scans Gaussian output for 'Standard orientation' tables and returns
    a list of geometries, each itself a list of strings of the form
    '<atomic number> <x> <y> <z>'. Works with Gaussian output;
    untested against Q-Chem.
    """
    # Columns of interest in each table row: atomic number and x, y, z.
    wanted_columns = [1, 3, 4, 5]
    line_format = '{:>2} {: f} {: f} {: f}'
    geom_pattern = re.compile(
        r'(?:Standard orientation)'  # non-capturing (nc) table heading
        r'(?:.+?)'                   # nc table header rows
        r'((?:(?:\s+\d+\s+)'         # nc atom number
        r'(\d+\s+)'                  # (capturing) atomic number
        r'(?:\d+\s+)'                # nc atomic type
        r'(-?\d+\.\d+\s*){3,3}'      # three cartesian coordinates
        r')+)'                       # repeat for at least one atom
        r'(?:-)'                     # nc terminating line of dashes
        , re.DOTALL)
    all_geoms = []
    with open(filename, 'r') as handle:
        for match in geom_pattern.finditer(handle.read()):
            geometry = []
            for row in match.group(1).split('\n'):
                # Skip blank / whitespace-only rows:
                if len(row) < 2:
                    continue
                fields = row.split()
                values = [float(fields[c]) for c in wanted_columns]
                values[0] = int(fields[wanted_columns[0]])
                geometry.append(line_format.format(*values))
            all_geoms.append(geometry)
    return all_geoms
# I don't know if I like this format. It would be reasonable for
# Mathematica, but somewhat odd for Python. I guess for outputting
# it though it won't be terrible because I can just double
# iterate over the nested list, writing lines from the strings.
# I'll need to pick a separator for between geometries maybe but that's
# not a problem. Also with this format, should be easy to count number
# of atoms.
# Still need to have way to just find stationary points
if __name__ == "__main__":
    # Demo/debug entry point: parse the module-level test file name and
    # dump whatever geometries are found.
    print(findallgeoms(filename))
    # Ugly because returned as list of list of strings
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,022
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/create_runs.py
|
#! /usr/bin/env python3.4
########################################################################
# #
# #
# #
# Known issues: #
# None #
# #
# . #
# #
# This script was written by Thomas Heavey in 2015. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2015 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# This is written to work with python 3.4 because it should be good to
# be working on the newest version of python.
import fileinput # allows easy iteration over a file
import sys # For importing the arguments given
import re # RegEx package for sorting data
import os.path # Allows for checking if file already exists
import glob # Iteration over files in directory
# Command-line arguments: base name of the .inp files and the template
# for the .run submission scripts.
base_name = sys.argv[1]
template_file = sys.argv[2]
in_name_list = glob.glob(base_name + '*.inp')
in_name_list.sort()          # alphanumeric order first
in_name_list.sort(key=len)   # then by length so 1,...,9,10 ordering
print('base file name is {}'.format(base_name))
print('number of files is {:,}'.format(len(in_name_list)))
print('template file name is {}'.format(template_file))
# For each input file, copy the template into '<job>.run', substituting
# the ReplaceMe* placeholders line by line.
for in_name in in_name_list:
    job_name = in_name.replace('.inp', '')
    run_name = job_name + '.run'
    out_name = job_name + '.out'
    with open(run_name, 'w') as run_file:
        with open(template_file, 'r') as templ:
            for line in templ:
                # Note, in most places, these lines are "stripped" because
                # they come with leading spaces, which messes with
                # startswith and the split function, too I think.
                if line.strip().startswith('cd'):
                    # Working-directory line: substitute the base name.
                    line = line.replace('ReplaceMe', base_name)
                    run_file.write(line)
                    continue
                if line.strip().startswith('qchem'):
                    # Q-Chem invocation: substitute input/output names.
                    line = line.replace('ReplaceMeIn', in_name )
                    line = line.replace('ReplaceMeOut', out_name)
                    run_file.write(line)
                    continue
                if line.strip().startswith('#PBS'):
                    # Scheduler directive: substitute the job name.
                    line = line.replace('ReplaceMe', job_name)
                    run_file.write(line)
                    continue
                # All other template lines are copied through unchanged.
                run_file.write(line)
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,023
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/setup.py
|
from setuptools import setup

# Packaging metadata for gautools: helper scripts and modules for
# creating, submitting, and processing quantum-chemistry calculations.
setup(
    name='gautools',
    packages=['gautools'],
    # Stand-alone command-line scripts installed onto the user's PATH.
    scripts=[
        'gautools/aml.py',
        'gautools/create_runs.py',
        'gautools/geomRegex.py',
        'gautools/out_to_list.py',
        'gautools/out_to_list_sf.py',
        'gautools/submit_gaussian.py',
        'gautools/xtorun.py',
        'gautools/xyz_to_inp.py',
        'gautools/xyz_to_inpglob.py',
    ],
    url='https://github.com/theavey/QM-calc-scripts',
    license='Apache License 2.0',
    author='Thomas Heavey',
    author_email='thomasjheavey@gmail.com',
    description='A set of scripts that are useful for creating, submitting, '
                'and processing QM calculations',
    # Runtime dependencies used across the package (e.g. aml.py, tools.py).
    install_requires=[
        'MDAnalysis>=0.17.0',
        'thtools',
        'numpy',
        'six',
        'paratemp',
        'parmed',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 3',
    ],
    # No non-importable data files, so the package can run from a zip.
    zip_safe=True,
)
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,024
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/ConfigFileManager.py
|
#! /usr/bin/env python3.4
########################################################################
# #
# #
# #
# Known issues: #
# #
# #
# #
# This script was written by Thomas Heavey in 2015. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2015 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# Import Statements
import os # Generally good for referencing files
# todo define as a class?
def configmanager(basename, filename=".config"):
    """Manage the state file (default '.config') for a calculation.

    The file holds 'keyword value' pairs, one per line; lines beginning
    with '#' are ignored (unrecognized keywords are currently not
    interpreted either). At present this only checks for the file's
    existence and does nothing further.
    """
    config_present = checkforconfig(filename)
    # todo print if one exists, and what is being done about it
    pass
    # todo Obviously need to do this
# todo Obviously need to do this
def checkforconfig(filename):
    """Return True when a configuration file exists at *filename*, else False."""
    return os.path.isfile(filename)
def makenewconfig(basename, filename, **starting_values):
    """Create a new configuration file.

    :param str basename: Base name of the calculation; written on the
        first line as 'basename <basename>'.
    :param str filename: Path for the new file.
    :param starting_values: Initial keyword/value pairs to record.
    :raises FileExistsError: if *filename* already exists (mode 'x').
    """
    with open(filename, 'x') as file:
        file.write('basename {}\n'.format(basename))
        for key in starting_values:
            # Bug fix: terminate each entry with a newline so that entries
            # do not run together and readconfig can parse one pair per line.
            file.write('{} {}\n'.format(key, starting_values[key]))
    print("Wrote new configuration file to {}".format(filename))
def readconfig(filename):
    """Read a configuration file into a dict of keyword: value pairs.

    Lines starting with '#' are ignored, as are lines with fewer than
    two whitespace-separated fields (e.g. blank lines).

    :param str filename: Path of the configuration file to read.
    :return: Mapping of the first field of each line to the second.
    :rtype: dict
    """
    filedata = dict()
    with open(filename, 'r') as file:
        for line in file:
            # take off leading and trailing whitespace
            line = line.strip()
            # ignore commented lines
            if line.startswith("#"):
                continue
            # Bug fix: the original did filedata.update(line.split()[0:1]),
            # which hands dict.update a bare string (raising ValueError, or
            # silently storing a char pair for 2-char keywords). Store the
            # first two fields as a key: value pair, per the docstring.
            parts = line.split()
            if len(parts) >= 2:
                filedata[parts[0]] = parts[1]
    print("Read configuration file {}".format(filename))
    return(filedata)
def updateconfig(filename, **added_values):
    """Append keyword/value pairs to an existing configuration file.

    :param str filename: Path of the configuration file to extend.
    :param added_values: Keyword/value pairs to append, one per line.
    """
    with open(filename, 'a') as file:
        for key in added_values:
            # Bug fix: each entry gets its own line (the original omitted
            # the '\n', concatenating all appended pairs onto one line).
            file.write('{} {}\n'.format(key, added_values[key]))
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,025
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/tools.py
|
"""
A set of tools for working with computational chemistry files and such
"""
########################################################################
# #
# This script was written by Thomas Heavey in 2019. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2015-2019 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
import re
import pathlib
from six import string_types
from .oniom import OniomUniverse
from paratemp import copy_no_overwrite
def fix_atom_names_xyz(xyz, dry_run=False, return_list=False):
    """
    Fix atomic symbols in xyz file

    VMD (sometimes?) writes XYZ files with the atomic symbols replaced with
    the atom names, which cause issues with other programs interpreting them.
    This should fix them by leaving only the first letter of each atom
    name/symbol.

    !! NOTE !! This is only written to work for C, H, O, N, and F currently !!

    :param str xyz: Name of the XYZ file to fix. It will be written to the
        same name but a backup will be made with '.bak.' added at the
        beginning of the name.
    :param bool dry_run: Default: False. If true, the output will not be
        written (but a file named 'test-temp.xyz' will be created/overwritten).
    :param bool return_list: Default: False. If True, the output will be
        written to 'test-temp.xyz' and the lines of the fixed file will be
        returned as a list of strings.
    :return: If return_list, a list of strings of the lines of the fixed XYZ
        file, otherwise None.
    :rtype: List[str] or None
    """
    if not dry_run and not return_list:
        xyz_c = '.bak.'+xyz
        copy_no_overwrite(xyz, xyz_c)
    else:
        xyz_c = xyz
        xyz = 'test-temp.xyz'

    def atom_name_sub(m):
        # Keep the element symbol and pad with spaces so the column
        # alignment of the coordinates is preserved.
        return m.group(1) + ' '*(len(m.group(0))-1)
    lines = []
    with open(xyz_c, 'r') as f_in, open(xyz, 'w') as f_out:
        for i, line in enumerate(f_in):
            if i > 1:  # skip the atom-count and comment header lines
                line = re.sub(r'([CHONF])\S*',
                              atom_name_sub,
                              line)
            # Bug fix: use append() here -- the original `lines += line`
            # extended the list with the individual *characters* of the
            # line, so return_list gave a list of chars, not lines.
            lines.append(line)
            if not dry_run:
                f_out.write(line)
            else:
                print(line)
    if return_list:
        return lines
def use_gen_template(out_file, xyz, job_name='default job name',
                     checkpoint='checkpoint.chk',
                     rwf='readwrite.rwf',
                     nproc=16, mem=125,
                     opt='opt', td=False,
                     func='wb97xd', basis='6-31g(d)',
                     charg_mult='0 1',
                     footer='\n\n',
                     template='/projectnb/nonadmd/theavey'
                              '/qm-basics/templ-gen.txt'):
    """
    Fill in a general template file to produce a Gaussian input file.

    The template should contain keywords in curly braces matching the
    keyword arguments of this function; each template line is formatted
    with those values and written to *out_file*, followed by the molecule
    coordinates from *xyz* and then *footer*.

    :type out_file: str or TextIOBase
    :param out_file: name of file (opened in 'x' mode, so it must not
        exist) or an already-open file object to write the output to.
    :type xyz: str or list
    :param xyz: Path to an XYZ file, or the lines of one as a list.
    :param str job_name: Name of the job to put into the Gaussian input.
    :param str checkpoint: File name for the checkpoint file.
    :param str rwf: File name for the read-write file.
    :type nproc: int or str
    :param nproc: Number of processors; the template receives nproc-1
        because the newer '%cpu' syntax counts CPUs 0 to nproc-1.
    :type mem: int or str
    :param mem: Gigabytes of memory for Gaussian to use.
    :param str opt: Opt keywords; True means 'opt', falsy means omitted.
    :param str td: TD keywords; True means 'TD', falsy means omitted.
    :param str func: Functional for Gaussian to use.
    :param str basis: Basis set for Gaussian to use.
    :param str charg_mult: Charge and multiplicity line.
    :param str footer: Text written after the coordinates (useful for RESP
        charge jobs and the like).
    :param str template: Path of the general template file.
    :return: None
    """
    # Normalize the opt/td flags: True -> canonical keyword, falsy -> ''.
    if opt:
        opt = 'opt' if opt is True else opt
    else:
        opt = ''
    if td:
        td = 'TD' if td is True else td
    else:
        td = ''
    fill_values = {
        'job_name': job_name,
        'checkpoint': checkpoint,
        'rwf': rwf,
        'nproc': str(int(nproc) - 1),
        'mem': str(mem),
        'opt': opt,
        'td': td,
        'func': func,
        'basis': basis,
        'charg_mult': charg_mult,
    }
    xyz_lines = _get_xyz_lines(xyz)
    close_when_done = isinstance(out_file, string_types)
    if close_when_done:
        out_file = open(out_file, 'x')
    try:
        with open(template, 'r') as template_handle:
            line = ''  # in case the template file is empty
            for line in template_handle:
                out_file.write(line.format(**fill_values))
            # Make sure the template section ends with a newline.
            if '\n' not in line:
                out_file.write('\n')
        for line in xyz_lines:
            out_file.write(line)
        out_file.write(footer)
    finally:
        if close_when_done:
            out_file.close()
def make_gaussian_input(out_file, xyz, job_name='default job name',
                        checkpoint='checkpoint.chk',
                        rwf='readwrite.rwf',
                        nproc=16, mem=125,
                        route=None,
                        opt='opt', td=False,
                        func='wb97xd', basis='6-31g(d)',
                        route_other=None,
                        charg_mult='0 1',
                        footer=None,
                        oniom: dict = None):
    """
    Write Gaussian input file

    :type out_file: str or TextIOBase
    :param out_file: name of file or open file object to write output to
    :type xyz: str or list
    :param xyz: List of lines from an xyz file or string of path to an xyz
        file.
    :param str job_name: Default: 'default job name'. Name of the job to put
        into the Gaussian input.
    :param str checkpoint: Default: 'checkpoint.cpt'. File name for the
        checkpoint file.
    :param str rwf: Default: 'readwrite.rwf'. File name for the read-write
        file.
    :type nproc: int or str
    :param nproc: Default: 16. Number of processors to tell Gaussian to use.
        Note, this now uses the newer '%cpu' syntax, and I'm not sure how
        that will work using fewer than all CPUs on the node because it says
        to use 0 to nproc-1.
    :type mem: int or str
    :param mem: Default: 125. Number of gigabytes of memory to tell Gaussian
        to use.
    :param str route: If not None, this will be the entire route section and
        the following commands will be ignored: `opt`, `td`, `func`, `basis`.
    :param str opt: Default: 'opt'. Opt keywords to tell Gaussian.
        If True, this will be set to 'opt'.
        If this evaluates to False, it will be set to the blank string.
        If something else, it will be set to the given string.
    :param str td: Default: False. TD keywords to tell Gaussian.
        If True, this will be set to TD.
        If this evaluates to False, it will be set to the blank string.
        If something else, it will be set to the given string.
    :param str func: Default: 'wb97xd'. Functional for Gaussian to use.
        If True or evaluates as false, it will be set to a blank string,
        which will likely be an error.
    :param str basis: Default: '6-31g(d)'. Basis set for Gaussian to use.
        If True or evaluates as false, it will be set to a blank string,
        which will likely be an error.
    :param str route_other: Other commands to use in the route section
        (e.g., 'SCRF=(solvent=dichloromethane) Int=Ultrafile freq')
    :param str charg_mult: Default: '0 1'. Charge and multiplicity line.
    :param str footer: Default: None. Footer of input file. Useful for RESP
        charge calculation jobs and such.
    :param dict oniom: dict to pass to :py:class:`gautools.oniom.OniomUniverse`
        constructor. The create object will then be used to make the molecule
        specification, and add the connectivity and MM parameters to the footer.
    :return: The Path to the written file
    :rtype: pathlib.Path
    """
    link0 = _make_link0(checkpoint, rwf, str(int(nproc)-1), mem)
    route_sec = _make_route(route, opt, td, func, basis, route_other)
    if oniom is not None:
        # ONIOM jobs get their molecule specification, connectivity, and
        # MM parameters from an OniomUniverse rather than a plain XYZ file.
        ou = OniomUniverse(**oniom)
        xyz_lines = ou.molecule_section
        bon_sec = ''.join(ou.bonds_section)
        par_sec = ''.join(ou.params_section)
        if footer is None:
            footer_list = [bon_sec, par_sec]
        else:
            footer_list = [bon_sec, footer, par_sec]
        # This should be the right order in most cases:
        # http://gaussian.com/input/
        footer = '\n'.join(footer_list)
    else:
        xyz_lines = _get_xyz_lines(xyz)
    # If the route reads the geometry from the checkpoint file, no
    # molecule specification should be written at all.
    if _geom_checkpoint(route_sec):
        xyz_lines = []
    own_handle = False
    if isinstance(out_file, string_types):
        own_handle = True
        out_file_path = pathlib.Path(out_file)
        out_file = open(out_file, 'x')  # 'x' mode: refuse to overwrite
    else:
        out_file_path = pathlib.Path(out_file.name)
    try:
        out_file.write(link0)
        out_file.write(route_sec)
        out_file.write('\n')  # blank line between sections
        out_file.write(_make_newline_terminated(job_name))
        out_file.write('\n')  # blank line between sections
        out_file.write(_make_newline_terminated(charg_mult))
        if xyz_lines:
            line = ''  # in case xyz_lines is empty (for IDE)
            for line in xyz_lines:
                out_file.write(line)
            sec_break = '\n' if (line[-1] == '\n') else '\n\n'
            out_file.write(sec_break)
        else:
            out_file.write('\n')  # blank line between sections
        if footer:
            out_file.write(_make_newline_terminated(footer))
        out_file.write('\n')  # blank line before end of file
    finally:
        if own_handle:
            out_file.close()
    return out_file_path.resolve()
_link0_template_dict = {'nproc': '%cpu=0-{nproc}',
                        'mem': '%mem={mem}GB',
                        'rwf': '%rwf={rwf}\n%NoSave',
                        'checkpoint': '%chk={checkpoint}'}


def _make_link0(checkpoint, rwf, nproc, mem):
    """Build the Link 0 ('%' command) section of a Gaussian input.

    Falsy arguments are skipped. The rwf and checkpoint entries must keep
    this relative order so that %NoSave comes between them.
    See http://gaussian.com/link0/
    """
    values = dict()
    values['mem'] = mem
    values['nproc'] = nproc
    values['rwf'] = rwf
    values['checkpoint'] = checkpoint
    pieces = [_link0_template_dict[name].format(**values)
              for name in values if values[name]]
    if not pieces:
        return str()
    return _make_newline_terminated('\n'.join(pieces))
_route_template = '# {opt} {td} {func}/{basis} {route_other}'


def _make_route(route, opt, td, func, basis, route_other):
    """Build the route ('#') line of a Gaussian input.

    When *route* is given it is used verbatim (with '# ' prepended if
    missing); otherwise the line is assembled from the individual keyword
    arguments, normalized through _process_keyword.
    """
    if route:
        if not route[0] == '#':
            route = '# ' + route
        return _make_newline_terminated(route)
    processed = dict(
        opt=_process_keyword(opt, 'opt'),
        td=_process_keyword(td, 'TD'),
        # not sure what good defaults are for func/basis
        func=_process_keyword(func, ''),
        basis=_process_keyword(basis, ''),
        route_other=_process_keyword(route_other, ''))
    return _make_newline_terminated(_route_template.format(**processed))
def _process_keyword(key, key_default):
if key:
key = key_default if key is True else key
else:
key = ''
return key
def _get_xyz_lines(xyz):
if isinstance(xyz, string_types):
xyz_lines = open(xyz, 'r').readlines()[2:]
else:
xyz_lines = xyz[2:]
return xyz_lines
def _make_newline_terminated(line):
if line[-1] == '\n':
return line
else:
return line + '\n'
def _geom_checkpoint(route):
match = re.search(r'geom=\S*?checkpoint',
route, re.IGNORECASE)
return True if match else False
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,026
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/aml.py
|
#! /usr/bin/env python3
"""
Run QM calculations at multiple levels consecutively, using queuing system
Automate Multi-Level calculations
This should help with running a set of QM calculations at several levels (e.g.,
increasing basis set size), while intelligently using the queuing system such
as Sun Grid Engine.
It can receive the signal from the queuing system that the job will be killed
soon and consequently submit a continuation of the job, using the
intermediate files to speed up subsequent calculations.
"""
########################################################################
# #
# This script/module was written by Thomas Heavey in 2019. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2018-2019 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
import json
import logging
import MDAnalysis as mda
import numpy as np
import os
import pandas as pd
import paratemp
from paratemp.geometries import XYZ
import pathlib
import random
import re
import shutil
import signal
import subprocess
import sys
import threading
import thtools
import time
from typing import List
from gautools import tools
import functools
import filecmp
# f-strings and ordered kwargs used throughout require Python 3.6+.
if not sys.version_info >= (3, 6):
    raise ValueError('Python >= 3.6 is required')

# Module-level logger; handlers are attached by _setup_log and per Calc
# instance in Calc.__init__.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
def _setup_log(level=logging.WARNING):
    """Attach a stream handler at *level* to the module-level logger.

    The handler is stored in the module global ``handler`` so other
    loggers (e.g. per-Calc loggers) can attach the same one.
    """
    global handler
    fmt = logging.Formatter('%(asctime)s - %(name)s - '
                            '%(levelname)s - %(message)s')
    handler = logging.StreamHandler()
    handler.setLevel(level=level)
    handler.setFormatter(fmt)
    log.addHandler(handler)
# Shell lines sourced into generated job scripts to make Open Babel
# available on the compute node.
obabel_module_lines = ('\n'
                       'module load wxwidgets/3.0.2\n'
                       'module load openbabel/2.4.1\n')

# Shell lines that trap the queue's usr2 warning and forward signal 12
# (SIGUSR2) to the backgrounded Python process so it can checkpoint.
signal_catch_lines = ('PY_PID=$!\n'
                      'trap "kill -n 12 $PY_PID" usr2\n'
                      'wait\n\n')
def log_exception(f):
    """Decorator that logs (and re-raises) any exception escaping *f*."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            log.exception(f'An exception was raised in {f.__name__}!')
            raise
    return wrapper
class Calc(object):
"""
An object for consecutive Gaussian calculation using a queuing system
Environment variables that need to be defined:
* HOSTNAME
* JOB_ID
* NSLOTS
* SGE_STDOUT_PATH
An exception will be raised if any of these are not defined when doing
:func:`Calc.run_calc()`.
"""
    def __init__(self, status=None, base_name=None, ind=None,
                 geometry=None, top=None, traj=None,
                 criteria=None, react_dist=None, mgi_dicts=None):
        """
        :param str status: The path to the status file to be read for a
            calculation restart. If this is not a restarted job, this should
            be None (the default).
        :param str base_name:
        :param int ind:
        :type geometry: pathlib.Path or str
        :param geometry: File from which to get the starting coordinates. This
            argument will take priority if top and traj are also given.
            Currently, this must be an XYZ file, but it shouldn't be too hard
            to implement using openbabel to convert to an XYZ.
        :type top: pathlib.Path or str
        :param top:
        :type traj: pathlib.Path or str
        :param traj:
        :param dict criteria: The criteria for selecting frames from the
            trajectory.
            This is a dict with distance names (or other columns that will
            be in `Universe.data`) as the keys and the values being a
            List-like of min and max values.
            For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will
            select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'
            is between 2.2 and 5.1.
        :type react_dist: str or float
        :param react_dist: Distance to set between the two reacting atoms (
            with indices 20 and 39).
            If this argument as given evaluates to False, no movement/changes
            to the geometry will be made.
        :param List[dict] mgi_dicts:
        :return:
        """
        if status is not None:
            # Restart path: recover all constructor arguments from the
            # previously written status (JSON) file.
            # TODO this could be done (better?) with a classmethod
            self.rerun = True
            self._status = StatusDict(status)
            self._json_name = status
            try:
                self.args = self.status['args']
            except KeyError:
                # NOTE(review): self.log is not assigned until later in
                # __init__, so this error path likely raises AttributeError
                # before logging -- confirm; the module-level logger may be
                # the intended target here.
                self.log.error('Attempting to do a calculation restart, '
                               f'but it seems that the status file {status} '
                               'was empty or at least did not have "args" in '
                               'it.')
                raise
            a = self.args
            base_name = a['base_name']
            ind = a['ind']
            try:
                geometry = a['geometry']
            except KeyError:
                geometry = None
            top = a['top']
            traj = a['traj']
            criteria = a['criteria']
            react_dist = a['react_dist']
            try:
                mgi_dicts = a['mgi_dicts']
            except KeyError:
                # Older status files recorded this under 'ugt_dicts'.
                mgi_dicts = a['ugt_dicts']
            self._base_name = self.status['base_name']
        else:
            # Fresh calculation: record the arguments so a later restart
            # can recover them from the status file.
            self.rerun = False
            self.args = {
                'base_name': base_name,
                'ind': ind, 'geometry': geometry,
                'top': top, 'traj': traj,
                'criteria': criteria,
                'react_dist': react_dist,
                'mgi_dicts': mgi_dicts}
            # ONIOM jobs supply their own geometry, so check_args relaxes
            # the geometry-source requirement for them.
            self.oniom = True if 'oniom' in mgi_dicts[0].keys() else False
            self.check_args()
            self._base_name = '{}-ind{}'.format(base_name, ind)
            self._json_name = '{}.json'.format(self._base_name)
            self._status = StatusDict(self._json_name)
        self.geometry = geometry
        self.top = top
        self.traj = traj
        self.criteria = criteria
        self.react_dist = react_dist
        self.mgi_dicts = mgi_dicts
        # Per-instance logger: DEBUG detail to '<base_name>.log' plus the
        # module's console handler.
        self.log = logging.getLogger(self.__class__.__name__)
        self.log.setLevel(logging.DEBUG)
        f_handler = logging.FileHandler('{}.log'.format(self._base_name))
        f_handler.setLevel(logging.DEBUG)
        f_formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                        '%(levelname)s - %(message)s')
        f_handler.setFormatter(f_formatter)
        self.log.addHandler(f_handler)
        self.log.addHandler(handler)
        self.log.debug('Initializing the log')
        # Runtime state, filled in by _startup_tasks and the run machinery.
        self.mem, self.node = None, None
        self.scratch_path: pathlib.Path = None
        self.last_scratch_path: pathlib.Path = None
        self.n_slots, self.last_node = None, None
        self.cwd_path: pathlib.Path = None
        self.output_scratch_path: pathlib.Path = None
        self.chk_ln_path: pathlib.Path = None
        self.h_rt: str = None
        self.stdout_file: str = None
        self.resub_cl: List[str] = None
        self.job_id: str = None
        self.next_job_id: str = None
        self.resubmitted: bool = False
def check_args(self):
args = self.args.copy()
geom = args.pop('geometry')
top, traj = args.pop('top'), args.pop('traj')
crit = args.pop('criteria')
if (geom is None and
(top is None or traj is None or crit is None) and
(not self.oniom)):
raise ValueError('either geometry or top, traj, and criteria must '
'be given')
for key in args:
if args[key] is None:
raise ValueError(f'Argument "{key}" cannot be None')
    @property
    def status(self):
        """The persistent status mapping (a StatusDict) backing this job."""
        return self._status
    @property
    def current_lvl(self):
        """Index of the calculation level currently being run.

        :raises AttributeError: if no level has been recorded in the status.
        """
        try:
            return self.status['current_lvl']
        except KeyError:
            raise AttributeError('Could not find current level')
@current_lvl.setter
def current_lvl(self, value):
if not (isinstance(value, int) or value is None):
raise TypeError('current_level must be an int or None.'
f'Given type {type(value)}')
self.status['current_lvl'] = value
    @property
    def between_levels(self):
        """Whether the job last stopped between two calculation levels.

        If no explicit flag was recorded (older runs), this is inferred
        from whether the current level's input has a matching output file,
        and the result is written back to the status file.
        """
        # Might need to look for rwf file otherwise should start the
        # calculation again
        self.log.debug('Checking to see if calculation left off between '
                       'calculation levels')
        between = self.status['between_levels']
        if between is None:
            self.log.warning('No key in status for determining if between '
                             'levels currently')
            lvl = self.current_lvl
            if self.status[f'g_in_{lvl}'] == self.status['g_in_curr']:
                out_path = pathlib.Path(
                    self.status['g_in_curr']).with_suffix('.out')
                if out_path.exists():
                    # An output exists for the current input, so that level
                    # actually finished: advance and report "between".
                    self._advance_level()
                    between = True
                else:
                    between = False
            else:
                between = True
            self.status['between_levels'] = between
        return between
@between_levels.setter
def between_levels(self, value):
if not isinstance(value, bool):
raise TypeError(f'between_levels must be a bool, given type '
f'{type(value)}')
self.status['between_levels'] = value
@property
def cleaned_up(self):
cleaned = self.status['cleaned_up']
if cleaned is None:
self.log.warning('Could not find "cleaned_up" in status. Assuming '
'dirty')
cleaned = False
self.status['cleaned_up'] = cleaned
return cleaned
@cleaned_up.setter
def cleaned_up(self, value):
if not isinstance(value, bool):
raise TypeError(f'cleaned_up must be a bool, given type '
f'{type(value)}')
self.status['cleaned_up'] = value
    def _startup_tasks(self):
        """
        Some startup tasks to set variables for later use

        This requires the environment variables HOSTNAME and NSLOTS be set.

        :return: None
        """
        self.log.debug('Running some introductory tasks and setting variables')
        try:
            node = os.environ['HOSTNAME'].split('.')[0]
        except KeyError:
            self.log.exception('Could not find HOSTNAME!')
            raise
        self.node = node
        # Node-local scratch directory for the Gaussian run files.
        scratch_path = pathlib.Path('/net/{}/scratch/theavey'.format(node))
        scratch_path.mkdir(exist_ok=True)
        self.scratch_path = scratch_path
        try:
            self.job_id = os.environ['JOB_ID']
        except KeyError:
            self.log.exception('Could not find JOB_ID!')
            raise
        self.mem = thtools.job_tools.get_node_mem()
        try:
            n_slots = int(os.environ['NSLOTS'])
        except KeyError:
            self.log.exception('Could not find NSLOTS!')
            raise
        self.n_slots = n_slots
        self.h_rt = self._get_h_rt()
        self.log.info(f'Running on {node} using {n_slots} cores and up to '
                      f'{self.mem} GB mem for {self.h_rt} seconds')
        try:
            self.stdout_file = os.environ['SGE_STDOUT_PATH']
            self.log.debug(f'Using stdout path: {self.stdout_file}')
        except KeyError:
            self.log.exception('Could not find SGE_STDOUT_PATH!')
            raise
        if self.rerun:
            # Remember where the previous attempt ran so its files can be
            # recovered from that node's scratch space.
            self.last_node = self.status['current_node']
            self.status['last_node'] = self.last_node
            node_list = self.status['node_list']
            self.last_scratch_path = pathlib.Path(self.status[
                                                      'current_scratch_dir'])
            self.status['last_scratch_dir'] = str(self.last_scratch_path)
            self._get_chk_ln_path()
            self._get_output_scratch_path()
        else:
            self.status['args'] = self.args
            self.status['base_name'] = self._base_name
            node_list = []
        self.status['node_list'] = node_list + [node]
        self.status['current_node'] = node
        self.status['current_scratch_dir'] = str(scratch_path)
        self.status['job_id'] = self.job_id
        self.cwd_path = pathlib.Path('.').resolve()
        self.status['cwd'] = str(self.cwd_path)
        self.log.info('Submitted from {} and will be running in {}'.format(
            self.cwd_path, self.scratch_path))
        self._make_resub_sh_and_cl()
def _get_output_scratch_path(self):
self.log.debug('Getting path to scratch output')
output_scratch_path_ = self.status['output_scratch_path']
if output_scratch_path_ is not None:
self.output_scratch_path = pathlib.Path(
output_scratch_path_)
else:
self.output_scratch_path = self.last_scratch_path.joinpath(
self.status['g_in_curr']).with_suffix('.out')
def _get_chk_ln_path(self):
self.log.debug('Getting path to linked chk file')
chk_ln_path_ = self.status['chk_ln_path']
if chk_ln_path_ is not None:
self.chk_ln_path = pathlib.Path(chk_ln_path_)
else:
self.chk_ln_path = pathlib.Path(
f'{self._base_name}-running.chk').resolve()
@log_exception
def run_calc(self):
"""
The primary function to start (or restart) running a calculation
:return: None
"""
self.log.debug('Welcome. Just starting to run this calculation')
self._startup_tasks()
if self.rerun:
self.log.info('loaded previous status file: {}'.format(
self._json_name))
self.resume_calc()
else:
self.log.warning('No previous status file found. '
'Starting new calculation?')
self.new_calc()
    def _make_rand_xyz(self):
        """Write an XYZ file from a randomly chosen matching trajectory frame.

        The frame is drawn from those satisfying ``self.criteria``; its
        index is recorded in the status file under 'source_frame_num'.

        :return: Resolved path of the written XYZ file.
        :rtype: pathlib.Path
        """
        self.log.debug('Making XYZ file to start calculation')
        import tables
        u = paratemp.Universe(self.top, self.traj, verbosity=0)
        # Another process may hold the HDF5 store; retry until readable.
        while True:
            try:
                u.read_data()
                break
            except tables.HDF5ExtError:
                self.log.warning(
                    'HDF5ExtError raised. Likely because trying to read the '
                    'store at the same time as another process. Waiting 5 '
                    'seconds and trying again.')
                time.sleep(5)
                continue
        frames = u.select_frames(self.criteria, 'QM_frames')
        select = random.choice(frames)
        self.status['source_frame_num'] = int(select)
        system: mda.AtomGroup = u.select_atoms('all')
        xyz_name = self._base_name + '.xyz'
        with mda.Writer(xyz_name, system.n_atoms) as w:
            u.trajectory[select]  # position the trajectory at the chosen frame
            for frag in u.atoms.fragments:
                mda.lib.mdamath.make_whole(frag)
            # This should at least make the molecules whole if not
            # necessarily in the correct unit cell together.
            w.write(system)
        self.log.info(f'Wrote xyz file from frame {select} to {xyz_name}')
        return pathlib.Path(xyz_name).resolve()
    def _move_reactant_atoms(self, xyz_path):
        """Set the separation of the reacting atoms (numbers 20 and 39).

        Atom 20 (index 19) is moved along the 39->20 direction so its
        distance from atom 39 (index 38) equals ``self.react_dist``. The
        original geometry is backed up to '<name>.bak' first and that
        backup is recorded in the status file.
        """
        self.log.debug('Moving reactant atoms (20 and 39) to '
                       f'{self.react_dist}')
        xyz_name = str(xyz_path)
        bak_name = xyz_name + '.bak'
        paratemp.copy_no_overwrite(xyz_name, bak_name)
        self.status['original_xyz'] = bak_name
        self.log.info(f'Copied original geometry to {bak_name}')
        xyz = XYZ(xyz_name)
        diff = xyz.coords[19] - xyz.coords[38]
        direction = diff / np.linalg.norm(diff)  # unit vector from 39 to 20
        xyz.coords[19] = xyz.coords[38] + self.react_dist * direction
        xyz.write(xyz_name)
        self.log.info(f'Wrote updated xyz file to {xyz_name}')
    def new_calc(self):
        """Set up a brand-new calculation at level 0 and start Gaussian.

        The starting geometry comes from (in priority order) an explicit
        geometry file, the ONIOM inputs, or a random trajectory frame.
        """
        self.log.debug('Setting up a new calculation')
        self.current_lvl = 0
        if self.geometry is not None:
            self.log.debug(f'Using provided geometry from {self.geometry}')
            xyz_path = pathlib.Path(self.geometry).resolve()
        elif self.oniom:
            self.log.debug('Using geometry from ONIOM inputs')
            # Placeholder string: ONIOM jobs carry their own geometry.
            xyz_path = 'oniom_input'
        else:
            xyz_path = self._make_rand_xyz()
        if self.react_dist:
            if self.oniom:
                self.log.warning('ONIOM calculation with react_dist not '
                                 'implemented. Ignoring react_dist.')
            else:
                self._move_reactant_atoms(xyz_path)
        self.status['starting_xyz'] = str(xyz_path)
        if not self.oniom and not xyz_path.exists():
            raise FileNotFoundError('Could not find start geometry that was '
                                    f'supposed to be at {xyz_path}')
        com_name = self._make_g_in(xyz_path)
        self._setup_and_run(com_name)
    def _setup_and_run(self, com_name):
        """Link the chk file, resubmit a continuation, and run Gaussian.

        After a complete (not cut-off) run, checks for normal termination,
        advances to the next level, copies files back, and starts the next
        calculation.
        """
        self.log.debug('Starting setup to run Gaussian')
        bn = self._base_name
        # Symlink in cwd pointing at the checkpoint file on scratch.
        chk_ln_path = pathlib.Path(f'{bn}-running.chk').resolve()
        self.chk_ln_path = chk_ln_path
        self.status['chk_ln_path'] = str(chk_ln_path)
        chk_ln_path.symlink_to(self.scratch_path.joinpath(f'{bn}.chk'))
        self.log.info(f'Linked checkpoint file as {chk_ln_path}')
        if not self.resubmitted:
            # resub_calc (defined elsewhere in this class) queues the
            # continuation job before the run starts.
            self.resub_calc()
        # Reset per-run status flags before launching Gaussian.
        self.status['manual_input'] = None
        self.status['manual_restart'] = None
        self.status['g_in_curr'] = com_name
        self.cleaned_up = False
        self.between_levels = False
        self.status['calc_cutoff'] = None
        self.status['gaussian_failed'] = None
        killed = self._run_gaussian(com_name)
        self.status['calc_cutoff'] = killed
        if killed:
            self.log.info('Exited from function running Gaussian because '
                          'SIGUSR2')
        else:
            try:
                self._check_normal_completion(self.output_scratch_path)
                self.log.info(f'Seemed to correctly finish level '
                              f'{self.current_lvl} calculation. Moving on to '
                              f'next level')
                self._advance_level()
            finally:
                # Copy results back even if completion checking failed.
                self._copy_and_cleanup()
            self._next_calc()
    def _advance_level(self):
        """Mark the current level finished and advance the level counter."""
        self.log.debug(f'Advancing from {self.current_lvl}')
        self.between_levels = True
        self.current_lvl += 1
    def _make_g_in(self, xyz_path):
        """Write the Gaussian input file for the current level.

        :param xyz_path: Geometry source passed through to
            tools.make_gaussian_input.
        :return: Name of the written '.com' file.
        :raises Calc.NoMoreLevels: when every configured level is done.
        """
        self.log.debug(f'Making new Gaussian input from {xyz_path}')
        bn = self._base_name
        lvl = self.current_lvl
        com_name = f'{bn}-lvl{lvl}.com'
        try:
            mgi_dict = self.mgi_dicts[lvl]
        except IndexError:
            # Running past the end of mgi_dicts means all levels are done.
            self.log.warning('Seems that there are no more calculation '
                             'levels to complete')
            raise self.NoMoreLevels
        tools.make_gaussian_input(
            out_file=com_name,
            xyz=str(xyz_path),
            job_name=bn,
            checkpoint=f'{bn}.chk',
            rwf=f'{bn}.rwf',
            nproc=self.n_slots, mem=self.mem,
            **mgi_dict
        )
        self.log.info('Wrote Gaussian input for '
                      f'level {lvl} job to {com_name}')
        self.status[f'g_in_{lvl}'] = com_name
        return com_name
    def _run_gaussian(self, com_name):
        """Run g16 in scratch, waiting for completion or a time-out signal.

        :param str com_name: Name of the Gaussian input file in cwd.
        :return: True if the run was cut off by SIGUSR2, False otherwise.
        :raises FileNotFoundError: if the input file is missing.
        """
        self.log.debug('Doing final setup to run Gaussian')
        # NOTE(review): str.replace swaps the *first* 'com' anywhere in the
        # name; a base name containing 'com' would be mangled -- confirm.
        out_name = com_name.replace('com', 'out')
        com_path: pathlib.Path = self.cwd_path.joinpath(com_name)
        if not com_path.exists():
            raise FileNotFoundError('Gaussian input {} not found in '
                                    '{}'.format(com_name, self.cwd_path))
        out_path: pathlib.Path = self.scratch_path.joinpath(out_name)
        self.output_scratch_path = out_path
        self.status['output_scratch_path'] = str(out_path)
        # SIGUSR1 <- _check_proc, meaning Gaussian exited;
        # SIGUSR2 <- queue warning that the time limit is near.
        old_sigusr1 = signal.signal(signal.SIGUSR1, self._signal_catch_done)
        old_sigusr2 = signal.signal(signal.SIGUSR2, self._signal_catch_time)
        cl = ['g16', ]
        killed = False
        with com_path.open('r') as f_in, out_path.open('w') as f_out:
            self.log.info('Starting Gaussian with input {} and writing '
                          'output to {}'.format(com_path, out_path))
            self.status['running'] = True
            proc = subprocess.Popen(cl, stdin=f_in, stdout=f_out,
                                    cwd=str(self.scratch_path))
            self.log.info('Started Gaussian; waiting for it to finish or '
                          'timeout')
            try:
                # Watch for process exit in a thread; block until a signal
                # handler raises one of the two sentinel exceptions.
                thread = threading.Thread(target=self._check_proc, args=(proc,))
                thread.start()
                signal.pause()
            except self.TimesUp:
                killed = True
                proc.terminate()  # Should be within `with` clause?
                self.log.info('Gaussian process terminated because of SIGUSR2')
            except self.GaussianDone:
                self.log.info('Gaussian process completed')
            finally:
                # Restore the handlers that were in place before this run.
                signal.signal(signal.SIGUSR1, old_sigusr1)
                signal.signal(signal.SIGUSR2, old_sigusr2)
                self.status['running'] = False
        return killed
    def _signal_catch_time(self, signum, frame):
        """SIGUSR2 handler: the queue warns that time is nearly up."""
        self.log.warning(f'Caught {signal.Signals(signum).name} signal! '
                         'Trying to quit Gaussian')
        raise self.TimesUp
def _signal_catch_done(self, signum, frame):
self.log.warning(f'Caught {signal.Signals(signum).name} signal! '
f'Likely, this was because Gaussian process exited')
raise self.GaussianDone
def _check_proc(self, proc):
self.log.debug('Started process to check on Gaussian completion')
while proc.poll() is None:
time.sleep(15)
self.log.warning('Gaussian process no longer running. Sending SIGUSR1')
os.kill(os.getpid(), signal.SIGUSR1)
    def _copy_and_cleanup(self):
        """
        Copy output/checkpoint files back from scratch and remove the chk link.

        The destination name for the output depends on how the run ended:
        a plain '<name>.out' when it finished normally, a unique
        '<name>-failed-N.out' when Gaussian failed, and a unique
        '<name>-N.out' when the run was cut off by the queue.
        """
        self.log.debug('Attempting to copy back files and unlink chk file')
        com_name: str = self.status['g_in_curr']
        cc = self.status['calc_cutoff']
        # calc_cutoff may be unset (None) on a crashed run; treat that the
        # same as a killed/cut-off run.
        killed: bool = True if cc is None else cc
        if killed:
            # The job died on another node: read from its recorded scratch dir.
            scratch_path = pathlib.Path(self.status['last_scratch_dir'])
        else:
            scratch_path = self.scratch_path
        if not killed:
            out_path = pathlib.Path(com_name.replace('com', 'out'))
            if self.status['gaussian_failed'] is True:
                out_path = self._make_unique_output_path(
                    f'{out_path.stem}-failed')
        else:
            # strip the '.com' extension to form the base name
            out_path = self._make_unique_output_path(com_name[:-4])
        try:
            paratemp.copy_no_overwrite(str(self.output_scratch_path),
                                       str(out_path))
            self.log.debug(f'Copied back output file to {out_path}')
        except FileExistsError:
            # A byte-identical file already copied back is fine; anything
            # else is an error we must not silently overwrite.
            if filecmp.cmp(str(self.output_scratch_path), str(out_path),
                           shallow=False):
                self.log.debug("Don't need to copy back output as it's already "
                               f"at {out_path}")
            else:
                self.log.error('Output files differ; unable to copy back.\n'
                               f'New: {self.output_scratch_path}\n'
                               f'Existing: {out_path}')
                raise
        if self.chk_ln_path.exists():
            self.chk_ln_path.unlink()
            self.log.debug(f'Unlinked checkpoint run file: {self.chk_ln_path}')
        chk_name = f'{self._base_name}.chk'
        scr_chk_path = scratch_path.joinpath(chk_name)
        if scr_chk_path.exists():
            shutil.copy(str(scr_chk_path), chk_name)
            self.log.debug(f'Copied back checkpoint file to {chk_name}')
        else:
            self.log.debug(f'chk file not found at {scr_chk_path} so not '
                           f'copied back')
        self.cleaned_up = True
def _make_unique_output_path(self, com_base_name):
outs = [str(p) for p in self.cwd_path.glob(com_base_name + '-*.out')]
if not outs:
new_out = f'{com_base_name}-1.out'
else:
def get_ind(s):
match = re.search(r'(\d+)\.out', s)
return int(match.group(1))
outs.sort(key=get_ind)
ind = get_ind(outs[-1])
new_out = f'{com_base_name}-{ind+1}.out'
return pathlib.Path(new_out).resolve()
def _check_normal_completion(self, filepath):
self.log.debug('Attempting to check for completion status of Gaussian')
output = subprocess.check_output(['tail', '-n', '1', str(filepath)],
universal_newlines=True)
if 'normal termination' not in output.lower():
self.log.error(f'Abnormal termination of Gaussian job in output: '
f'{filepath}')
self._qdel_next_job()
self.status['gaussian_failed'] = True
raise self.GaussianError('Gaussian did not finish normally. '
f'See output: {filepath}')
self.status['gaussian_failed'] = False
self.log.info(f'Normal termination of Gaussian job! Output at '
f'{filepath}')
def resub_calc(self):
self.log.info(f'resubmitting job with the following commandline:\n'
f'{self.resub_cl}')
proc = subprocess.run(self.resub_cl,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
self.log.info(f'The following was returned from qsub:\n{proc.stdout}')
if proc.returncode:
self.log.exception('Resubmission of calculation failed with '
f'returncode {proc.returncode}')
match = re.search(r'(\d+)\s\("(\w.*)"\)', proc.stdout)
if match:
self.next_job_id = match.group(1)
self.resubmitted = True
else:
self.log.warning('Could not find submitted job id from qsub '
'command. Will not be able to cancel it if this '
'Calc is completed')
    def _make_resub_sh_and_cl(self):
        """
        Make command line for a calculation for resuming in another job

        Writes an SGE submission script (held on the current job id via
        `hold_jid`) that re-invokes this module with `--restart` on the
        status file, then stores the `qsub` command line in `self.resub_cl`.

        Requires SGE_STDOUT_PATH and JOB_ID for running `qstat`

        :return: None
        """
        self.log.debug('Setting up for calculation resubmission')
        # NOTE(review): notification email address is hardcoded here --
        # consider making it configurable.
        arg_d = dict(pe=f'omp {self.n_slots}', M='theavey@bu.edu', m='eas',
                     l=f'h_rt={self.h_rt}', N=self._base_name, j='y',
                     o=self.stdout_file, notify='', hold_jid=self.job_id)
        resub_dir_path = self.cwd_path.joinpath('resub_scripts')
        resub_dir_path.mkdir(exist_ok=True)
        sub_sh_path = resub_dir_path.joinpath(f'resub-{self._base_name}.sh')
        curr_file = pathlib.Path(__file__).resolve()
        with sub_sh_path.open('w') as sub_sh:
            sub_sh.write('#!/bin/bash -l\n\n')
            # One '#$ -<flag> <value>' line per SGE option defined above
            for key in arg_d:
                sub_sh.write(f'#$ -{key} {arg_d[key]}\n')
            # obabel_module_lines / signal_catch_lines are module-level
            # shell snippets defined elsewhere in this file
            sub_sh.write(obabel_module_lines)
            sub_sh.write(f'\n{curr_file} --restart {self._json_name} &\n\n')
            sub_sh.write(signal_catch_lines)
        self.log.info(f'Wrote resubmission script to {sub_sh_path}')
        self.resub_cl = ['qsub', str(sub_sh_path)]
def _get_h_rt(self):
"""
Find the amount of time requested for the currently running job
Requires JOB_ID and uses `qstat`.
:return:
"""
self.log.debug('Attempting to find requested job run time')
cl = ['qstat', '-j', self.job_id]
output: str = subprocess.check_output(cl, universal_newlines=True)
for line in output.splitlines():
m = re.search(r'h_rt=(\d+)', line)
if m:
self.log.debug(f'Found required info: {m.group(0)}')
return m.group(1)
self.log.error('Could not find requested run time! Assuming 24 hours')
return '24:00:00'
    def resume_calc(self):
        """
        Resume a previously started calculation after a queue restart.

        Dispatches on the recorded state: a manually supplied input file
        takes precedence; otherwise, if the last run finished between
        levels, start the next level; otherwise restart the interrupted
        Gaussian run (from a manual restart input if one was given).
        """
        self.log.debug('Attempting to resume calculation')
        if not self.cleaned_up:
            # Make sure outputs/checkpoints from the killed run were copied
            # back before touching anything else.
            self._copy_and_cleanup()
        manual_input = self.status['manual_input']
        manual_restart = self.status['manual_restart']
        if manual_input is not None:
            # Fresh start from a user-provided input; no chk/rwf copied in.
            com_name = self._update_g_in_memory_cpu_request(manual_input)
            self._setup_and_run(com_name)
        elif self.between_levels:
            # Previous level completed: only the chk file is needed.
            self._copy_in_restart('chk')
            self._next_calc()
        else:
            if manual_restart is not None:
                com_name = self._update_g_in_memory_cpu_request(manual_restart)
            else:
                com_name = self._update_g_in_for_restart()
            # Mid-calculation restart needs both chk and rwf files.
            self._copy_in_restart()
            self._setup_and_run(com_name)
    def _copy_in_restart(self, file='both'):
        """
        Copy chk/rwf restart files from the last scratch dir to this node.

        :param file: 'chk', 'rwf', or 'both' (default). An unrecognized
            value is warned about but still attempted.
        :raises FileNotFoundError: if a requested restart file is missing
            from the last scratch directory
        """
        f_type_options = ['chk', 'rwf']
        if file == 'both':
            f_types = f_type_options
        else:
            if file not in f_type_options:
                self.log.warning(f'unrecognized file type {file}; trying it '
                                 f'anyway...')
            f_types = [file]
        self.log.debug(f'Copying {f_types} files to scratch for restart')
        bn = self._base_name
        for f_type in f_types:
            old_path = self.last_scratch_path.joinpath(f'{bn}.{f_type}')
            if not old_path.exists():
                mes = f'Could not find old {f_type} file at {old_path}'
                self.log.error(mes)
                raise FileNotFoundError(mes)
            new_path = self.scratch_path.joinpath(old_path.name)
            # Don't clobber a newer local copy with a stale one.
            if new_path.exists() and (new_path.stat().st_mtime >
                                      old_path.stat().st_mtime):
                self.log.debug(f'current {f_type} file newer than old '
                               f'({old_path}). Not replacing.')
            else:
                try:
                    shutil.copy(str(old_path), str(self.scratch_path))
                except shutil.SameFileError:
                    # Restarting on the same node: src and dst are identical.
                    self.log.info('Working on the same node; no need to '
                                  f'copy {f_type} file')
        self.log.info(f'If necessary, copied {f_types} files from last '
                      f'scratch directory: {self.last_scratch_path}\nto '
                      f'node scratch dir: {self.scratch_path}')
def _update_g_in_memory_cpu_request(self, com_name=None):
self.log.debug('Updating Gaussian memory and cpu request for this node')
com_name = self.status['g_in_curr'] if com_name is None else com_name
lines = open(com_name, 'r').readlines()
paratemp.copy_no_overwrite(com_name, com_name+'.bak')
with open(com_name, 'w') as f_out:
for line in lines:
if '%mem=' in line:
line = f'%mem={self.mem}GB\n'
elif '%cpu=' in line:
line = f'%cpu=0-{int(self.n_slots)-1}\n'
f_out.write(line)
os.remove(pathlib.Path(com_name+'.bak'))
self.log.info(f'Updated Gaussian input to use all the memory '
f'on this node')
return com_name
def _update_g_in_for_restart(self):
self.log.debug('Updating Gaussian input for restart')
com_name = self._update_g_in_memory_cpu_request()
lines = open(com_name, 'r').readlines()
paratemp.copy_no_overwrite(com_name, com_name+'.bak')
with open(com_name, 'w') as f_out:
for line in lines:
if line.startswith('#'):
line = '# Restart\n'
f_out.write(line)
os.remove(pathlib.Path(com_name+'.bak'))
self.log.info(f'Updated Gaussian input to do a calculation restart')
return com_name
def _next_calc(self):
self.log.debug('Moving on to next level calculation')
out_path = pathlib.Path(self.status['g_in_curr']).with_suffix('.out')
try:
xyz_path_str = self._create_opt_xyz(out_path)
except:
self.log.error(f'Failed to create xyz file for {out_path}')
self._qdel_next_job()
raise
try:
com_name = self._make_g_in(xyz_path_str)
except self.NoMoreLevels:
self.log.info('No more calculation levels to complete! Completed '
f'all {self.current_lvl} levels')
self._qdel_next_job()
return None
except FileExistsError:
self.log.warning(f'Gaussian input file for level '
f'{self.current_lvl} already exists! This file '
f'will be used to start a new calculation')
com_name = f'{self._base_name}-lvl{self.current_lvl}.com'
self._setup_and_run(com_name)
# This will get nested, but likely no more than twice (unless the
# optimizations are very quick). This shouldn't be an issue,
# and should never get near the recursion limit unless something goes
# very wrong.
def _qdel_next_job(self):
self.log.debug('Deleting the re-submitted job from the queue')
if self.next_job_id is not None:
cl = ['qdel', self.next_job_id]
output = subprocess.check_output(cl, stderr=subprocess.STDOUT)
self.log.info('Cancelled job resubmission. qdel said: '
f'{output}')
else:
self.log.warning('Do not know job id of resubmission so '
'unable to delete it.')
def _create_opt_xyz(self, out_path: pathlib.Path):
self.log.debug('Converting output to xyz file for next level')
xyz_path_str = str(out_path.with_suffix('.xyz'))
success = self._run_obabel(out_path, xyz_path_str)
if success:
return xyz_path_str
fchk_path = self._create_fchk()
success = self._run_obabel(fchk_path, xyz_path_str)
if success:
return xyz_path_str
raise self.NoOptXYZError
def _run_obabel(self, out_path: pathlib.Path, xyz_path_str: str):
self.log.debug('Running openbabel to convert geometry')
cl = ['obabel', str(out_path), '-O',
xyz_path_str]
proc = subprocess.run(cl, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
if not (proc.returncode or
'1 molecule converted' not in proc.stdout.lower()):
self.log.info(f'Converted optimized structure to xyz file: '
f'{xyz_path_str}')
return True
mes = (f'obabel failed to convert {out_path} to an xyz file. '
f'It said: {proc.stdout}')
self.log.warning(mes)
return False
def _create_fchk(self):
self.log.debug('Converting chk to formatted checkpoint')
chk_name = f'{self._base_name}.chk'
fchk_name = f'{self._base_name}.fchk'
cl = ['formchk', chk_name, fchk_name]
proc = subprocess.run(cl, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
self.log.debug(f'Ran Gaussian formchk and it said: {proc.stdout}')
proc.check_returncode()
return pathlib.Path(fchk_name).resolve()
    class SignalMessage(Exception):
        """Base class for exceptions raised from signal handlers."""
        pass

    class TimesUp(SignalMessage):
        """Raised on SIGUSR2: the queue is about to kill this job."""
        pass

    class GaussianDone(SignalMessage):
        """Raised on SIGUSR1: the Gaussian subprocess has exited."""
        pass

    class NoMoreLevels(Exception):
        """Raised when all requested calculation levels are complete."""
        pass

    class GaussianError(Exception):
        """Raised when Gaussian terminates abnormally."""
        pass

    class NoOptXYZError(Exception):
        """Raised when no optimized xyz geometry could be extracted."""
        pass
class StatusDict(dict):
    """
    A dict subclass that writes the dict to disk every time a value gets set

    Note, any other action on the dict will not currently trigger a write to
    disk.

    The dict will be written in JSON to the path given at instantiation. If
    there is already a file at that path, it will be read and used as
    the initial definition of the dict. Otherwise, it will be instantiated as
    an empty dict.

    Keys in dictionary:

    * args: a Dict of the arguments given when starting the calculation.

    * current_node: Name of the node on which the job is currently running.
    This is set during :func:`Calc._startup_tasks`. It should be formatted as
    'scc-xxx' (e.g., 'scc-na1').

    * last_node: This is set during :func:`Calc._startup_tasks` if it's a
    restarted calculation. It will be set from the last 'current_node'.

    * node_list: This is a list of past and current nodes on which this
    calculation has run.

    * current_scratch_dir: str of the absolute path to the scratch directory
    on the current node.

    * base_name: the base name for this calculation including the index of
    this calculation.

    * cwd: str of the absolute path from which the current calculation was
    submitted.

    * last_scratch_dir: str of the absolute path to the scratch directory
    from which the last job was run, if this is not a new calculation.

    * source_frame_num: The index of the frame in the trajectory that was
    used to create the initial configuration.

    * original_xyz: str of the name of file with the coordinates as they were
    taken from the trajectory, before moving the reacting atoms to the
    correct distance. This will not be set if no distance correction is made.

    * starting_xyz: str of the name of the file with the coordinates for
    starting the calculation, before any optimization.

    * g_in_0: str of the name of the file with the initial input to Gaussian.

    * g_in_curr: str of the name of the currently running or most recent
    Gaussian input

    * current_lvl: int of current level of calculation running (max is len(
    ugt_dicts))

    * calc_cutoff: bool of whether the job finished or if it was cutoff
    because of running out of time.

    * cleaned_up: bool of whether linked files and old outputs have been
    cleaned up and copied back to the starting directory

    * chk_ln_path: str of path to where checkpoint file is linked in
    submission directory

    * output_scratch_path: str of path to where output is in the scratch
    directory

    * job_id: str of the job number from the sun grid system

    * between_levels: bool of if between levels (not in the middle of a
    calculation). Useful for figuring out where to restart.

    * gaussian_failed: bool of if Gaussian terminated abnormally. Will be None
    while Gaussian is running and before it gets checked.

    * manual_input: str of path to an input file to use to continue the
    calculation. Will not use the chk and rwf files.

    * manual_restart: str of path to an input file to use to restart the
    calculation. This will copy in the chk and rwf files that should be
    referenced in the header of the input.
    """
    def __init__(self, path):
        self.path = pathlib.Path(path).resolve()
        if pathlib.Path(path).is_file():
            # Read with a context manager so the handle is closed promptly
            # (previously the file object from open() was leaked).
            with open(path, 'r') as f_in:
                d = json.load(f_in)
            super(StatusDict, self).__init__(d)
        else:
            super(StatusDict, self).__init__()
        self.log = logging.getLogger(self.__class__.__name__)
        # Writes go to a temp file first, then an atomic rename, so a crash
        # mid-write cannot corrupt the existing status file.
        self.temp_path = self.path.with_suffix('.json.new')

    def __setitem__(self, key, value):
        """Set `key` in the dict and persist the whole dict to disk."""
        try:
            super(StatusDict, self).__setitem__(key, value)
            # Use a context manager so the temp file is flushed and closed
            # before the rename (previously the handle was leaked).
            with open(self.temp_path, 'w') as f_out:
                json.dump(self, f_out, indent=4)
            os.rename(str(self.temp_path), str(self.path))
        except Exception:
            self.log.exception('Exception raised when trying to write status '
                               'file!')
            raise

    # Fallback values returned (with a warning) for keys never explicitly set.
    _defaults = dict(  # args=dict(),  # want this to be an Error
        current_node=None,
        last_node=None,
        node_list=list(),
        current_scratch_dir=None,
        base_name=None,
        cwd=None,
        last_scratch_dir=None,
        source_frame_num=None,
        original_xyz=None,
        starting_xyz=None,
        # current_lvl=None,  # want this to be an Error
        calc_cutoff=None, cleaned_up=None,
        chk_ln_path=None, output_scratch_path=None,
        job_id=None,
        between_levels=None, gaussian_failed=None,
        manual_input=None, manual_restart=None,
        g_in_curr=None,
        **{f'g_in_{i}': None for i in range(20)})

    def __getitem__(self, item):
        """Return the stored value for `item`, or its documented default
        (with a warning) if the key was never set."""
        try:
            return super(StatusDict, self).__getitem__(item)
        except KeyError as ke:
            self.log.warning(f'Tried to access non-existent key "{item}" from '
                             'StatusDict')
            try:
                return self._defaults[item]
            except KeyError:
                raise ke
def _check_environ():
    """
    Validate and complete the SGE/DRMAA environment variables.

    :raises ValueError: if SGE_ROOT is not set. SGE_CELL and
        DRMAA_LIBRARY_PATH are filled in with defaults when missing.
    """
    log.debug('Checking and setting environment variables')
    env = os.environ
    if env.get('SGE_ROOT', None) is None:
        raise ValueError('SGE_ROOT is not defined')
    if env.get('SGE_CELL', None) is None:
        log.debug('Setting SGE_CELL to default')
        env['SGE_CELL'] = 'default'
    if env.get('DRMAA_LIBRARY_PATH', None) is None:
        lib_path = pathlib.Path(
            f"{env['SGE_ROOT']}/lib/linux-x64/libdrmaa.so").resolve()
        log.debug(f'Setting DRMAA_LIBRARY_PATH to {lib_path}')
        env['DRMAA_LIBRARY_PATH'] = str(lib_path)
def _process_paths(paths) -> List[pathlib.Path]:
    """
    Expand a list of files/directories into a list of status-file paths.

    Files are kept as-is; directories contribute their '*.json' members.
    An empty input defaults to the current working directory.
    """
    log.debug('Processing input paths list')
    if not paths:
        log.debug('paths was empty; using current directory')
        paths = [pathlib.Path.cwd()]
    statuses: List[pathlib.Path] = []
    for raw in paths:
        p = pathlib.Path(raw)
        if p.is_file():
            statuses.append(p)
        elif p.is_dir():
            statuses.extend(p.glob('*.json'))
    log.debug(f'Found {len(statuses)} json files to process')
    return statuses
def get_job_statuses(paths: List[str], df: pd.DataFrame = None):
    """
    Collect the status of each calculation into a DataFrame.

    For each '*-indN.json' status file found under `paths`, one row is
    added (indexed by file stem) with the system name, index, the
    status-file 'running' flag, the queue's view of the job, the current
    level, and the failure flag.

    :param paths: folders and/or status-file paths to scan
    :param df: optional existing DataFrame to append to; if None, an empty
        one with the right columns/dtypes is created
    :return: the DataFrame sorted by system and index
    """
    log.info('Getting job statuses')
    _check_environ()
    # Imported lazily: drmaa needs the environment set up by _check_environ.
    import drmaa
    log.debug('Imported DRMAA package')
    statuses = _process_paths(paths)
    if df is None:
        # Create with one dummy row to fix column dtypes, then drop it.
        df = pd.DataFrame(
            {'system': ['str'], 'index': [0],
             'running_sr': [False], 'running_qr': [False],
             'current_lvl': [0], 'gaussian_failed': [False]},
        )
        df.drop(labels=[0], axis=0, inplace=True)
    signal.alarm(10)  # try to open session for 10 seconds (freezes with some
    # unidentified problems with drmaa
    with drmaa.Session() as session:
        signal.alarm(0)  # clear timer if successfully opened session
        log.debug('Opened DRMAA session and finding job statuses')
        for f_status in statuses:
            log.debug(f'Trying file {f_status}')
            name = f_status.stem
            m = re.search(r'(.*)-ind(\d+)', name)
            if m is None:
                # Not a calculation status file; skip it.
                continue
            system, index = m.groups()
            d_status = json.load(f_status.open('r'))
            running = bool(d_status['running'])
            job_id = d_status['job_id']
            try:
                q_running = session.jobStatus(job_id)
            except drmaa.InvalidJobException:
                # Job no longer known to the queue.
                q_running = None
            lvl = int(d_status['current_lvl'])
            failed = d_status['gaussian_failed']
            df.loc[name] = system, int(index), running, q_running, lvl, failed
    return df.sort_values(['system', 'index'])
if __name__ == '__main__':
    # Command-line entry point: report job statuses (-j), restart an
    # existing calculation (--restart), or start a new one.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--base_name', type=str,
                        help='base name for this calculation, likely not '
                             'including any index')
    parser.add_argument('-i', '--index', type=int,
                        help='index of this calculation')
    # TODO use mutually exclusive arguments here
    parser.add_argument('-x', '--xyz', type=str, default=None,
                        help='manual input geometry (will take precedence '
                             'over topology/trajectory frame selection)')
    parser.add_argument('-c', '--top', type=str, default=None,
                        help='topology/structure file (e.g., .gro, .xyz)')
    parser.add_argument('-f', '--trajectory', type=str, default=None,
                        help='trajectory file (e.g., .xtc, .trr, .dcd)')

    def parse_crit(kvv):
        """Parse a 'key=min,max' argument into ('key', (min, max))."""
        k, vv = kvv.split('=')
        vs = tuple((float(v) for v in vv.split(',')))
        return k, vs

    parser.add_argument('-s', '--criteria', action='append',
                        type=parse_crit, metavar='key=min,max',
                        help='criteria for selection of possible frames from '
                             'the trajectory. To provide more than one '
                             'criterion, use this argument multiple times')
    parser.add_argument('-d', '--react_dist', type=float, default=False,
                        help='Distance to set between atoms 20 and 39, '
                             'in angstroms. If this evaluates to False, '
                             'no changes to the geometry will be made')
    parser.add_argument('-g', '--mgi_dicts', type=str,
                        help='path to json file that parses to a list of '
                             'dicts of arguments for make_gaussian_input in '
                             'order to create inputs to Gaussian')
    parser.add_argument('--restart', default=None,
                        help='Path to status file for resuming an already '
                             'started calculation')
    parser.add_argument('-j', '--job_status', nargs='*', default=None,
                        help='Folders or paths to status files to report the '
                             'status of. If nothing is given, status files in '
                             'current directly will be used. This flag cannot '
                             'be used with any other arguments other than -v.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='If used, the stdout log will be set to DEBUG')
    p_args = parser.parse_args()

    if p_args.verbose:
        _setup_log(logging.DEBUG)
    else:
        _setup_log()
    if p_args.job_status is not None:
        # Status-reporting mode: print the table and exit without running
        # any calculation.
        status_df = get_job_statuses(p_args.job_status)
        print(status_df)
        sys.exit(0)
    elif p_args.restart is not None:
        calc = Calc(status=p_args.restart)
    else:
        _mgi_dicts = json.load(open(p_args.mgi_dicts, 'r'))
        _criteria = None if p_args.criteria is None else dict(p_args.criteria)
        calc = Calc(base_name=p_args.base_name,
                    ind=p_args.index,
                    geometry=p_args.xyz,
                    top=p_args.top,
                    traj=p_args.trajectory,
                    criteria=_criteria,
                    react_dist=p_args.react_dist,
                    mgi_dicts=_mgi_dicts
                    )
    calc.run_calc()
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,027
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/gautools/oniom.py
|
"""
A set of tools for setting up Gaussian ONIOM calculations
"""
########################################################################
# #
# This script was written by Thomas Heavey in 2019. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2019 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
import collections
import logging
import re
from typing import List, Union, Tuple, Dict
import MDAnalysis
import numpy as np
import parmed
__all__ = ['OniomUniverse']
log = logging.getLogger(__name__)
class NoStructureException(Exception):
    """Raised when OniomStructure is created without any structure source."""
    pass
class OniomStructure(object):
    """
    Wrap a parmed Structure to emit the Amber MM parameter section for
    Gaussian ONIOM inputs (see `params_section`).
    """
    def __init__(self,
                 structure: parmed.structure.Structure = None,
                 structure_file: str = None,
                 structure_args: Union[Tuple, List] = None,
                 structure_kwargs: dict = None,
                 only_unique_types: bool = False,
                 only_used_terms: bool = True):
        """
        Initialize OniomStructure to create Gaussian Amber MM input section

        :param structure: an existing parmed Structure to use directly; if
            given, the file/args/kwargs parameters are ignored
        :param structure_file: filename (first argument) to be provided to
            instantiate the Structure
        :param structure_args: arguments to be provided to instantiate the
            Structure
        :param structure_kwargs: keyword arguments to be provided to instantiate
            the Structure
        :param only_unique_types: If False (default), all bonds, angles,
            dihedrals, and impropers will be included.
            If True, only the unique elements for each of those will be
            included, which may not define the terms for all possible
            interactions because one type may be used for several atom types.
            For example, there might be an angle_type that should be used for
            "*-C3-C3", but it might only get defined for "H1-C3-C3".
        :param only_used_terms: If True (default), the params returned will
            only include those with all atoms contained in the atoms actually
            used. This can make the section returned shorter if not all atoms
            have been selected, especially if `only_unique_types` is False.
            This will also require `atoms_used_indices` to be defined, which
            it will be if `molecule_section` is accessed first (in an associated
            OniomUniverse).
            If False, all parameters will be given.
        :raises NoStructureException: if neither a Structure nor any
            arguments to build one were provided
        """
        log.debug('Initializing OniomStructure object')
        _struc_args = list() if structure_args is None else structure_args
        _struc_kwargs = dict() if structure_kwargs is None else structure_kwargs
        if structure_file is not None:
            _struc_args = [structure_file] + _struc_args
        if structure is None:
            if (structure_file is None and
                    structure_args is None and
                    structure_kwargs is None):
                log.warning('No arguments given to initialize OniomStructure')
                raise NoStructureException
            else:
                self.structure = parmed.load_file(*_struc_args,
                                                  **_struc_kwargs)
        else:
            self.structure = structure
        self.only_unique_types = only_unique_types
        # Pre-compute both the "unique type" and "all instances" views so
        # `params_section` can pick either at access time.
        self._unique_types = {
            'bonds': self._get_bond_types_uniq(),
            'angles': self._get_angle_types_uniq(),
            'dihedrals': self._get_dihedral_types_uniq(),
            'impropers': self._get_improper_types_uniq()
        }
        self._non_unique_types = {
            'bonds': self._get_bond_types_nu(),
            'angles': self._get_angle_types_nu(),
            'dihedrals': self._get_dihedral_types_nu(),
            'impropers': self._get_improper_types_nu()}
        self._types_dict = {True: self._unique_types,
                            False: self._non_unique_types}
        # Populated externally (e.g., by an associated OniomUniverse) with
        # the indices of the atoms actually selected.
        self.atoms_used_indices = None
        self.only_used_terms = only_used_terms

    @property
    def params_section(self) -> List[str]:
        """
        Parameter specification for Gaussian job using Amber MM

        :return: The lines to be included for Gaussian jobs using Amber MM and
            HardFirst, SoftFirst, or SoftOnly
        """
        # This doesn't seem perfect: the selection functions don't
        # really work because (for example) a single bond_type might
        # be used for different atom types, which is what is
        # currently assumed. Need to find a way to either find all
        # atom types for which it should be used (possibly using
        # wildcards), or just iterate over all bonds/angles/dihedrals
        # instead of iterating over *_types.
        log.debug('Creating params_section in OniomStructure')
        self._check_structure_universe_compatibility()
        lines = list()
        # get types based on if only_unique_types is True or False
        types = self._types_dict[self.only_unique_types]
        if self.only_used_terms and self.atoms_used_indices is not None:
            types = self._remove_unused_terms(types)
        # Each section below collects its lines in a set first to drop
        # duplicate parameter lines.
        lines.append('! Van der Waals parameters\n!\n')
        atom_types = self._get_atom_types()
        param_lines = set()
        for at in atom_types:
            param_lines.add(self._make_atomtype_line(at))
        lines += list(param_lines)
        lines.append('! Stretch parameters\n!\n')
        bond_types = types['bonds']
        param_lines = set()
        for bond in bond_types:
            param_lines.add(self._make_bondtype_line(bond))
        lines += list(param_lines)
        lines.append('! Bend parameters\n!\n')
        angle_types = types['angles']
        param_lines = set()
        for angle in angle_types:
            param_lines.add(self._make_angletype_line(angle))
        lines += list(param_lines)
        lines.append('! Dihedral parameters\n!\n')
        dihedral_types = types['dihedrals']
        param_lines = set()
        for dihed in dihedral_types:
            param_lines.add(self._make_dihedraltype_line(dihed))
        lines += list(param_lines)
        lines.append('! Improper dihedral parameters\n!\n')
        improper_types = types['impropers']
        param_lines = set()
        for dihed in improper_types:
            param_lines.add(self._make_impropertype_line(dihed))
        lines += list(param_lines)
        return lines

    def _get_atom_types(self,) -> set:
        """Return the set of distinct atom types in the structure."""
        atom_types = set()
        for atom in self.structure.atoms:
            atom_types.add(atom.atom_type)
        return atom_types

    @staticmethod
    def _make_atomtype_line(atom_type) -> str:
        """Format one 'VDW' line (sigma/epsilon) for an atom type."""
        sigma = atom_type.urmin
        epsilon = atom_type.uepsilon
        sigma = sigma.value_in_unit(parmed.unit.angstrom)
        epsilon = epsilon.value_in_unit(parmed.unit.kilocalorie_per_mole)
        return f'VDW {atom_type.name: <2} {sigma:4f} {epsilon:4f}\n'

    @staticmethod
    def _get_types(instances, types) -> List:
        """Return one representative instance for each entry in `types`."""
        instance_types = list()
        for _type in types:
            for inst in instances:
                if inst.type == _type:
                    instance_types.append(inst)
                    break
        return instance_types

    def _get_bond_types_uniq(self, ) -> List:
        """One representative bond per bond type."""
        return self._get_types(self.structure.bonds, self.structure.bond_types)

    def _get_bond_types_nu(self) -> List:
        """All bonds in the structure."""
        return self.structure.bonds

    @staticmethod
    def _make_bondtype_line(bond: parmed.topologyobjects.Bond) -> str:
        """Format one 'HrmStr1' (harmonic stretch) line for a bond."""
        a1, a2 = bond.atom1.type, bond.atom2.type
        k = bond.type.uk.value_in_unit(parmed.unit.kilocalorie_per_mole /
                                       parmed.unit.angstrom ** 2)
        req = bond.type.ureq.value_in_unit(parmed.unit.angstrom)
        return f'HrmStr1 {a1:2} {a2:2} {k: <5.1f} {req: <5.3f}\n'

    def _get_angle_types_uniq(self, ) -> List:
        """One representative angle per angle type."""
        return self._get_types(self.structure.angles,
                               self.structure.angle_types)

    def _get_angle_types_nu(self) -> List:
        """All angles in the structure."""
        return self.structure.angles

    @staticmethod
    def _make_angletype_line(angle: parmed.topologyobjects.Angle) -> str:
        """Format one 'HrmBnd1' (harmonic bend) line for an angle."""
        a1, a2, a3 = angle.atom1.type, angle.atom2.type, angle.atom3.type
        k = angle.type.uk.value_in_unit(parmed.unit.kilocalorie_per_mole /
                                        parmed.unit.radian ** 2)
        thetaeq = angle.type.utheteq.value_in_unit(parmed.unit.degree)
        return f'HrmBnd1 {a1:2} {a2:2} {a3:2} {k: >5.1f} {thetaeq:6.2f}\n'

    def _get_improper_types_uniq(self, ) -> List:
        """One representative improper per type."""
        # Somewhere along antechamber -> acpype, the impropers are stored
        # as dihedrals (of GROMACS function 1)
        return self._get_types(self.structure.dihedrals,
                               self.structure.dihedral_types)

    def _get_improper_types_nu(self) -> List:
        """All impropers in the structure."""
        # Somewhere along antechamber -> acpype, the impropers are stored
        # as dihedrals (of GROMACS function 1)
        return self.structure.dihedrals

    @staticmethod
    def _make_impropertype_line(dihed: parmed.topologyobjects.Dihedral
                                ) -> str:
        """Format one 'ImpTrs' (improper torsion) line."""
        a1, a2, a3, a4 = (dihed.atom1.type, dihed.atom2.type,
                          dihed.atom3.type, dihed.atom4.type)
        phi_k = dihed.type.uphi_k.value_in_unit(
            parmed.unit.kilocalorie_per_mole)
        phase = dihed.type.uphase.value_in_unit(parmed.unit.degree)
        per = dihed.type.per
        return (f'ImpTrs {a1:2} {a2:2} {a3:2} {a4:2} '
                f'{phi_k: >5.1f} {phase:5.1f} {per:3.1f}\n')

    def _get_dihedral_types_uniq(self, ) -> List:
        """One representative proper dihedral per type."""
        # Somewhere along antechamber -> acpype, the impropers are stored
        # as dihedrals (of GROMACS function 1)
        # and the dihedrals get stored as Ryckaert-Bellemans
        # dihedrals (function 3)
        return self._get_types(self.structure.rb_torsions,
                               self.structure.rb_torsion_types)

    def _get_dihedral_types_nu(self) -> List:
        """All proper dihedrals in the structure."""
        # Somewhere along antechamber -> acpype, the impropers are stored
        # as dihedrals (of GROMACS function 1)
        # and the dihedrals get stored as Ryckaert-Bellemans
        # dihedrals (function 3)
        return self.structure.rb_torsions

    @staticmethod
    def _make_dihedraltype_line(dihed: parmed.topologyobjects.Dihedral
                                ) -> str:
        """Format one 'AmbTrs' line, expanding an RB torsion into up to
        four periodic terms (periodicities 0-3)."""
        a1, a2, a3, a4 = (dihed.atom1.type, dihed.atom2.type,
                          dihed.atom3.type, dihed.atom4.type)
        dtl = parmed.DihedralTypeList.from_rbtorsion(dihed.type)
        phases = [0] * 4
        phis = [0.] * 4
        for dihed_type in dtl:
            phi_k = dihed_type.uphi_k.value_in_unit(
                parmed.unit.kilocalorie_per_mole)
            phase = dihed_type.uphase.value_in_unit(parmed.unit.degree)
            per = dihed_type.per
            # Index by periodicity; assumes per is in range(4) -- TODO confirm
            phases[per], phis[per] = phase, phi_k
        output = (f'AmbTrs {a1:2} {a2:2} {a3:2} {a4:2} ' +
                  ' '.join([f'{i: >3d}' for i in phases]) + ' ' +
                  ' '.join([f'{i: >6.3f}' for i in phis]) +
                  ' -1.0\n')
        return output

    def _remove_unused_terms(self, types) -> Dict[str, list]:
        """Filter `types` in place, keeping only params whose atoms are all
        among the atoms selected in `atoms_used_indices`."""
        atoms_used = np.array(self.structure.atoms)[self.atoms_used_indices]
        n_atoms_by_type = {'bonds': 2, 'angles': 3, 'dihedrals': 4,
                           'impropers': 4}
        for key in types:
            n_atoms = n_atoms_by_type[key]
            return_params = []
            input_params = types[key]
            for param in input_params:
                # Check param.atom1 .. param.atomN; the for/else keeps the
                # param only when no atom was missing from atoms_used.
                for i in range(n_atoms):
                    i += 1
                    if not getattr(param, f'atom{i}') in atoms_used:
                        break
                else:
                    return_params.append(param)
            types[key] = return_params
        return types

    def _check_structure_universe_compatibility(self):
        """Raise ValueError if `atoms_used_indices` refers to atoms beyond
        this structure's atom list (i.e., Universe/Structure mismatch)."""
        log.debug('Checking compatibility of this structure with given args')
        if self.atoms_used_indices is None:
            return None
        max_ind = max(self.atoms_used_indices)
        try:
            self.structure.atoms[max_ind]
            return None
        except IndexError:
            raise ValueError('given atoms_used_indices requests atoms that '
                             'are not in this structure. Check to make sure '
                             'this Structure is compatible with the Universe.')
class OniomUniverse(object):
    """
    Object to help easily create Gaussian ONIOM input sections.
    There are a few ways to instantiate this object.
    First, it can be instantiated with an existing MDAnalysis Universe
    instance and an existing parmed Structure instance:
    >>> univ = MDAnalysis.Universe('geom.pdb', 'traj.xtc')
    >>> structure = parmed.load_file('topology.top')
    >>> ou = OniomUniverse(univ=univ, structure=structure)
    Alternatively, the Universe and/or Structure can be instantiated here:
    >>> ou = OniomUniverse(univ_args=['geom.pdb', 'traj.xtc'], \
        structure_file='topology.top')
    Any combination of these methods can also be used.
    Also, `high_select` and `low_select` need to be provided during or
    after instantiation before accessing the Gaussian input sections:
    >>> ou = OniomUniverse(univ=univ, structure=structure, \
        high_select='resid 2-3')
    >>> ou.low_select = 'protein or byres around 5 resid 2-3'
    Then, the Gaussian input sections can be created:
    >>> mol_sec = ou.molecule_section
    >>> bond_sec = ou.bonds_section
    >>> param_sec = ou.params_section
    Note, if you do not need the parameter section (e.g., only using
    already included AMBER atom types), the structure or structure files
    need not be specified. That portion is optional, and `molecule_section` and
    `params_section` do not depend on the structure.
    The interfaces between high and low are not treated specially, so link
    atoms will need to be manually treated. That can be done after writing
    to an input file, or using something like::
        interface_atom = univ.select_atoms('bynum 88')
        interface_atom_index = ou.atom_to_line_num[interface_atom] - 1
        # (because the dict gives a (Gaussian) 1-based index)
        interface_atom_line = mol_sec[interface_atom_index][:-2]+' H-H1-0.1\\n'
        # (remove newline, add link atom definition and newline)
        mol_sec[interface_atom_index] = interface_atom_line
    """
    def __init__(self, univ: MDAnalysis.Universe = None,
                 structure: parmed.Structure = None,
                 high_select: str = None,
                 low_select: str = None,
                 overlap_okay: bool = False,
                 univ_args=None, univ_kwargs=None,
                 structure_file=None,
                 structure_args=None, structure_kwargs=None,
                 freeze_dict: dict = None):
        """
        Initialize OniomUniverse to create Gaussian ONIOM input sections
        :param univ: Universe with the geometry and bonding information for the
            system of interest. Note, the geometry must include bonding
            information or MDAnalysis will have to be told to guess them:
            https://www.mdanalysis.org/docs/documentation_pages/topology/guessers.html#MDAnalysis.topology.guessers.guess_bonds
        :param structure: Structure with atom types, bonds, angles,
            etc. Note, this is only currently written to work with AMBER (or
            really GAFF as made by Antechamber/AcPype), and it is unclear
            how it will work for other force fields or implementations.
        :param high_select: Selection string for the atoms to be included in the
            "high" calculation
        :param low_select: Selection string for the atoms to be included in the
            "low" calculation
        :param overlap_okay: If True, overlap between the high and low
            selections will be ignored and anything in the overlap will be
            included in the "high" region
        :param univ_args: arguments to be provided to instantiate the Universe
        :param univ_kwargs: keyword arguments to be provided to instantiate
            the Universe
        :param structure_file: filename (first argument) to be provided to
            instantiate the Structure
        :param structure_args: arguments to be provided to instantiate the
            Structure
        :param structure_kwargs: keyword arguments to be provided to instantiate
            the Structure
        :param freeze_dict: mapping from levels ('H' and 'L') to freeze
            commands (0 for unfrozen, -1 for frozen).
            Default is `{'H': 0, 'L': -1}`
        """
        log.debug('Initializing OniomUniverse object')
        univ_args = list() if univ_args is None else univ_args
        # probably invalid anyway because Universe can't be kwarg only
        univ_kwargs = dict() if univ_kwargs is None else univ_kwargs
        if univ is None:
            self.universe = MDAnalysis.Universe(*univ_args, **univ_kwargs)
        else:
            self.universe = univ
        self._check_universe()
        # The Structure is optional: only params_section needs it.
        try:
            self.oniom_structure = OniomStructure(
                structure=structure,
                structure_file=structure_file,
                structure_args=structure_args,
                structure_kwargs=structure_kwargs)
        except NoStructureException:
            self.oniom_structure = None
        self.high_select = high_select
        self.low_select = low_select
        self.overlap_okay = overlap_okay
        # Filled in by molecule_section: maps Atom -> 1-based input line number.
        self.atom_to_line_num = dict()
        self.n_atoms_in_input = 0
        self.freeze_dict = ({'H': 0, 'L': -1} if freeze_dict is None
                            else freeze_dict)
    @property
    def molecule_section(self,) -> List[str]:
        """
        Molecule specification lines for ONIOM calculation
        This defines a dict mapping `Atom`s to atom number (line number in
        input) as `self.atom_to_line_number`, and number of atoms included in
        the input as `self.n_atoms_in_input`.
        :return: The lines to be written into the input
        :raises SelectionError: if high_select or low_select is unset
        :raises ValueError: if the selections overlap (and overlap_okay is
            False) or the line count disagrees with the selection sizes
        """
        log.debug('Creating molecule section for OniomUniverse')
        if self.high_select is None or self.low_select is None:
            raise self.SelectionError('Both `high_select` and `low_select` '
                                      'must be specified')
        high_atoms = self.universe.select_atoms(self.high_select)
        low_atoms = self.universe.select_atoms(self.low_select)
        n_atoms_in_both = high_atoms.intersection(low_atoms).n_atoms
        if n_atoms_in_both and not self.overlap_okay:
            log.error('High and low selections are not mutually exclusive and '
                      'overlap_okay is not True')
            raise ValueError('The selections are not mutually exclusive. '
                             'Make mutually exclusive or set overlap_okay=True')
        atoms_used_indices = []
        lines = []
        line_num = 0
        # Iterate all atoms in Universe order; atoms in neither selection are
        # skipped. Overlapping atoms get 'H' because high is checked first.
        for atom in self.universe.atoms:
            if atom in high_atoms:
                level = 'H'
            elif atom in low_atoms:
                level = 'L'
            else:
                continue
            line_num += 1
            lines.append(self._make_atom_line(atom=atom, level=level,))
            atoms_used_indices.append(atom.index)
            self.atom_to_line_num[atom] = line_num
        # Sanity check: lines written must equal the union of the selections.
        sel_n_atoms = (high_atoms.n_atoms + low_atoms.n_atoms - n_atoms_in_both)
        if line_num != sel_n_atoms:
            mes = ('Number of lines and n_atoms in selections differ '
                   f'({line_num} and {sel_n_atoms})')
            log.error(mes)
            raise ValueError(mes)
        self.n_atoms_in_input = sel_n_atoms
        if self.oniom_structure is not None:
            self.oniom_structure.atoms_used_indices = atoms_used_indices
        return lines
    @property
    def bonds_section(self, ) -> List[str]:
        """
        Bond specifications for a Gaussian job with `geom=connectivity`
        :return: The lines to be written in the input after the molecule
            specification
        :raises ValueError: if molecule_section has not been generated yet
        """
        log.debug('Creating bonds section for OniomUniverse')
        if self.n_atoms_in_input == 0:
            log.error('No atoms yet picked for this OniomUniverse')
            raise ValueError('No atoms have been put into the molecule '
                             'specification yet so the bonds cannnot yet be '
                             'defined. Either run `molecule_section` '
                             'first or check your selections.')
        atln = self.atom_to_line_num
        bond_dict = collections.defaultdict(list)
        for bond in self.universe.bonds:
            a1, a2 = bond.atoms
            try:
                # KeyError means a bonded atom was not written to the input;
                # such bonds are silently dropped.
                bond_dict[atln[a1]].append(f'{atln[a2]} 1.0')
            except KeyError:
                continue
        lines = []
        for i in range(self.n_atoms_in_input):
            i += 1  # use 1-based indexing
            bonds = ' '.join(bond_dict[i])
            lines.append(f'{i} {bonds}\n')
        return lines
    @property
    def params_section(self):
        # Delegate to the optional OniomStructure; raise if none was given.
        if self.oniom_structure is None:
            log.warning('No structure for this OniomUniverse but '
                        'params_section was accessed')
            raise NoStructureException('No Structure given for this '
                                       'OniomUniverse')
        else:
            return self.oniom_structure.params_section
    params_section.__doc__ = OniomStructure.params_section.__doc__
    # Element symbol: one capital optionally followed by one lowercase letter.
    _re_element = re.compile(r'[A-Z][a-z]?')
    def _get_elem(self, atom: MDAnalysis.core.groups.Atom) -> str:
        """
        Get element name from Atom object
        This counts on any multi-letter element being named as Ca (capital
        followed by lower case). Also, single-letter element names must be
        capitalized and not followed by a lower-case letter.
        An alternative method could be a mapping from masses to elements, or
        use the parmed Structure which knows element information.
        :raises ValueError: if the atom name does not start like an element
        """
        elem_match = self._re_element.match(atom.name)
        if elem_match:
            return elem_match.group(0)
        else:
            mes = f'Could not find element for atom {atom}'
            log.error(mes)
            raise ValueError(mes)
    def _make_atom_line(self, atom: MDAnalysis.core.groups.Atom,
                        level: str,) -> str:
        # Format: Elem-Type-Charge freeze x y z level
        # NOTE(review): ':3f' / ':4f' specify minimum field *width*, not
        # decimal places ('.3f' / '.4f' may have been intended) — confirm
        # against the expected Gaussian input formatting.
        elem = self._get_elem(atom)
        line = (f'{elem}-'
                f'{atom.type}-'
                f'{atom.charge:3f} {self.freeze_dict[level]} '
                f'{atom.position[0]:4f} '
                f'{atom.position[1]:4f} '
                f'{atom.position[2]:4f} {level}\n')
        return line
    def _check_universe(self):
        # Fail fast with actionable messages if the Universe lacks the
        # attributes (bonds, charges, positions) the generators rely on.
        log.debug('Checking attributes of this Universe')
        if not hasattr(self.universe, 'bonds'):
            mes = ('This Universe does not have defined bonds. Try '
                   'an input with defined bonds or try `guess_bonds=True`.')
            log.error(mes)
            raise ValueError(mes)
        if not hasattr(self.universe.atoms[0], 'charge'):
            mes = ('The atoms in this Universe do not have charge defined.'
                   'Try a format with defined charge.')
            log.error(mes)
            raise ValueError(mes)
        if not hasattr(self.universe.atoms[0], 'position'):
            mes = ('The atoms in this Universe do not have position defined.'
                   'Try a format with defined positions or also load in a '
                   'trajectory file (can just be a pdb or xyz file).')
            log.error(mes)
            raise ValueError(mes)
    class SelectionError(ValueError):
        # Raised when high_select/low_select are missing.
        pass
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,028
|
theavey/QM-calc-scripts
|
refs/heads/master
|
/sumHills/interfaceToSumHills.py
|
#! /usr/bin/env python3.4
########################################################################
# #
# This script was written by Thomas Heavey in 2015. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2015 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
# This is written to work with python 3.4 because it should be good to
# be working on the newest version of python.
import glob
import argparse
import subprocess
import readline
import os
import shutil
from datetime import datetime
__version__ = '0.2.2'
# Command-line interface: options controlling how the PLUMED HILLS file is
# summed and how the resulting free-energy data is exported for Mathematica.
parser = argparse.ArgumentParser(description='Use PLUMED utility to sum '
                                             'HILLS and then put into '
                                             'Mathematica friendly format')
parser.add_argument('-H', '--hills', default='HILLS',
                    help='name of the HILLS file')
parser.add_argument('-s', '--stride', default=10000,
                    help='specify the stride for integrating '
                         'hills file (default 10000)')
parser.add_argument('-a', '--ask', action='store_true',
                    help='Flag for specifying that it should ask for min, '
                         'max, bin, and spacing')
parser.add_argument('-v', '--verbose', action='store_true',
                    help='make script more verbose')
parser.add_argument('-f', '--folder', default='SumHills',
                    help='Folder in which this will be run. Can be '
                         'deleted automatically using -c 3.')
parser.add_argument('-t', '--temp_file', default='temp_data_file.m',
                    help='File in which to store all the data')
parser.add_argument('-n', '--var_name', default='summedHills',
                    help='Name of variable to be assigned for Mathematica')
parser.add_argument('-T', '--template', default='sumHillsTempl.m',
                    help='Output template file')
parser.add_argument('-o', '--output_name', default='summedHills.m',
                    help='Name of the file to be output')
parser.add_argument('-e', '--exists', action='store_true',
                    help='Use this argument if the fes data already exists')
parser.add_argument('-c', '--clean', type=int, default=2,
                    help='Argument for how much to clean up\n'
                         '0 does not delete or move anything\n'
                         '>0 moves output to starting folder '
                         'and deletes copy of HILLS file\n'
                         '>1 deletes temp data file\n'
                         '>2 deletes temp folder and contents\n'
                         'default is 2')
parser.add_argument('--version', action='version',
                    version='%(prog)s v{}'.format(__version__))
# `args` is read as a module-level global by all the functions below.
args = parser.parse_args()
# An input function that can prefill in the text entry
def rlinput(prompt, prefill=''):
    """Prompt for terminal input with *prefill* already typed into the line.

    Uses a readline startup hook to pre-populate the editable buffer; the
    hook is always cleared afterwards, even if input() raises.
    """
    def _insert_prefill():
        readline.insert_text(prefill)
    readline.set_startup_hook(_insert_prefill)
    try:
        # raw_input in py2; input is the py3 equivalent
        return input(prompt)
    finally:
        readline.set_startup_hook()
def is_number(s):
    """Return True if *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def run_plumed_sum_hills():
    """This function takes no arguments and returns nothing.
    Using the command line values stored in args, it will run the PLUMED
    sum_hills utility.

    Builds the mpirun/plumed command line from the module-level `args`;
    when `args.ask` is set, interactively prompts for grid min/max, bin
    count, and spacing, validating that min and max come as a pair.
    Output from the subprocess is streamed into plumed_output.log.
    """
    command_line = ['mpirun', '-n', '1', 'plumed', 'sum_hills']
    command_line += ['--hills', args.hills]
    if args.verbose:
        print('hills file name is {}'.format(args.hills))
    command_line += ['--stride', str(args.stride)]
    if args.verbose:
        print('data output stride is {}'.format(args.stride))
    if args.ask:
        # Interactive mode: prompt for grid parameters for sum_hills.
        print('length of min, max, bin, and spacing should be equal '
              'to number of CVs\n'
              'For 2 CVs for example, format as "-pi,-pi"')
        minim = str(rlinput('min = '))
        maxim = str(rlinput('max = '))
        spacing = str(rlinput('grid spacing = '))
        bins = str(rlinput('num of bins = '))
        # Check input arguments for PLUMED sum_hills
        # min/max must be given together; re-prompt with the previous
        # answer prefilled if only one was provided.
        if bins:
            if minim or maxim:
                if not (minim and maxim):
                    print('If you give a min or max, you need min, '
                          'max, and bin or spacing')
                    minim = rlinput('min = ', minim)
                    maxim = rlinput('max = ', maxim)
        else:
            if spacing:
                if minim or maxim:
                    if not(minim and maxim):
                        print('If you give a min or max, you need min, '
                              'max, and bin or spacing')
                        minim = rlinput('min = ', minim)
                        maxim = rlinput('max = ', maxim)
            else:
                # Neither bins nor spacing given yet: ask again for both.
                if minim or maxim:
                    if not(minim and maxim):
                        print('If you give a min or max, you need min, '
                              'max, and bin or spacing')
                        minim = rlinput('min = ', minim)
                        maxim = rlinput('max = ', maxim)
                spacing = rlinput('grid spacing = ')
                bins = rlinput('num of bins = ')
        # Put these arguments into the list of arguments to be submitted
        if bins:
            command_line += ['--bin', bins]
            if args.verbose:
                print('number of bins is {}'.format(bins))
        if spacing:
            command_line += ['--spacing', spacing]
            if args.verbose:
                print('grid spacing: {}'.format(spacing))
        if minim or maxim:
            if minim and maxim:
                command_line += ['--min', str(minim),
                                 '--max', str(maxim)]
                if args.verbose:
                    print('min: {}, max: {}'.format(minim, maxim))
            else:
                raise ValueError('Need to have both min and max!')
    command_line_str = ' '.join(command_line)
    print('command line argument is:\n{}'.format(command_line_str))
    print('Running PLUMED sum_hills utility...')
    # Run the PLUMED sum_hills utility and save/print the output as it comes
    with open('plumed_output.log', 'w') as log_file:
        with subprocess.Popen(command_line,
                              stdout=subprocess.PIPE, bufsize=1,
                              universal_newlines=True) as proc:
            for line in proc.stdout:
                log_file.write(line)
                if args.verbose:
                    print(line, end='')
    #if proc.returncode != 0:
    # todo check to make sure it ran okay? maybe subprocess does that already
    print('Done running PLUMED sum_hills utility')
def setup_folder():
    """Create the working folder, copy the HILLS file into it, and cd there.

    Reads the folder name and HILLS path from the module-level `args`.
    The folder is created if missing (no error if it already exists).
    """
    target = args.folder
    # Make the working folder, don't raise error if it already exists
    os.makedirs(target, exist_ok=True)
    copied_path = shutil.copy(args.hills, target)
    if args.verbose:
        print('HILLS file copied to {}'.format(copied_path))
    os.chdir(target)
def read_plumed_stuff():
    """This function takes no arguments and returns nothing.
    It will save the output data from plumed to a formatted
    temporary file that can then be read to put the data
    into the file that Mathematica can read.
    It defines the global variables num_of_cvs, formatted_data, and
    fes_file_names.
    """
    global num_of_cvs, formatted_data, fes_file_names
    print('Reading PLUMED output files')
    fes_file_names = glob.glob('fes*.dat')
    # Make sure the list is in the proper order:
    # sorting alphanumerically then by length puts fes2 before fes10.
    fes_file_names.sort() # sort files alphanumerically
    fes_file_names.sort(key=len) # sort files by length
    # Find number of CVs:
    # At least in the current implementation of PLUMED, the output is
    # the list of CV coordinates, then the height there, then the
    # derivative with respect to each of the CVs, hence the
    # (number of fields - 1) / 2.
    with open(fes_file_names[0], 'r') as file:
        for line in file:
            if line.startswith('#'):
                continue
            num_fields = len(line.split())
            num_of_cvs = (num_fields - 1) / 2.
            if num_of_cvs.is_integer():
                num_of_cvs = int(num_of_cvs)
            else:
                print('number of CVs found to be {}!'.format(num_of_cvs))
                # NOTE(review): rlinput returns a str; num_of_cvs is later
                # used in slice arithmetic (num_of_cvs+1), which would raise
                # TypeError if this branch runs — confirm intended handling.
                num_of_cvs = rlinput('Real number of CVs = ',
                                     str(int(num_of_cvs)))
            break
    all_data = []
    for file in fes_file_names:
        f_data = []
        if args.verbose:
            print('Reading file {}'.format(file))
        with open(file, 'r') as crf:
            l_data = []
            for line in crf:
                if line.startswith('#'):
                    continue
                try:
                    # Keep the CV coordinates plus the free-energy value,
                    # dropping the derivative columns.
                    if is_number(line.split()[0]):
                        l_data += [', '.join(line.split()[0:(num_of_cvs+1)])]
                except IndexError:
                    # Blank lines in files have length 0
                    continue
            f_data += ['},\n{'.join(l_data)]
        all_data += f_data
    # Wrap everything in Mathematica nested-list braces.
    formatted_data = '{{{' + '}},\n{{'.join(all_data) + '}}}\n\n'
    with open(args.temp_file, 'w') as tf:
        tf.write(formatted_data)
    print('Done reading PLUMED output data')
def data_into_mfile():
    """This function takes no arguments and returns nothing.
    It will take data saved to a temporary file from read_plumed_stuff
    and put it into the template .m file so that it can be read into
    Mathematica.

    Template lines starting with '#' are treated as format strings and
    filled in from the `replacements` dict; other lines are copied as-is.
    Relies on the globals num_of_cvs and formatted_data set by
    read_plumed_stuff().
    """
    print('Putting data into output file...')
    about_content = []
    about_content += ['"Number of CVs: {}"'.format(num_of_cvs)]
    about_content += ['"Number of points per time chunk: '
                      '{}"'.format(args.stride)]
    about_content += ['"Originally processed on {}"'.format(datetime.now())]
    about_content += ['"Processed with '
                      '{} v{}"'.format(os.path.basename(__file__),
                                       __version__)]
    about = '{' + ', '.join(about_content) + '}'
    replacements = dict(varname=args.var_name, data=formatted_data,
                        numcvs=num_of_cvs, stride=args.stride,
                        about=about)
    # Below shouldn't be needed, but leaving for backward compatibility.
    replacements['spacing'] = '(Print["getGridSize not currently ' \
                              'defined"]; $Failed)'
    print(replacements.keys())
    with open(args.template, 'r') as template, \
            open(args.output_name, 'w') as output:
        for line in template:
            if line.startswith('#'):
                # Remove the '#'
                line = line[1:]
                try:
                    # Unpack the dict, then do the replacements
                    output.write(line.format(**replacements))
                except KeyError as e:
                    print('Error! Key {} not found!'.format(e))
                    choice = input('(a)bort or (s)kip? ')
                    # NOTE(review): `choice in 'abort'` is substring
                    # membership — an empty answer matches both branches,
                    # and single letters like 'b' match 'abort'. Probably
                    # intended startswith('a') / startswith('s') — confirm.
                    if choice in 'abort':
                        raise e
                    if choice in 'skip':
                        continue
            else:
                output.write(line)
    print('Output saved as {}'.format(args.output_name))
def clean_up():
    """This function takes no arguments and returns nothing.
    It will ask if the temp data should be deleted, and if so, will clean
    it all up. It can also move the output file back to the original
    directory.
    Default is to move output file to the starting directory, removes
    the HILLS file if copied, and removes the temp data file. Use -c
    argument to change this behavior.
    """
    print('Cleaning up...')
    # Cleanup levels are cumulative: each level includes all lower ones.
    if args.clean > 0:
        if args.verbose:
            print('Copying {} to {}...'.format(args.output_name, current_dir))
        shutil.copy(args.output_name, current_dir)
        if not args.exists:
            # If HILLS file was copied (only true if args.exists is false)
            # delete the copy
            if args.verbose:
                print('Removing {}...'.format(args.hills))
            os.remove(args.hills)
    if args.clean > 1:
        if args.verbose:
            print('Removing {}...'.format(args.temp_file))
        os.remove(args.temp_file)
    if args.clean > 2:
        # Remove the entire working folder created by setup_folder().
        temp_folder = current_dir + '/' + args.folder
        if args.verbose:
            print('Removing {} and contents...'.format(temp_folder))
        shutil.rmtree(temp_folder)
    print('Done cleaning up files/folders.')
# --- Script body: run the pipeline in order. ---
# Remember where we started so clean_up() can copy results back here.
current_dir = os.getcwd()
if not args.exists:
    # Only generate fes*.dat files when they don't already exist (-e flag).
    setup_folder()
    run_plumed_sum_hills()
read_plumed_stuff()
data_into_mfile()
clean_up()
print('Done!')
|
{"/gautools/tools.py": ["/gautools/oniom.py"]}
|
16,050
|
kvnlnt/native-demo
|
refs/heads/master
|
/www/settings.py
|
class Config(object):
    """Base configuration shared by all environments."""
    DEBUG = False
    SECRET_KEY = 'secret key'  # NOTE(review): hard-coded placeholder secret — override in deployment
class ProdConfig(Config):
    """Production configuration: inherits DEBUG=False from Config."""
    CACHE_TYPE = 'simple'
class TestConfig(Config):
    """Test configuration: debug on, caching and debug-toolbar redirects off."""
    DEBUG = True
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'null'
class DevConfig(Config):
    """Development configuration (currently identical to TestConfig)."""
    DEBUG = True
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'null'
|
{"/www/__init__.py": ["/www/controller.py"]}
|
16,051
|
kvnlnt/native-demo
|
refs/heads/master
|
/www/controller.py
|
"""
:synopsis: Main blueprint router and controller
"""
from flask import Blueprint, render_template
# Blueprint holding every site route; registered on the app in create_app().
main = Blueprint('main', __name__, template_folder='templates')
@main.route('/')
def home():
    """Render the landing page."""
    template = "pages/home.html"
    return render_template(template)
@main.route('/voice')
def voice():
    """Render the voice page (no submenu)."""
    context = {"section": ".voice", "has_submenu": False}
    return render_template("pages/voice.html", **context)
@main.route('/solutions')
def solutions():
    """Render the solutions landing page (with submenu)."""
    context = {"section": ".solutions", "has_submenu": True}
    return render_template("pages/solutions.html", **context)
@main.route('/solutions/unified-communications')
def unified_communication():
    """Render the unified-communications solutions page (with submenu)."""
    context = {"section": ".solutions", "has_submenu": True}
    return render_template("pages/unified-communications.html", **context)
@main.route('/solutions/call-center')
def call_center():
    """Render the call-center solutions page (with submenu)."""
    context = {"section": ".solutions", "has_submenu": True}
    return render_template("pages/call-center.html", **context)
@main.route('/solutions/pbx-and-phone-systems')
def pbx_and_phone_systems():
    """Render the PBX and phone systems solutions page (with submenu)."""
    context = {"section": ".solutions", "has_submenu": True}
    return render_template("pages/pbx-and-phone-systems.html", **context)
@main.route('/pricing')
def pricing():
    """Render the pricing page (no submenu)."""
    context = {"section": ".pricing", "has_submenu": False}
    return render_template("pages/pricing.html", **context)
@main.route('/developers')
def developers():
    """Render the developers page (no submenu)."""
    context = {"section": ".developers", "has_submenu": False}
    return render_template("pages/developers.html", **context)
@main.route('/partners')
def partners():
    """Render the partners page (no submenu)."""
    context = {"section": ".partners", "has_submenu": False}
    return render_template("pages/partners.html", **context)
@main.route('/patterns')
def patterns():
    """Render the patterns (style-guide) page.

    Fixed: every other route in this blueprint passes `has_submenu`; this
    one passed a typo'd `has_subpage`, so templates reading `has_submenu`
    saw it undefined here.
    """
    return render_template(
        "pages/patterns.html",
        section=".patterns",
        has_submenu=False,
    )
@main.route('/login')
def login():
    """Render the login page (no submenu)."""
    context = {"section": ".login", "has_submenu": False}
    return render_template("pages/login.html", **context)
|
{"/www/__init__.py": ["/www/controller.py"]}
|
16,052
|
kvnlnt/native-demo
|
refs/heads/master
|
/tests/test_controller.py
|
#! ../env/bin/python
# -*- coding: utf-8 -*-
from www import create_app
class TestController:
    """Smoke tests asserting each public route returns HTTP 200."""
    def setup(self):
        # Build the app under the test config and grab a test client.
        app = create_app('www.settings.TestConfig', env='dev')
        self.app = app.test_client()
    def teardown(self):
        # No session/tables to destroy.
        # Fixed: `print "done"` is Python 2 statement syntax (SyntaxError on
        # Python 3); the call form works on both interpreters.
        print("done")
    def _assert_ok(self, endpoint):
        # Helper: GET *endpoint* and verify a 200 response.
        response = self.app.get(endpoint)
        assert response.status_code == 200
    def test_home(self):
        self._assert_ok('/')
    def test_voice(self):
        self._assert_ok('/voice')
    def test_solutions(self):
        self._assert_ok('/solutions')
    def test_pricing(self):
        self._assert_ok('/pricing')
    def test_developers(self):
        self._assert_ok('/developers')
    def test_partners(self):
        self._assert_ok('/partners')
    def test_login(self):
        self._assert_ok('/login')
|
{"/www/__init__.py": ["/www/controller.py"]}
|
16,053
|
kvnlnt/native-demo
|
refs/heads/master
|
/www/__init__.py
|
#! ../env/bin/python
import os
from flask import Flask, g
from www.controller import main
def create_app(object_name, env="prod"):
    """
    A Flask application factory, as explained here:
    http://flask.pocoo.org/docs/patterns/appfactories/

    Arguments:
        object_name: the python path of the config object,
                     e.g. ark.settings.ProdConfig
        env: The name of the current environment, e.g. prod or dev
    """
    application = Flask(__name__)
    application.config.from_object(object_name)
    application.config['ENV'] = env
    # Attach all site routes.
    application.register_blueprint(main)
    return application
if __name__ == '__main__':
    # Import the config for the proper environment using the
    # shell var APPNAME_ENV
    env = os.environ.get('APPNAME_ENV', 'prod')
    # e.g. 'dev' -> 'www.settings.DevConfig'
    app = create_app('www.settings.%sConfig' % env.capitalize(), env=env)
    app.run()
|
{"/www/__init__.py": ["/www/controller.py"]}
|
16,057
|
chris-hutch/DBScanDroid
|
refs/heads/master
|
/dbscan_android_malware.py
|
from scipy.spatial.distance import pdist, squareform
from sklearn.decomposition import TruncatedSVD
import utils
import csv
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import vstack, csr_matrix
from collections import OrderedDict
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.feature_extraction import DictVectorizer
def create_sha256_dict(ground_truth_dest):
    """Map each APK sha256 hash to its malware family name.

    :param ground_truth_dest: path to a CSV with 'sha256' and 'family' columns
    :return: dict of sha256 -> family
    """
    mapping = {}
    with open(ground_truth_dest, mode='r') as ground_truth_file:
        for record in csv.DictReader(ground_truth_file):
            mapping[record['sha256']] = record['family']
    return mapping
def construct_feature_vector_matrix(vocab: OrderedDict,
                                    feature_vector_hashes: list,
                                    apk_sha256_map: dict,
                                    feature_vector_parent: str):
    """Build a sparse binary feature matrix plus integer ground-truth labels.

    :param vocab: ordered mapping whose keys are all known feature names
    :param feature_vector_hashes: APK hashes (also filenames under
        feature_vector_parent) to vectorize, one row each
    :param apk_sha256_map: sha256 -> family name; hashes absent from it are
        labeled Benign (0)
    :param feature_vector_parent: directory containing per-APK feature files
    :return: (csr_matrix of shape (n_apks, len(vocab)), list of int labels)
    """
    # Dummy first row so vstack has something to stack onto; deleted at the end.
    feature_vectors = csr_matrix((1, len(vocab)), dtype=bool)
    # Transform vectorizer over vocab to effectively generate a dictionary
    vectorizer = DictVectorizer(sort=False)
    vectorizer.fit_transform(vocab)
    #### Necessary to create the ground_truth indexes
    n_families = []
    for hashes in feature_vector_hashes:
        n_families.append(apk_sha256_map.get(hashes))
    n_families_without_none = list(filter(None.__ne__, n_families))
    # Benign is always label 0; families get 1..N in sorted (np.unique) order.
    family_mapping = {"Benign": 0}
    family_mapping_n = ({family: v for v, family in enumerate(np.unique(n_families_without_none), 1)})
    family_mapping.update(family_mapping_n)
    y_ground_truth = []
    for idx, apk in enumerate(feature_vector_hashes):
        apk_feature_data = utils.build_feature_vectors(apk, feature_vector_parent)
        # Assign value of 1 in dictionary for feature vectors
        apk_feature_dictionary = {feature: 1 for feature in apk_feature_data}
        # Transform feature dictionary over fitted to produce a binary feature vector
        # 1 means that the feature is there, otherwise a 0
        feature_vector = vectorizer.transform(apk_feature_dictionary)
        # NOTE(review): vstack inside the loop is O(n^2) in total copies;
        # collecting rows and stacking once would scale better.
        feature_vectors = vstack([feature_vectors, feature_vector])
        if apk not in apk_sha256_map:
            y_ground_truth.append(family_mapping["Benign"])
        else:
            y_ground_truth.append(family_mapping[apk_sha256_map[apk]])
    # Delete the first row as it was required to create the csr_matrix
    utils.delete_row_csr(feature_vectors, 0)
    return feature_vectors, y_ground_truth
def compute_jaccard_distance_matrix(feature_vectors: csr_matrix):
    """Return the square pairwise Jaccard distance matrix of the rows."""
    dense_vectors = feature_vectors.todense()
    condensed = pdist(dense_vectors, metric='jaccard')
    return squareform(condensed)
'''
Run DBSCAN
'''
def run_dbscan_and_plot(eps, min_pts, distance_matrix, y_ground_truth):
    """Cluster with DBSCAN on a precomputed distance matrix, plot, and
    print cluster-quality metrics.

    :param eps: DBSCAN neighborhood radius
    :param min_pts: DBSCAN min_samples
    :param distance_matrix: square pairwise distance matrix
    :param y_ground_truth: integer labels for external validation metrics
    Note: plt.show() blocks until the figure window is closed.
    """
    db = DBSCAN(eps=eps, min_samples=min_pts, metric='precomputed').fit(distance_matrix)
    labels = db.labels_
    # Boolean mask of core points.
    core_samples = np.zeros_like(labels, dtype=bool)
    core_samples[db.core_sample_indices_] = True
    # Label -1 is DBSCAN noise, not a cluster.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    print('Estimated number of clusters: %d' % n_clusters_)
    unique_labels = set(labels)
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    # Project the distance matrix to 2-D for visualization only.
    X_embedded = TruncatedSVD(n_components=2).fit_transform(distance_matrix)
    for k, col in zip(unique_labels, colors):
        if k == -1:
            # Black used for noise.
            col = 'k'
        class_member_mask = (labels == k)
        # Non-core members drawn smaller than core members.
        xy = X_embedded[class_member_mask & ~core_samples]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                 markeredgecolor='k', markersize=6)
        xy = X_embedded[class_member_mask & core_samples]
        plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
                 markeredgecolor='k', markersize=8)
    plt.title('Estimated number of clusters: {}. MinPts: {}, Epsilon: {}'.format(n_clusters_, min_pts, eps))
    plt.show()
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(y_ground_truth, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(y_ground_truth, labels))
    print("Purity: %0.3f"
          % utils.purity_score(np.asarray(y_ground_truth), labels))
    # NOTE(review): silhouette_score here treats distance_matrix as a
    # feature matrix unless metric='precomputed' is passed — confirm intent.
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(distance_matrix, labels))
def print_dataset_stats(feature_vector_hashes, apk_sha256_dict, vocab):
n_families = []
for hashes in feature_vector_hashes:
n_families.append(apk_sha256_dict.get(hashes))
n_families = [x for x in n_families if x is not None]
print(np.unique(n_families))
print("Malware families in ground truth {}".format(len(np.unique(n_families))))
print("Malicious Apps: {}".format(len(n_families)))
print("Features: {}".format(len(vocab)))
|
{"/dbscan_android_malware.py": ["/utils.py"], "/main.py": ["/dbscan_android_malware.py", "/utils.py"]}
|
16,058
|
chris-hutch/DBScanDroid
|
refs/heads/master
|
/main.py
|
import dbscan_android_malware as dbs
import utils
import argparse
def main(data_hashes_dest, percentage_sample, ground_truth_dest, feature_vector_parent):
    """Run the full DBScanDroid pipeline: load ground truth, vectorize
    features, cluster with DBSCAN, plot, and print statistics.

    :param data_hashes_dest: path to the file listing application hashes
    :param percentage_sample: fraction of the dataset to sample (str or float)
    :param ground_truth_dest: path to the ground-truth CSV
    :param feature_vector_parent: directory holding per-APK feature files
    """
    apk_sha256_dict = dbs.create_sha256_dict(ground_truth_dest)
    # Coerce here so callers passing a raw argv string still work: downstream
    # code multiplies this value by an integer sample size.
    percentage_sample = float(percentage_sample)
    feature_vector_hashes = utils.get_feature_vector_hashes(data_hashes_dest,
                                                            percentage_sample,
                                                            apk_sha256_dict,
                                                            ground_truth_dest,
                                                            leave_out_benign=False,
                                                            only_fake_installer=False,
                                                            top_three_malware=False)
    # Pass the parent directory by keyword: build_vocab's signature is
    # (*files, feature_vector_parent=None), so a positional second argument
    # would be swallowed into *files and the keyword left as None.
    vocabulary = utils.build_vocab(feature_vector_hashes,
                                   feature_vector_parent=feature_vector_parent)
    feature_vector_matrix, ground_truth = dbs.construct_feature_vector_matrix(
        vocabulary, feature_vector_hashes, apk_sha256_dict, feature_vector_parent
    )
    jaccard_distance_matrix = dbs.compute_jaccard_distance_matrix(feature_vector_matrix)
    min_pts = 30
    eps = 0.46
    utils.plot_knn_values(jaccard_distance_matrix, [min_pts], eps)
    dbs.run_dbscan_and_plot(eps, min_pts, jaccard_distance_matrix, ground_truth)
    dbs.print_dataset_stats(feature_vector_hashes, apk_sha256_dict, vocabulary)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run DBScanDroid')
    parser.add_argument("data_hashes_dest", help="Destination of application hash list")
    # Fixed: without type=float argparse passes a str, and downstream code
    # multiplies this value by an integer sample size.
    parser.add_argument("percentage_sample", type=float,
                        help="Percentage of data sample to take")
    parser.add_argument("ground_truth_dest", help="Destination of ground truth csv")
    parser.add_argument("feature_vector_parent", help="Directory name where feature vector parent")
    args = parser.parse_args()
    main(args.data_hashes_dest, args.percentage_sample, args.ground_truth_dest, args.feature_vector_parent)
|
{"/dbscan_android_malware.py": ["/utils.py"], "/main.py": ["/dbscan_android_malware.py", "/utils.py"]}
|
16,059
|
chris-hutch/DBScanDroid
|
refs/heads/master
|
/utils.py
|
import operator
from collections import OrderedDict
from itertools import dropwhile
from sklearn.neighbors import NearestNeighbors
from collections import Counter
import matplotlib.pyplot as plt
from sklearn import metrics
import numpy as np
def get_feature_vector_hashes(data_split: str,
                              percent_to_retrieve: float,
                              apk_sha256_dict: dict,
                              ground_truth_dest: str,
                              leave_out_benign=False,
                              only_fake_installer=False,
                              top_three_malware=False,
                              minimum_applications_per_malware_family=10):
    """Select which application hashes to vectorize, under several schemes.

    :param data_split: path to a file with one application hash per line
    :param percent_to_retrieve: fraction of the (hard-coded 128994-line)
        dataset to take
    :param apk_sha256_dict: sha256 -> family mapping for known malware
    :param ground_truth_dest: path to the ground-truth CSV
    :param leave_out_benign: return only malicious hashes
    :param only_fake_installer: benign sample plus 50 FakeInstaller apps
    :param top_three_malware: only hashes of the three largest families
    :param minimum_applications_per_malware_family: prune families smaller
        than this from the family counter
    :return: list of selected application hashes
    """
    ground_truth = np.loadtxt(ground_truth_dest, delimiter=",", skiprows=1, dtype=str)
    def _top_three_malware():
        # Keep only hashes belonging to the three most common families.
        with open(data_split, mode='r') as data:
            feature_vector_hashes = []
            for line in data:
                if len(feature_vector_hashes) >= (128994 * percent_to_retrieve):
                    break
                # NOTE(review): line.rsplit() is a *list*; membership against
                # the string column ground_truth[:, 0] likely never matches —
                # probably line.rsplit()[0] (or line.strip()) was intended.
                if line.rsplit() in ground_truth[:, 0]:
                    feature_vector_hashes.append(line.rsplit()[0])
        apk_sha256_dict_adjust = {k: apk_sha256_dict[k] for k in feature_vector_hashes if k in apk_sha256_dict}
        family_count = Counter(apk_sha256_dict_adjust.values())
        family_count = sorted(family_count.items(), key=operator.itemgetter(1), reverse=True)
        family_count = family_count[0:3]
        apk_sha256_dict_adjust = {
            k: apk_sha256_dict_adjust[k] for k in apk_sha256_dict_adjust.keys()
            if apk_sha256_dict_adjust[k] in
               [i[0] for i in family_count]
        }
        return list(apk_sha256_dict_adjust.keys())
    def _weighted_benign_fake_installer():
        # Benign sample plus the first 50 FakeInstaller hashes (sorted).
        with open(data_split, mode='r') as split_1:
            feature_vector_hashes = []
            for line in split_1:
                if len(feature_vector_hashes) >= (128994 * percent_to_retrieve):
                    break
                # NOTE(review): same list-vs-string membership issue as above.
                if line.rsplit() not in ground_truth[:, 0]:
                    feature_vector_hashes.append(line.rsplit()[0])
        apk_sha256_dict_adjust = {k: apk_sha256_dict[k] for k, v in apk_sha256_dict.items() if v == "FakeInstaller"}
        apk_sha256_dict_adjust = {k: apk_sha256_dict_adjust[k] for k in sorted(apk_sha256_dict_adjust.keys())[:50]}
        feature_vector_hashes.extend(list(apk_sha256_dict_adjust.keys()))
        return feature_vector_hashes
    if only_fake_installer:
        return _weighted_benign_fake_installer()
    elif top_three_malware:
        return _top_three_malware()
    else:
        with open(data_split, mode='r') as split_1:
            feature_vector_hashes = []
            for line in split_1:
                if len(feature_vector_hashes) >= (128994 * percent_to_retrieve):
                    break
                feature_vector_hashes.append(line.rsplit()[0])
        apk_sha256_dict_adjust = {k: apk_sha256_dict[k] for k in feature_vector_hashes if k in apk_sha256_dict}
        family_count = Counter(apk_sha256_dict_adjust.values())
        # Drop families with too few apps; most_common() is descending, so
        # dropwhile skips the big families and the rest get deleted.
        for key, count in dropwhile(lambda key_count: key_count[1] >= minimum_applications_per_malware_family, family_count.most_common()):
            del family_count[key]
        if leave_out_benign:
            return list(apk_sha256_dict_adjust.keys())
        else:
            # NOTE(review): removing items from feature_vector_hashes while
            # iterating over it can skip elements — iterate a copy instead.
            for hash in feature_vector_hashes:
                if hash in ground_truth[:, 0]:
                    if hash not in apk_sha256_dict_adjust:
                        feature_vector_hashes.remove(hash)
            return feature_vector_hashes
def build_feature_vectors(file: str, feature_vector_parent: str):
    """Return the feature values recorded for one application file.

    Each useful line has the form 'identifier::value'; only identifiers in
    the known feature-set list are kept. Malformed lines are skipped.

    :param file: file name of the per-APK feature listing
    :param feature_vector_parent: directory containing *file*
    :return: list of feature value strings, in file order
    """
    wanted = ("activity", "service_receiver", "provider",
              "intent", "permission", "feature")
    features = []
    with open(feature_vector_parent + "/" + file) as fh:
        for raw_line in fh:
            parts = raw_line.rstrip().split("::")
            # Skip lines that do not split into exactly identifier::value.
            if len(parts) != 2:
                continue
            if parts[0] in wanted:
                features.append(parts[1])
    return features
def build_vocab(*files, feature_vector_parent=None):
    """Build the ordered feature vocabulary from one or more file lists.

    Each positional argument is an iterable of feature-vector file names under
    ``feature_vector_parent``.  Features are bucketed into the four DREBIN
    manifest sets (S1 hardware, S2 permissions, S3 app components, S4 intents)
    and merged into a single OrderedDict whose keys form the vocabulary.
    """
    hardware_components = {}   # S1
    requested_permissions = {}  # S2
    app_components = {}         # S3 (activities, services/receivers, providers)
    intent_filters = {}         # S4
    vocabulary = OrderedDict()
    # Map each line identifier onto the feature-set dict it belongs to.
    bucket_for = {
        "activity": app_components,
        "service_receiver": app_components,
        "provider": app_components,
        "intent": intent_filters,
        "permission": requested_permissions,
        "feature": hardware_components,
    }
    for idxx, file_group in enumerate(files):
        for idxy, file in enumerate(file_group):
            with open(feature_vector_parent + "/" + file) as handle:
                for raw_line in handle:
                    parts = raw_line.rstrip().split("::")
                    # Avoid operating on empty lines which don't contain feature and identifier
                    if len(parts) != 2:
                        continue
                    bucket = bucket_for.get(parts[0])
                    if bucket is not None and parts[1] not in bucket:
                        bucket[parts[1]] = 1
            # Merge in feature-set order; existing keys keep their position.
            vocabulary.update(hardware_components)
            vocabulary.update(requested_permissions)
            vocabulary.update(app_components)
            vocabulary.update(intent_filters)
            print("Analysed from set ({}) {}/{}".format(idxx + 1, idxy + 1, len(file_group)))
    return vocabulary
def plot_knn_values(X, k_values, eps=None):
    """Draw a sorted k-NN distance ("elbow") plot for each k in k_values.

    Used to pick the DBSCAN `eps` parameter: the knee of the sorted distance
    curve suggests a good neighbourhood radius.  A horizontal dashed line is
    drawn at `eps` as a visual guide.  Blocks on plt.show().
    """
    for k in k_values:
        # K + 1 as neighbours does not count index point whilst DSBSCAN range query does
        nbrs = NearestNeighbors(n_neighbors=k + 1, n_jobs=-1).fit(X)
        distances, indicies = nbrs.kneighbors(X)
        # Sort each neighbour column independently so the curve is monotone.
        distances = np.sort(distances, axis=0)
        # NOTE(review): column 0 is the self-distance (always 0), so column
        # k - 1 is the (k-1)-th true neighbour; the k-th neighbour would be
        # column k (which is why k + 1 neighbours were requested above).
        # Possible off-by-one -- confirm against the DBSCAN minPts convention.
        distances = distances[:, k - 1]
        # distances = distances[::-1]
        # NOTE(review): the y-label always uses k_values[0], not the current k.
        plt.ylabel('{}-NN Distance'.format(k_values[0]))
        plt.xlabel('Points (application) sorted by distance')
        plt.plot(distances, label="k (minPts) = {}".format(k))
    # Guide line at the candidate eps value (None simply draws nothing useful).
    plt.axhline(y=eps, xmin=0.0, xmax=1.0, linestyle='--', color='k', linewidth=0.8)
    plt.legend()
    plt.show()
def delete_row_csr(mat, i):
    """Delete row ``i`` from a scipy.sparse CSR matrix, in place.

    Works directly on the CSR triplet (data / indices / indptr): the stored
    values of row i are squeezed out of ``data`` and ``indices``, then the
    row-pointer array is collapsed and the private ``_shape`` shrunk by one
    row.  O(nnz) like any CSR row removal.
    """
    nnz_in_row = mat.indptr[i + 1] - mat.indptr[i]
    if nnz_in_row > 0:
        # Shift everything after row i left over its entries, then truncate.
        mat.data[mat.indptr[i]:-nnz_in_row] = mat.data[mat.indptr[i + 1]:]
        mat.data = mat.data[:-nnz_in_row]
        mat.indices[mat.indptr[i]:-nnz_in_row] = mat.indices[mat.indptr[i + 1]:]
        mat.indices = mat.indices[:-nnz_in_row]
    # Drop row i's pointer and re-base all later row starts.
    mat.indptr[i:-1] = mat.indptr[i + 1:]
    mat.indptr[i:] -= nnz_in_row
    mat.indptr = mat.indptr[:-1]
    # _shape is a private attribute of scipy.sparse matrices.
    mat._shape = (mat._shape[0] - 1, mat._shape[1])
'''
Purity score impl -> https://stackoverflow.com/questions/34047540/python-clustering-purity-metric/51672699#51672699
'''
def purity_score(y_true, y_pred):
    """Return cluster purity: the fraction of samples that fall in their
    cluster's majority true class (1.0 means perfectly pure clusters)."""
    # Contingency table of true labels (rows) against predicted clusters
    # (columns); confusion_matrix builds it over the union of both label sets.
    contingency = metrics.confusion_matrix(y_true, y_pred)
    majority_per_cluster = np.amax(contingency, axis=0)
    return np.sum(majority_per_cluster) / np.sum(contingency)
|
{"/dbscan_android_malware.py": ["/utils.py"], "/main.py": ["/dbscan_android_malware.py", "/utils.py"]}
|
16,072
|
GKozakjian/P2P
|
refs/heads/master
|
/Part1/Threads/Peer_Server_Thread.py
|
import threading
import time
import socket
import random
import hashlib
from Part1.Peer.Peer import *
import pickle
from Part1.PDU.PDU import *
class Peer_Server_Thread(threading.Thread):
    """Server half of a Part1 chord peer.

    Listens on the owning peer's bound TCP socket and handles incoming PDUs:
    'message' (chat from a known member), 'newnodejoin' (membership update
    relayed by another peer) and 'join' (a new node asking to enter the ring).
    """

    # The Peer instance whose socket and membership list this thread serves.
    local_peer = None
    # SHA-1 hasher used to derive chord IDs from "ip:port" strings.
    # NOTE(review): hashlib objects accumulate input across .update() calls,
    # so every ID derived after the first hashes the concatenation of all
    # previous inputs, not just the new "ip:port" -- confirm this is intended.
    hash_function = None

    def __init__(self, peer):
        """Bind this server thread to `peer` (a Part1 Peer instance)."""
        threading.Thread.__init__(self)
        self.local_peer = peer
        self.hash_function = hashlib.sha1()

    def run(self):
        """Thread entry point: start listening, then loop on accept()."""
        self.local_peer.local_peer_tcp_socket.listen(1)
        print("-Local Peer: Started Listening On Bound Socket")
        self.start_receiving()

    def start_receiving(self):
        """Accept-and-dispatch loop; exits when the peer sets stop_local_peer."""
        while True:
            try:
                if self.local_peer.stop_local_peer:
                    print("-Local Peer: Server Thread Stopped")
                    break
                # accept new connection (blocking; any socket timeout lands in
                # the broad except below and the loop simply retries)
                established_tcp_connection, established_tcp_connection_address = self.local_peer.local_peer_tcp_socket.accept()
                print()
                print("-Local Peer: Accepted New TCP Connection With Remote Peer: " + str(
                    established_tcp_connection_address))
                # NOTE(review): pickle.loads on bytes from the network allows
                # arbitrary code execution -- acceptable only in a toy network.
                received_data = established_tcp_connection.recv(4096)
                received_pdu = pickle.loads(received_data)
                # if received message is a message from a peer in chord network
                # NOTE(review): received_pdu is subscripted like a dict below;
                # this requires the PDU class to support __getitem__.
                if received_pdu['type'] == "message":
                    received_peer_CID = received_pdu['id']
                    for x in self.local_peer.chord_net_peers:
                        # NOTE(review): membership entries use the key 'id'
                        # everywhere else -- 'ID' here looks like a key-case bug
                        # that raises KeyError.
                        if x['ID'] == received_peer_CID:
                            # NOTE(review): indexing the socket object; this was
                            # probably meant to be established_tcp_connection_address.
                            print("-Local Peer: Received Message: " +received_pdu['message'] +", From Pere: " + established_tcp_connection[0] + ":" +established_tcp_connection[1])
                            break
                # new node joined by other peers
                elif received_pdu['type'] == "newnodejoin":
                    # add new node to the chord array
                    new_node_info = received_pdu['message']
                    # add peer to chord network
                    new_chord_peer = \
                        {
                            "id": new_node_info['id'],
                            "ip": new_node_info['ipv4'],
                            "port": new_node_info['port']
                        }
                    self.local_peer.chord_net_peers.append(new_chord_peer)
                    print("-Local Peer: New Node Joined Chord Network with ID: " + new_node_info['id'] + ", IP:" +
                          new_node_info['ipv4'] + " And Port: " + new_node_info['port'])
                # if received message is join request
                elif received_pdu['type'] == "join":
                    # if static ID exist in join request
                    if received_pdu['id'] is not None:
                        received_join_CID = received_pdu['id']
                        # check for collision: a taken ID is replaced by one
                        # hashed from the joiner's "ip:port"
                        for x in self.local_peer.chord_net_peers:
                            if x['id'] == received_join_CID:
                                # generate new ID based on remote peer IPv4 and Port
                                received_join_peer_socket = received_pdu['ipv4'] + ":" + received_pdu['port']
                                self.hash_function.update(received_join_peer_socket.encode())
                                received_join_CID = self.hash_function.hexdigest()
                        # add peer to chord network
                        new_chord_peer = \
                            {
                                "id": received_join_CID,
                                "ip": received_pdu['ipv4'],
                                "port": received_pdu['port']
                            }
                        self.local_peer.chord_net_peers.append(new_chord_peer)
                        print("-Local Peer: New Node Joined Chord Network with ID: " + received_join_CID + ", IP:" +
                              received_pdu['ipv4'] + " And Port: " + received_pdu['port'])
                        # Reply: 'joinack' carries the replacement ID when a
                        # collision forced a new one, else a bare 'joinackid'.
                        if received_join_CID != received_pdu['id']:
                            msg = received_join_CID
                            join_reply_pdu = PDU(1, "joinack", self.local_peer.TTL, self.local_peer.local_peer_CID,
                                                 self.local_peer.local_host_ip, self.local_peer.local_host_port, msg, "")
                            serialized_join_reply = pickle.dumps(join_reply_pdu)
                            established_tcp_connection.send(serialized_join_reply)
                        else:
                            join_reply_pdu = PDU(1, "joinackid", self.local_peer.TTL, self.local_peer.local_peer_CID,
                                                 self.local_peer.local_host_ip, self.local_peer.local_host_port, "", "")
                            serialized_join_reply = pickle.dumps(join_reply_pdu)
                            established_tcp_connection.send(serialized_join_reply)
                        # notify chord network that a new node joined
                        # establish connection to remote peer and send message
                        random_port_once = random.randint(30000, 60000)
                        socket_once = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        socket_once.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                        socket_once.bind((self.local_peer.local_host_ip, int(random_port_once)))
                        message = \
                            {
                                'id': received_join_CID,
                                'ip': received_pdu['ipv4'],
                                'port': received_pdu['port']
                            }
                        for x in self.local_peer.chord_net_peers:
                            # NOTE(review): this `or` condition is always True, so
                            # neither the new node nor this peer is excluded; and a
                            # single socket cannot connect() more than once --
                            # the second iteration raises.
                            if x['id'] != received_join_CID or x['id'] != self.local_peer.local_peer_CID:
                                socket_once.connect((x['ip'], int(x['port'])))
                                # prepare message
                                pdu = PDU(1, "newnodejoin", self.local_peer.TTL, self.local_peer.local_peer_CID,
                                          self.local_peer.local_host_ip, self.local_peer.local_host_port, message, "")
                                picled_pdu = pickle.dumps(pdu)
                                socket_once.send(picled_pdu)
                        # close socket once when finished sending
                        socket_once.shutdown(2)
                    # if there is no static ID in join request
                    else:
                        received_join_peer_socket = received_pdu['ipv4'] + ":" + received_pdu['port']
                        self.hash_function.update(received_join_peer_socket.encode())
                        received_join_CID = self.hash_function.hexdigest()
                        for x in self.local_peer.chord_net_peers:
                            # check if node already exist in chord network
                            if x['id'] == received_join_CID:
                                break
                        # join new node to chord net if it doesnt exist in network
                        # (for/else: runs only when the loop above did not break)
                        else:
                            new_chord_peer = \
                                {
                                    "id": received_join_CID,
                                    "ip": received_pdu['ipv4'],
                                    "port": received_pdu['port']
                                }
                            self.local_peer.chord_net_peers.append(new_chord_peer)
                            print(
                                "-Local Peer: New Node Joined Chord Network with ID: " + received_join_CID + ", IP:" +
                                received_pdu['ipv4'] + " And Port: " + received_pdu['port'])
                            msg = \
                                {
                                    "id": received_join_CID
                                }
                            join_reply_pdu = PDU(1, "joinack", self.local_peer.TTL, self.local_peer.local_peer_CID,
                                                 self.local_peer.local_host_ip, self.local_peer.local_host_port,
                                                 msg, "")
                            serialized_join_reply = pickle.dumps(join_reply_pdu)
                            established_tcp_connection.send(serialized_join_reply)
                            # notify chord network that a new node joined
                            # establish connection to remote peer and send message
                            random_port_once = random.randint(30000, 60000)
                            socket_once = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                            socket_once.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                            socket_once.bind((self.local_peer.local_host_ip, int(random_port_once)))
                            message = \
                                {
                                    'id': received_join_CID,
                                    'ip': received_pdu['ipv4'],
                                    'port': received_pdu['port']
                                }
                            for x in self.local_peer.chord_net_peers:
                                # NOTE(review): same single-socket reconnect issue
                                # as the static-ID branch above.
                                if x['id'] != received_join_CID:
                                    socket_once.connect((x['ip'], int(x['port'])))
                                    # prepare message
                                    pdu = PDU(1, "newnodejoin", self.local_peer.TTL,
                                              self.local_peer.local_peer_CID,
                                              self.local_peer.local_host_ip, self.local_peer.local_host_port,
                                              message, "")
                                    picled_pdu = pickle.dumps(pdu)
                                    socket_once.send(picled_pdu)
                            # close socket once when finished sending
                            socket_once.shutdown(2)
                # Throttle the accept loop between handled connections.
                time.sleep(10)
            except Exception as e:
                # NOTE(review): debug leftovers -- the profane print should be
                # removed, and this catch-all hides real protocol errors
                # (including the socket accept timeout); narrow it.
                print("im fucking here")
                print(e)
                pass
            # print("-Local Peer: No New Incoming Connections")
|
{"/Part1/Threads/Peer_Server_Thread.py": ["/Part1/PDU/PDU.py"], "/Part2/Threads/Peer_Client_Thread2.py": ["/Part2/Peer/Peer2.py"], "/Part2/Peer/Peer2.py": ["/Part2/Threads/Peer_Client_Thread2.py"]}
|
16,073
|
GKozakjian/P2P
|
refs/heads/master
|
/Part2/Threads/Peer_Client_Thread2.py
|
import threading
import time
import socket
import pickle
import random
from Part2.Peer.Peer2 import *
from Part2.Threads.PDU2 import *
import pickle
import hashlib
import signal
class Peer_Client_Thread2(threading.Thread):
    """CLI/client half of a Part2 chord peer.

    Reads a destination "ip:port" and a message from stdin, then delivers the
    message to the matching chord member over a one-shot TCP connection.
    """

    # Peer2 instance whose membership list and identity this thread uses.
    local_peer = None
    # SHA-1 hasher for deriving chord IDs from "ip:port" strings.
    # NOTE(review): hashlib objects accumulate input across update() calls, so
    # IDs derived after the first lookup hash the concatenated history.
    hash_function = None

    def __init__(self, peer):
        """Bind this CLI thread to `peer` (a Peer2 instance)."""
        threading.Thread.__init__(self)
        self.local_peer = peer
        self.hash_function = hashlib.sha1()

    # signal.signal(signal.SIGINT, self.signal_handler)
    #
    # def signal_handler(signal, frame):
    #     print("ahyyy")
    #     pass

    def run(self):
        """Interactive loop: prompt, resolve the target peer, send, repeat.

        Entering Q/q sets stop_local_peer (observed by the server thread) and
        ends this thread.
        """
        while True:
            try:
                # get peer message from CLI interface
                remote_peer = input("-Local peer: Enter Peer you want to send a message to, in IP:Port Format, or enter Q to exit")
                if remote_peer == "Q" or remote_peer == "q":
                    self.local_peer.stop_local_peer = True
                    print("-Local Peer: Client Thread Stopped")
                    break
                message = input("-Local Peer: Enter Message")
                # check if peer in chord network
                self.hash_function.update(remote_peer.encode())
                remote_peer_CID = self.hash_function.hexdigest()
                remote_found = False
                for x in self.local_peer.chord_net_peers:
                    if x['id'] == remote_peer_CID:
                        remote_found = True
                        remote_peer_ip, remote_peer_port = remote_peer.split(':')
                        # establish connection to remote peer and send message
                        # (one-shot socket on a random high local port)
                        random_port_once = random.randint(30000, 60000)
                        socket_once =socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        socket_once.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                        socket_once.bind((self.local_peer.local_host_ip, int(random_port_once)))
                        socket_once.connect((remote_peer_ip, int(remote_peer_port)))
                        # prepare message
                        pdu = PDU2(1, "message", self.local_peer.TTL, self.local_peer.local_peer_CID, self.local_peer.local_host_ip, self.local_peer.local_host_port, message, "Dumb Message ")
                        picled_pdu = pickle.dumps(pdu)
                        socket_once.send(picled_pdu)
                        print("-Local Peer: Message Sent To Peer with ID: " + x['id'] + ", IP: " + remote_peer_ip + ", Port: " + remote_peer_port)
                        # close socket once when finished sending
                        socket_once.shutdown(2)
                        break
                if not remote_found:
                    # try to send it to successor
                    # check for successor and predecessor
                    # NOTE(review): in this branch remote_peer_ip/remote_peer_port
                    # and x were never assigned (they are only bound inside the
                    # match loop above), so both forwarding paths raise NameError;
                    # the successor's address should be looked up from the
                    # membership list instead.  Also: `break` here exits the whole
                    # CLI loop, ending the thread after one forwarded message.
                    if int(remote_peer_CID, 16) < int(self.local_peer.successor, 16):
                        # establish connection to remote peer and send message
                        random_port_once = random.randint(30000, 60000)
                        socket_once = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        socket_once.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                        socket_once.bind((self.local_peer.local_host_ip, int(random_port_once)))
                        socket_once.connect((remote_peer_ip, int(remote_peer_port)))
                        # prepare message
                        pdu = PDU2(1, "message", self.local_peer.TTL, self.local_peer.local_peer_CID,
                                   self.local_peer.local_host_ip, self.local_peer.local_host_port, message,
                                   "Dumb Message ")
                        picled_pdu = pickle.dumps(pdu)
                        socket_once.send(picled_pdu)
                        print("-Local Peer: Message Sent To Peer with ID: " + x[
                            'id'] + ", IP: " + remote_peer_ip + ", Port: " + remote_peer_port)
                        # close socket once when finished sending
                        socket_once.shutdown(2)
                        break
                        pass
                    # NOTE(review): unlike the successor test, this int() call is
                    # missing base 16 -- a hex CID string raises ValueError.
                    elif int(remote_peer_CID, 16) > int(self.local_peer.predecessor):
                        # establish connection to remote peer and send message
                        random_port_once = random.randint(30000, 60000)
                        socket_once = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        socket_once.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                        socket_once.bind((self.local_peer.local_host_ip, int(random_port_once)))
                        socket_once.connect((remote_peer_ip, int(remote_peer_port)))
                        # prepare message
                        pdu = PDU2(1, "message", self.local_peer.TTL, self.local_peer.local_peer_CID,
                                   self.local_peer.local_host_ip, self.local_peer.local_host_port, message,
                                   "Dumb Message ")
                        picled_pdu = pickle.dumps(pdu)
                        socket_once.send(picled_pdu)
                        print("-Local Peer: Message Sent To Peer with ID: " + x[
                            'id'] + ", IP: " + remote_peer_ip + ", Port: " + remote_peer_port)
                        # close socket once when finished sending
                        socket_once.shutdown(2)
                        break
                        pass
                    else:
                        print("-Local Peer: Entered Remote Peer Not In Chord Network")
                time.sleep(1)
            except KeyboardInterrupt:
                print("CLI Thread Stopped")
                exit()
|
{"/Part1/Threads/Peer_Server_Thread.py": ["/Part1/PDU/PDU.py"], "/Part2/Threads/Peer_Client_Thread2.py": ["/Part2/Peer/Peer2.py"], "/Part2/Peer/Peer2.py": ["/Part2/Threads/Peer_Client_Thread2.py"]}
|
16,074
|
GKozakjian/P2P
|
refs/heads/master
|
/Part1/PDU/PDU.py
|
class PDU():
    """Protocol data unit exchanged between Part1 chord peers.

    Instances are pickled onto the wire.  The receive path in
    Peer_Server_Thread reads fields with dict-style subscripts
    (``pdu['type']``, ``pdu['port']``, ...), which a plain object does not
    support, so ``__getitem__`` delegates to the attributes.  Note the port
    attribute is (historically) stored as ``Port`` with a capital P.
    """
    version = ""
    ttl = 0
    id = ""
    Port = 0
    ipv4 = ""
    type = ""
    message = ""
    reserved = ""

    def __init__(self, version, type, ttl, ID, ipv4, port, message, reserved):
        """Populate all PDU fields.

        `type` is one of 'message', 'join', 'joinack', 'joinackid' or
        'newnodejoin'; `message` carries the type-specific payload.
        """
        self.version = version
        self.ttl = ttl
        self.id = ID
        self.Port = port
        self.ipv4 = ipv4
        self.type = type
        self.message = message
        self.reserved = reserved

    def __getitem__(self, key):
        """Dict-style field access used by the receive path (``pdu['type']``).

        The lowercase key 'port' maps onto the capitalised `Port` attribute;
        every other key matches its attribute name directly.
        """
        if key == 'port':
            return self.Port
        return getattr(self, key)
{"/Part1/Threads/Peer_Server_Thread.py": ["/Part1/PDU/PDU.py"], "/Part2/Threads/Peer_Client_Thread2.py": ["/Part2/Peer/Peer2.py"], "/Part2/Peer/Peer2.py": ["/Part2/Threads/Peer_Client_Thread2.py"]}
|
16,075
|
GKozakjian/P2P
|
refs/heads/master
|
/Part2/Peer/Peer2.py
|
import sys
import socket
from Part2.Threads.Peer_Client_Thread2 import *
from Part2.Threads.Peer_Server_Thread2 import *
import random
from pickle import *
import hashlib
from Part2.Peer.PDU2 import *
class Peer2:
    """A Part2 chord peer.

    Owns the listening TCP socket, the membership list and the client/server
    threads.  Depending on the CLI opcode it either joins an existing chord
    network (`-p`) or creates a new one (`-I` / `-t`); after both threads stop
    it runs the leave protocol and exits.
    """

    # NOTE(review): these are class attributes -- in particular
    # chord_net_peers is a single mutable list shared by every Peer2 instance.
    successor = None
    predecessor = None
    local_host_ip = None
    local_host_port = None
    local_peer_CID = None
    local_peer_tcp_socket = None
    local_peer_server_thread = None
    local_peer_client_thread = None
    local_peer_cli_thread = None
    chord_net_peers = []
    latest_remote_peer_ip = None
    latest_remote_peer_port = None
    TTL = None
    # NOTE(review): a single cumulative SHA-1 object -- update() appends to
    # all previously hashed input, so only the first derived CID is a pure
    # hash of "ip:port".
    hash_function = None
    stop_local_peer = False

    def __init__(self, opcode, remote_peer_ip_port, SID_Option, CSID, TTL_Option, TTL):
        """Bind a socket, join or create a chord network, run both threads,
        then perform the leave protocol when they finish.

        opcode: '-p' join via remote_peer_ip_port, '-I' create with static
        CID, anything else ('-t') create with a derived CID.
        """
        try:
            self.message_available_to_send = False
            self.local_peer_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.local_host_ip = "localhost"
            self.hash_function = hashlib.sha1()
            # bind the socket
            self.bind_socket()
            # check opcode, and act accordingly
            if opcode == '-p':
                self.latest_remote_peer_ip, self.latest_remote_peer_port = remote_peer_ip_port.split(':')
                if SID_Option == "-I":
                    self.local_peer_CID = CSID
                    if TTL_Option == "-t":
                        self.TTL = TTL
                    else:
                        self.TTL = 255
                else:
                    self.local_peer_CID = ""
                    if TTL_Option == "-t":
                        self.TTL = TTL
                    else:
                        self.TTL = 255
                self.join_chord()
            elif opcode == "-I":
                self.local_peer_CID = CSID
                if TTL_Option == "-t":
                    self.TTL = TTL
                else:
                    self.TTL = 255
                # create new chord network
                self.create_new_chord()
            else:
                if opcode == "-t":
                    self.TTL = TTL
                else:
                    self.TTL = 255
                # create new chord network
                self.create_new_chord()
            # start local peer's sender and receiver threads
            self.assign_threads()
            # join the threads
            self.join_threads()
            # both trheads stopped: broadcast a leave PDU to every other member
            print("-Local Peer: Started Chord Leave Process")
            # establish connection to remote peer and send message
            random_port_once = random.randint(30000, 60000)
            socket_once = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            socket_once.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            socket_once.bind((self.local_host_ip, int(random_port_once)))
            for x in self.chord_net_peers:
                if x['id'] != self.local_peer_CID:
                    # NOTE(review): a socket cannot connect() twice -- with more
                    # than one remote member the second iteration raises.
                    socket_once.connect((x['ip'], int(x['port'])))
                    # prepare message
                    # NOTE(review): only 7 positional args -- if PDU2 mirrors
                    # Part1's PDU (8 params incl. `reserved`) this raises
                    # TypeError and the leave is never sent.
                    pdu = PDU2(1, "leave", self.TTL, self.local_peer_CID,
                               self.local_host_ip, self.local_host_port, "By Bcs")
                    picled_pdu = pickle.dumps(pdu)
                    socket_once.send(picled_pdu)
                    print("-Local Peer: Leave Request Sent to Peer with ID: " + x['id'] + ", IP: " + x['ip'] + ", Port: " + x['port'])
            # close socket once when finished sending
            socket_once.shutdown(2)
            print("-Local Peer: Stopped")
            sys.exit()
        except KeyboardInterrupt:
            # disconnect from network
            # NOTE(review): debug message; Ctrl-C skips the leave protocol.
            print(" by by im going home")
            pass

    def bind_socket(self):
        """Bind the listening socket to localhost on a random high port."""
        # Generate a random socket for local host
        self.local_host_port = random.randint(30000, 60000)
        self.local_peer_tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # 10s timeout: accept()/recv() raise socket.timeout, handled by the
        # broad except blocks in the threads.
        self.local_peer_tcp_socket.settimeout(10)
        self.local_peer_tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.local_peer_tcp_socket.bind((self.local_host_ip, self.local_host_port))
        print("-Local Peer: Socket Bind Completed On IP: " + self.local_host_ip + " & Port: " + str(self.local_host_port))

    def assign_threads(self):
        """Create and start the client (CLI) and server (accept) threads."""
        self.local_peer_client_thread = Peer_Client_Thread2(self)
        self.local_peer_server_thread = Peer_Server_Thread2(self)
        print("-Local Peer: Sender & Receiver Threads Started")
        self.local_peer_server_thread.start()
        self.local_peer_client_thread.start()

    def join_threads(self):
        """Block until both worker threads have finished."""
        self.local_peer_client_thread.join()
        self.local_peer_server_thread.join()

    def join_chord(self):
        """Send a 'join' PDU to the bootstrap peer and wait for the ack.

        'joinackid' confirms our static CID; 'joinack' carries a freshly
        generated CID in its message payload.
        """
        try:
            # send join message to join a chord network
            join_pdu = PDU2(1, "join", self.TTL, self.local_peer_CID, self.local_host_ip, self.local_host_port, "join the network", "")
            # NOTE(review): the first dumps() result is never used.
            serialized_pdu = pickle.dumps(join_pdu, pickle.HIGHEST_PROTOCOL)
            serialized_join_pdu = pickle.dumps(join_pdu, pickle.HIGHEST_PROTOCOL)
            self.local_peer_tcp_socket.connect((self.latest_remote_peer_ip, int(self.latest_remote_peer_port)))
            self.local_peer_tcp_socket.send(serialized_join_pdu)
        except Exception as e:
            print(e)
        # receive join confirm (recv has a 10s timeout; timeouts fall into
        # the except below and we keep waiting)
        while True:
            try:
                join_reply_pdu = self.local_peer_tcp_socket.recv(1024)
                # NOTE(review): the reply PDU is accessed with dict subscripts;
                # this requires PDU2 to support __getitem__.
                serialized_join_reply_pdu = pickle.loads(join_reply_pdu)
                if serialized_join_reply_pdu['type'] == "joinackid":
                    new_chord_peer = \
                        {
                            "id": serialized_join_reply_pdu['id'],
                            "ip": serialized_join_reply_pdu['ipv4'],
                            "port": serialized_join_reply_pdu['port']
                        }
                    self.chord_net_peers.append(new_chord_peer)
                    print("-Local Peer: Joined Chord Network With Static ID")
                elif serialized_join_reply_pdu['type'] == 'joinack':
                    reply_CID_message = serialized_join_reply_pdu['message']
                    self.local_peer_CID = reply_CID_message['id']
                    new_chord_peer = \
                        {
                            "id": serialized_join_reply_pdu['id'],
                            "ip": serialized_join_reply_pdu['ipv4'],
                            "port": serialized_join_reply_pdu['port']
                        }
                    self.chord_net_peers.append(new_chord_peer)
                    print("-Local Peer: Joined Chord Network With New Generated ID")
                break
            except:
                print("-Local Peer: Still Waiting for Join Reply")

    def create_new_chord(self):
        """Start a fresh chord network with this peer as the only member.

        If no static CID was supplied, derive one from "ip:port" via SHA-1.
        """
        print("-Local Peer: New Chord Network Created")
        if self.local_peer_CID is None:
            cid_gen_str = self.local_host_ip + ":" + str(self.local_host_port)
            # NOTE(review): update() returns None; the first assignment is a
            # harmless no-op immediately overwritten by hexdigest().
            self.local_peer_CID = self.hash_function.update(cid_gen_str.encode())
            self.local_peer_CID = self.hash_function.hexdigest()
        new_chord_peer = \
            {
                "id": self.local_peer_CID,
                "ip": self.local_host_ip,
                "port": self.local_host_port
            }
        self.chord_net_peers.append(new_chord_peer)
        print("-Local Peer: Joined Chord Network With Chord ID: " + str(self.local_peer_CID))
# Command-line entry point.  Usage patterns:
#   -p <ip:port> [-I <static_id>] [-t <ttl>]  join an existing chord network
#   -I <static_id> [-t <ttl>]                 create a network with a fixed ID
#   -t <ttl>                                  create a network with a derived ID
if __name__ == "__main__":
    option = sys.argv[1]
    if option == '-p':
        if sys.argv[3] == "-I":
            p = Peer2(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6])
        else:
            p = Peer2(sys.argv[1], sys.argv[2], "", "", sys.argv[3], sys.argv[4])
    elif option == '-I':
        # NOTE(review): the static ID (sys.argv[2]) is passed as the
        # remote_peer_ip_port parameter while CSID is "" -- the -I branch of
        # Peer2.__init__ therefore sets an empty CID; confirm intent.
        p = Peer2(sys.argv[1], sys.argv[2], "", "", sys.argv[3], sys.argv[4])
    elif option == "-t":
        p = Peer2(sys.argv[1], "", "", "", "", sys.argv[2])
|
{"/Part1/Threads/Peer_Server_Thread.py": ["/Part1/PDU/PDU.py"], "/Part2/Threads/Peer_Client_Thread2.py": ["/Part2/Peer/Peer2.py"], "/Part2/Peer/Peer2.py": ["/Part2/Threads/Peer_Client_Thread2.py"]}
|
16,078
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/Order_class.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 18:24:50 2020
@author: Xzhao
"""
class Order:
    """A single trade instruction: direction flag plus share count."""

    def __init__(self, buy, shares):
        """Record the order.

        buy: True for a buy order, False for a sell.
        shares: number of shares to trade.
        """
        self.buy = buy
        self.shares = shares
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,079
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/Strategies/Equal_weight/Equal_weight_iex.py
|
import pandas as pd
import requests
import xlsxwriter
import math
def chunks(lst,n):
    """Yield consecutive slices of `lst`, each at most `n` items long."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
'''This script will create an Excel spreadsheet of buy orders, sized from a user-input portfolio balance, that mimics an equal-weight S&P 500 fund.
It uses the IEX API to get stock prices and a spreadsheet for the current S&P 500 stocks.
It will output the final buy orders as an Excel spreadsheet.
'''
#API TOKEN
from secrets import IEX_CLOUD_API_TOKEN
# currently using a preset file with list of sp500 instead of gathering the list of stocks from some API
sp500_list_csv_filepath = "sp_500_stocks.csv"
sp500_stocklist = pd.read_csv(sp500_list_csv_filepath)
# the current batch API of IEX only accepts at most 100 stock tickers. We are splitting the stock list up
symbol_groups = list(chunks(sp500_stocklist['Ticker'],100))
symbol_strings = []
for i in range(len(symbol_groups)):
    symbol_strings.append(','.join(symbol_groups[i]))
# NOTE(review): ' Market Cap' has a stray leading space; it only affects the
# header text written to the spreadsheet, but looks unintentional.
my_columns = ['Ticker', 'Stock Price', ' Market Cap', 'Number of shares to buy']
final_df= pd.DataFrame(columns = my_columns)
# Extract stock price using IEX API (sandbox endpoint returns scrambled data)
base_url = 'https://sandbox.iexapis.com/stable'
for stocklist in symbol_strings:
    request = f'/stock/market/batch?symbols={stocklist}&types=quote&token={IEX_CLOUD_API_TOKEN}'
    final_url = base_url + request
    data = requests.get(final_url).json()
    for symbol in stocklist.split(','):
        price = data[symbol]['quote']['latestPrice']
        # Market cap converted to billions for readability.
        mkt_cap = data[symbol]['quote']['marketCap']/(10**9)
        # NOTE(review): DataFrame.append was deprecated in pandas 1.4 and
        # removed in 2.0 -- pd.concat is the modern replacement.
        final_df = final_df.append(
            pd.Series([symbol, price, mkt_cap, 'N/A'],index = my_columns), ignore_index=True,
        )
# user input section to get a portfolio value
portfolio_balance = input('Enter portfolio value : ')
try:
    val = float(portfolio_balance)
except ValueError:
    # NOTE(review): only a single retry -- a second invalid entry still
    # raises ValueError and kills the script.
    print("Please enter a number")
    portfolio_balance = input('Enter portfolio value : ')
    val = float(portfolio_balance)
# Equal weight: each ticker gets the same dollar allocation.
position_size = val/len(final_df.index)
# NOTE(review): num_shares is an unused leftover.
num_shares = position_size
for i in range(len(final_df.index)):
    final_df.loc[i,'Number of shares to buy'] = math.floor(position_size/final_df.loc[i,'Stock Price'])
# write to the excel spreadsheet
writer = pd.ExcelWriter('sp500_trades.xlsx',engine = 'xlsxwriter')
final_df.to_excel(writer, 'SP500 trades',index = False)
# Cell formats applied per spreadsheet column below.
background_color = '#ffffff'
font_color = '#000000'
string_format = writer.book.add_format({
    'font_color':font_color,
    'bg_color': background_color,
    'border' : 1
})
dollar_format = writer.book.add_format({
    'num_format' : '$0.00',
    'font_color':font_color,
    'bg_color': background_color,
    'border' : 1
})
integer_format = writer.book.add_format({
    'num_format' : '0',
    'font_color':font_color,
    'bg_color': background_color,
    'border' : 1
})
column_formats = {
    'A': ['Ticker', string_format],
    'B': ['Stock Price', dollar_format],
    'C': ['Market Cap', integer_format],
    'D': ['Number of Shares to Buy', integer_format]
}
for column in column_formats.keys():
    writer.sheets['SP500 trades'].set_column(f'{column}:{column}',18,column_formats[column][1])
writer.save()
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,080
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/__init__.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 18:51:41 2020
@author: Xzhao
"""
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,081
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/Tutorial_main.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 19:02:04 2020
@author: Xzhao
"""
from Platform_class import BT_platform
from BOSC_strategy import BOSC
from datetime import datetime
import matplotlib.pyplot as plt
# Wire up the backtest platform with the buy-open/sell-close (BOSC) strategy
# and plot the resulting SPY equity curve.
Backtest_platform = BT_platform()
BOSC_strat = BOSC()
# NOTE(review): hard-coded absolute Windows path -- consider a CLI argument.
filepath = r'C:\Users\Xzhao\source\Backtrader\csv_files\SPY_1993_2020.csv'
# Wide date window so the whole CSV is replayed.
fromdate = datetime(1900,1,1)
todate = datetime(2020,6,30)
Backtest_platform.csv_filepath(filepath)
Backtest_platform.startdate(fromdate)
Backtest_platform.enddate(todate)
Backtest_platform.set_balance(10000.0)
Backtest_platform.set_strategy(BOSC_strat)
Backtest_platform.run()
# Equity-curve series recorded by the run loop (dates vs portfolio value).
xdata = Backtest_platform.portfolio_x_data()
ydata = Backtest_platform.portfolio_y_data()
plt.plot(xdata,ydata,label = "Buy Open, Sell Close")
plt.title("Portfolio performance for a SPY")
plt.xlabel("Date")
plt.ylabel("Portfolio Value")
plt.legend()
plt.show()
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,082
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/Platform_class.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 17:14:49 2020
@author: Xzhao
"""
from datetime import datetime
import pandas
from Order_class import Order
class BT_platform:
def __init__(self):
pass
#define a CSV filepath to open
def csv_filepath(self, filepath):
self.csv_filepath = filepath
#set the startdate for CSV file
def startdate(self,startdate):
self.startdate = startdate
#set the enddate for the CSV file
def enddate(self,enddate):
self.enddate = enddate
#functions to set starting balance and get the current money
def set_balance(self, balance):
self.balance = balance
def curr_balance (self):
return self.balance
def run(self):
csv_file = pandas.read_csv(self.csv_filepath)
dates = csv_file.set_index('Date',drop=False)
self.Portfolio_balance = []
self.shares_total = 0
self.date_track = []
for index,row in dates.iterrows():
currdate = datetime.strptime(index, '%Y-%m-%d')
#run only within the dates specified
if currdate >= self.startdate and currdate <= self.enddate:
curropen = row['Open']
currclose = row['Close']
#get order for open price
openorder = self.strategy.open(curropen,self.balance,self.shares_total)
if openorder:
if openorder.buy:
if openorder.shares * curropen > self.balance:
print("NOT ENOUGH CASH to fill BUY order")
else:
self.balance -= openorder.shares * curropen
self.shares_total += openorder.shares
else:
self.balance += openorder.shares * curropen
self.shares_total -= openorder.shares
#get order for close price
closeorder = self.strategy.close(curropen,self.balance,self.shares_total)
if closeorder:
if closeorder.buy:
if closeorder.shares * currclose > self.balance:
print("NOT ENOUGH CASH to fill BUY order")
else:
self.balance -= closeorder.shares * currclose
self.shares_total += closerorder.shares
else:
self.balance += closeorder.shares * currclose
self.shares_total -= closeorder.shares
#data keeping
self.date_track.append(currdate)
self.Portfolio_balance.append(self.balance + (self.shares_total * currclose))
print("End portfolio value = ", self.balance + (self.shares_total * currclose))
def portfolio_y_data(self):
return self.Portfolio_balance
def portfolio_x_data(self):
return self.date_track
def set_strategy (self, strategy):
self.strategy = strategy
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,083
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/Strategies/Equal_weight/Momentum_Strategy.py
|
import pandas as pd
import requests
from scipy import stats
import xlsxwriter
import math
'''This Python file implements an equal-weight, high-momentum portfolio strategy based on the stocks included in the S&P 500.
It will extract data from IEX using its API.
The buy information will be output into an Excel spreadsheet. It can easily be extended to work with a specific broker service.
'''
# function for portfolio input
def Portfolio_input():
    """Prompt until the user enters a numeric portfolio value; return it as a float.

    Bug fix: the original retried exactly once, so a second invalid entry
    raised ValueError.  This version keeps prompting until the input parses.
    """
    while True:
        portfolio_balance = input('Enter portfolio value : ')
        try:
            return float(portfolio_balance)
        except ValueError:
            print("Please enter a number")
# This function calculates the number of shares to buy given a portfolio size and Panda dataframe
def calc_shares(portfolio_val, dataframe):
    """Fill in 'Number of Shares to Buy' for an equal-weight allocation.

    Each row gets portfolio_val / len(dataframe) dollars, converted to a
    whole share count by flooring against that row's 'Price'.  The dataframe
    is modified in place and also returned for convenience.
    """
    # Guard against an empty frame instead of dividing by zero.
    if len(dataframe.index) == 0:
        return dataframe
    position_size = portfolio_val / len(dataframe.index)
    for i in range(len(dataframe.index)):
        dataframe.loc[i, 'Number of Shares to Buy'] = math.floor(position_size / dataframe.loc[i, 'Price'])
    return dataframe
# Function sourced from
# https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
    return
def main():
    """Screen the S&P 500 for high-quality momentum (HQM) stocks and write an
    equal-weight buy list for the top 50 to Momentum_strategy.xlsx."""
    # Local secrets.py holding the API token; note this shadows the stdlib
    # `secrets` module within this function.
    from secrets import IEX_CLOUD_API_TOKEN
    sp500_list_csv_filepath = "sp_500_stocks.csv"
    sp500_stocklist = pd.read_csv(sp500_list_csv_filepath)
    # NOTE(review): my_columns is never used below (hqm_columns supersedes it).
    my_columns = ['Ticker', 'Price', 'One-Year Price Return', 'Number of Shares to Buy']
    # create the strategy using quantitative momentum as our signal
    hqm_columns = [
        'Ticker', 'Price', 'Number of Shares to Buy', '1-year Price Return', '1-year Return Percentile',
        '6-month Price Return', '6-month Return Percentile', '3-month Price Return', '3-month Return Percentile',
        '1-month Price Return', '1-month Return Percentile', 'HQM SCORE'
    ]
    base_url = 'https://sandbox.iexapis.com/stable'
    hqm_df = pd.DataFrame(columns = hqm_columns)
    # IEX batch quotes accept at most 100 symbols per request.
    symbol_groups = list(chunks(sp500_stocklist['Ticker'], 100))
    symbol_strings = []
    for i in range(0, len(symbol_groups)):
        symbol_strings.append(','.join(symbol_groups[i]))
    # extract stock data using API
    for stocklist in symbol_strings:
        request = f'/stock/market/batch?symbols={stocklist}&types=stats,quote&token={IEX_CLOUD_API_TOKEN}'
        final_url = base_url + request
        data = requests.get(final_url).json()
        for symbol in stocklist.split(','):
            price = data[symbol]['quote']['latestPrice']
            # Missing return figures are coerced to 0 so percentile math works.
            yearlyreturn = data[symbol]['stats']['year1ChangePercent']
            if yearlyreturn == None:
                yearlyreturn = 0
            month6return =data[symbol]['stats']['month6ChangePercent']
            if month6return == None:
                month6return = 0
            month3return = data[symbol]['stats']['month3ChangePercent']
            if month3return == None:
                month3return = 0
            monthreturn = data[symbol]['stats']['month1ChangePercent']
            if monthreturn == None:
                monthreturn = 0
            # NOTE(review): DataFrame.append was removed in pandas 2.0; use pd.concat.
            hqm_df = hqm_df.append(
                pd.Series([symbol, price, 'N/A',
                           yearlyreturn, 'N/A',
                           month6return, 'N/A',
                           month3return, 'N/A',
                           monthreturn, 'N/A','N/A'],index = hqm_columns), ignore_index=True,
            )
    # calculate percentile data for each stock
    time_periods = ['1-year','6-month', '3-month', '1-month']
    for row in hqm_df.index:
        for time_period in time_periods:
            hqm_df.loc[row,f'{time_period} Return Percentile'] = \
                stats.percentileofscore(hqm_df[f'{time_period} Price Return'],hqm_df.loc[row,f'{time_period} Price Return'])
    # NOTE(review): 'HQM SCORE' is never computed -- it is still the 'N/A'
    # placeholder at this point, so the sort below is effectively arbitrary.
    # It was presumably meant to be the mean of the four return percentiles.
    hqm_df.sort_values('HQM SCORE',ascending = False, inplace=True)
    # keep the 50 strongest names and renumber the index for calc_shares
    hqm_df = hqm_df[:50]
    hqm_df.reset_index(inplace=True,drop=True)
    pf_val = Portfolio_input()
    hqm_df = calc_shares(pf_val,hqm_df)
    # write to the excel file
    writer = pd.ExcelWriter('Momentum_strategy.xlsx',engine='xlsxwriter')
    hqm_df.to_excel(writer,sheet_name= "buy")
    # Cell formats applied per spreadsheet column below.
    background_color = '#ffffff'
    font_color = '#000000'
    string_template = writer.book.add_format(
        {
            'font_color': font_color,
            'bg_color': background_color,
            'border': 1
        }
    )
    dollar_template = writer.book.add_format(
        {
            'num_format':'$0.00',
            'font_color': font_color,
            'bg_color': background_color,
            'border': 1
        }
    )
    integer_template = writer.book.add_format(
        {
            'num_format':'0',
            'font_color': font_color,
            'bg_color': background_color,
            'border': 1
        }
    )
    percent_template = writer.book.add_format(
        {
            'num_format':'0.0%',
            'font_color': font_color,
            'bg_color': background_color,
            'border': 1
        }
    )
    column_formats = {
        'A': ['Ticker', string_template],
        'B': ['Price', dollar_template],
        'C':['Number of Shares to Buy', integer_template],
        'D':['1-year Price Return', percent_template],
        'E':['1-year Return Percentile', percent_template],
        'F':['6-month Price Return', percent_template],
        'G':['6-month Return Percentile', percent_template],
        'H':['3-month Price Return', percent_template],
        'I':['3-month Return Percentile', percent_template],
        'J':['1-month Price Return', percent_template],
        'K':['1-month Return Percentile', percent_template],
        'L':['HQM SCORE', percent_template]
    }
    for column in column_formats.keys():
        writer.sheets['buy'].set_column(f'{column}:{column}',25,column_formats[column][1])
    writer.save()
    print("Finished")
    return
if __name__ == "__main__":
main()
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,084
|
XushengZhao/Backtest_Platform
|
refs/heads/master
|
/BOSC_strategy.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 21 18:56:05 2020
@author: Xzhao
"""
from Order_class import Order
class BOSC:
    """Buy-on-open / sell-on-close strategy.

    ``open`` spends the whole balance on whole shares at the opening price;
    ``close`` liquidates every share currently held.
    """

    def __init__(self):
        pass

    def open(self, curropen, balance, shares_cur):
        """Return a buy Order for as many whole shares as *balance* affords."""
        affordable = int(balance / curropen)
        return Order(True, affordable)

    def close(self, currclose, balance, shares_cur):
        """Return a sell Order that closes out the current position."""
        return Order(False, shares_cur)
|
{"/Tutorial_main.py": ["/Platform_class.py", "/BOSC_strategy.py"], "/Platform_class.py": ["/Order_class.py"], "/BOSC_strategy.py": ["/Order_class.py"]}
|
16,091
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/tests/unit_tests/apis/learnpy_endpoint.py
|
from flask_restplus import Resource, fields, reqparse, Namespace
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,092
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/dumyapi/app_file.py
|
# Make the `dumyapi` package importable when this file is run directly.
# NOTE(review): machine-specific absolute path — prefer installing the
# package or setting PYTHONPATH.
import sys
sys.path.append("c:\\Users\\ramesh.kg\\PycharmProjects\\DumyApi")
from flask import Flask
from dumyapi.apis import api
# Create the Flask app and attach the flask-restplus Api (registers all
# namespaces declared in dumyapi.apis).
app = Flask(__name__)
api.init_app(app)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,093
|
crkg/sqlAlchemy
|
refs/heads/master
|
/config/gunicorn_conf.py
|
import multiprocessing
import getpass

# Gunicorn configuration file. Paths are built from the current user's
# home directory.
# NOTE(review): `gunicorn_dir` ends with '/' and the log paths below start
# with '/', so the final paths contain a double slash — harmless on POSIX,
# but worth normalising.
base_dir = f'/Users/{getpass.getuser()}/PycharmProjects/'
gunicorn_dir = base_dir + 'gunicorn/'
#server
bind = '0.0.0.0:8443'
# Common sizing heuristic: two workers per CPU core, plus one.
workers = multiprocessing.cpu_count() * 2 + 1
timeout = 2000
daemon = True
#certfile = base_dir + 'ssl_keys/<application name>.pem'
#keyfile = base_dir + 'ssl_keys/<application_name>.key'
errorlog = gunicorn_dir + '/var/log/error.log'
# Fixed: this setting was misspelled `asseslog`, which gunicorn silently
# ignores — the access log was never configured. The correct name is
# `accesslog`.
accesslog = gunicorn_dir + '/var/log/access.log'
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,094
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/AWS/resources/aws.py
|
import boto3
import sys
import boto
import boto.ec2
import boto.utils
# auth = {"aws_access_key_id": "AKIAIR36LFO6363OXEQQ",
# "aws_secret_access_key": "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"}
def exception_handling(func):
    """Decorator for CLI helpers: print any exception and exit the process.

    Fixed: the wrapped function now keeps its name/docstring via
    functools.wraps, and the process exits with status 1 on error —
    previously it exited 0, signalling success to the shell even on failure.
    """
    from functools import wraps  # local import keeps this block self-contained

    @wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e1:
            error1 = "Error1: {}".format(e1)
            print(error1)
            sys.exit(1)
    return inner
class awsConnectRegion(object):
    """Holds AWS credentials and opens a regional EC2 connection via boto.

    SECURITY NOTE(review): real-looking AWS credentials are hardcoded as
    defaults and committed to the repo — rotate them immediately and load
    credentials from the environment or an IAM role instead.
    """
    def __init__(self, aws_id="AKIAIF35ZV3HKMYC2HUQ", aws_secret="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"):
        # Keyword-argument dict in the exact shape boto's connect helpers expect.
        self.auth = dict()
        self.auth['aws_access_key_id'] = aws_id
        self.auth['aws_secret_access_key'] = aws_secret
    @exception_handling
    def connect_to_region(self, region_name):
        # change "eu-west-1" region if different
        ec2 = boto.ec2.connect_to_region(region_name, **self.auth)
        return ec2
class AWSlistInstance(awsConnectRegion):
    """Read-only queries over the EC2 instances of one region."""

    def __init__(self, aws_id="AKIAIF35ZV3HKMYC2HUQ", aws_secret="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"):
        super().__init__(aws_id=aws_id, aws_secret=aws_secret)

    @exception_handling
    def listAllInstance(self, region_name):
        """Return the instance list of every reservation in *region_name*."""
        print("Listing all instance in region {} ".format(region_name))
        connection = self.connect_to_region(region_name)
        reservations = connection.get_all_instances()
        return [reservation.instances for reservation in reservations]

    @exception_handling
    def listAInstance(self, region_name, instance_id):
        """Return the state of the instance matching *instance_id*, else None."""
        found = self.listAllInstance(region_name)
        print(f"aws instances \n {found}")
        for group in found:
            if instance_id == group[0].id:
                return group[0].state
        return None
class AWSlistS3bucket(awsConnectRegion):
    @exception_handling
    def s3bucket(self, region_name, instnc):
        # NOTE(review): this calls list_buckets() on an *EC2* connection and
        # discards the result; S3 bucket listing normally needs an S3
        # connection — confirm intent. `instnc` is unused.
        print("List all existing buckets for the AWS account")
        ec2 = self.connect_to_region(region_name)
        ec2.list_buckets()
class AwsOperation(awsConnectRegion):
    """Mutating EC2 operations (start / stop / terminate) on one instance id."""
    @exception_handling
    def startInstance(self, region_name, instnc):
        # Start the instance identified by `instnc` in `region_name`.
        print("Starting the instance...")
        ec2 = self.connect_to_region(region_name)
        ec2.start_instances(instance_ids=instnc)
    @exception_handling
    def stopInstance(self, region_name, instnc):
        print("Stopping the instance...")
        ec2 = self.connect_to_region(region_name)
        ec2.stop_instances(instance_ids=instnc)
    @exception_handling
    def terminate(self, region_name, instnc):
        print("Terminating an instance...")
        ec2 = self.connect_to_region(region_name)
        ec2.terminate_instances(instance_ids=instnc)
    @exception_handling
    def s3bucket(self, region_name, instnc):
        # NOTE(review): duplicate of AWSlistS3bucket.s3bucket; also calls an
        # S3 API on an EC2 connection — confirm and deduplicate.
        print("List all existing buckets for the AWS account")
        ec2 = self.connect_to_region(region_name)
        ec2.list_buckets()
def main():
    """CLI entry point: apply start/stop/terminate to instances in a region.

    Usage: python aws.py <action> <region>
    Fixed: the reservation list was bound to the name ``list``, shadowing
    the builtin; renamed to ``instances``.
    """
    # read arguments from the command line and
    # check whether at least two elements were entered
    if len(sys.argv) < 3:
        print("Usage arguments are space seperated: python aws.py {start|stop|terminate|awsRegion}\n")
        sys.exit(0)
    else:
        action = sys.argv[1]
        awsRegion = sys.argv[2]
        # awsAccessKey = sys.argv[3]
        # awsSecretKey = sys.argv[4]
        lister = AWSlistInstance()
        instances = lister.listAllInstance(awsRegion)
        print("Here is the list of instance on region {}, {}".format(awsRegion, instances))
        # NOTE(review): this loop leaves `instnc` pointing at the LAST
        # instance only, so the operation below acts on a single instance,
        # not all of them — confirm intent. With zero instances, `instnc`
        # is unbound and the calls below raise NameError (pre-existing).
        for reservation in instances:
            instnc = reservation[0].id
        awsop = AwsOperation()
        if action == "start":
            awsop.startInstance(awsRegion, instnc)
        elif action == "stop":
            awsop.stopInstance(awsRegion, instnc)
        elif action == "terminate":
            awsop.terminate(awsRegion, instnc)
        else:
            print("Usage arguments are space seperated: python aws.py {start|stop|terminate|awsRegion}\n")
if __name__ == '__main__':
main()
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,095
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/AWS/apis/__init__.py
|
from flask_restplus import Api
from AWS.apis.aws_api_endpoint import namespace as awsnamespace
# Top-level flask-restplus Api object; endpoints are grouped into
# namespaces and attached here.
api = Api(
    title='AWS Integration',
    version='v0',
    description='Creating AWS Api along with Boto 3'
)
api.add_namespace(awsnamespace)
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,096
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/dumyapi/core/auth.py
|
from flask import request
from base64 import b64decode
from functools import wraps
def authorize(f):
    """Decorator enforcing HTTP Basic auth on a Flask view.

    Returns 401 with a JSON message when credentials are absent, malformed,
    or wrong. SECURITY NOTE(review): the expected username/password are
    hardcoded — move them to configuration and hash the password.
    """
    @wraps(f)
    def inner(*args, **kwargs):
        if 'authorization' not in request.headers:
            return {'message': 'No credentials provide'}, 401
        parts = request.headers.get('authorization').split()
        # Fixed: a malformed header (e.g. "Basic" with no payload) used to
        # raise IndexError and surface as HTTP 500; treat it as missing
        # credentials instead.
        if len(parts) != 2:
            return {'message': 'No credentials provide'}, 401
        try:
            username, password = b64decode(parts[1]).decode('UTF-8').split(':')
        except Exception:
            # Broad on purpose at this auth boundary: bad base64, bad UTF-8,
            # or a payload without exactly one ':' all mean "bad credentials".
            return {'message': 'No credentials provide'}, 401
        if username != 'Admin':
            return {'message': 'User not Authorized'}, 401
        if password != 'Sungard01':
            return {'message': 'Incorrect Password'}, 401
        return f(*args, **kwargs)
    return inner
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,097
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/dumyapi/apis/language_api.py
|
from flask_restplus import Resource, fields, reqparse, Namespace
from flask import request
from dumyapi.core.auth import authorize
langnamespace = Namespace("Languages", description='Get languages for given user')
parselevel = reqparse.RequestParser()
parselevel.add_argument('Username', type=str, required=True)
#parselevel.add_argument('Languages', type=list, required=True)
model = langnamespace.model('postInput', {
'username': fields.String,
'Languages': fields.List(fields.String)
})
lang = {}
@langnamespace.route('/languages')
class language(Resource):
    # POST stores the payload's language list under its username key;
    # GET returns the whole mapping.
    @langnamespace.response(201, 'accepted')
    @langnamespace.response(400, 'Bad request')
    @langnamespace.response(500, 'Internal server error')
    @langnamespace.response(404, 'Not found')
    @langnamespace.response(403, 'Unauthorized')
    @langnamespace.expect(model)
    @authorize
    def post(self):
        '''Store the list of languages for the posted username'''
        payload = request.get_json()
        #print(payload)
        key = payload.get('username')
        value = payload.get('Languages')
        # NOTE(review): `lang` is a module-level dict — state is shared
        # across requests and lost on restart.
        lang[key] = value
        return lang, 201
    @langnamespace.response(200, 'success')
    @langnamespace.response(400, 'Bad request')
    @langnamespace.response(500, 'Internal server error')
    @langnamespace.response(404, 'Not found')
    #@langnamespace.doc(parser=parselevel)
    #@langnamespace.marshal_with(model, code=200)
    @authorize
    def get(self):
        '''Return the full username -> languages mapping'''
        #payload=parselevel.parse_args()
        #print(payload)
        #key = payload.get('username')
        #value = payload.get('Languages')
        return lang, 200
@langnamespace.route('/languages/<username>')
class languageByUsername(Resource):
    """Look up the stored language list for a single user."""
    @langnamespace.response(201, 'accepted')
    @langnamespace.response(400, 'Bad request')
    @langnamespace.response(500, 'Internal server error')
    @langnamespace.response(404, 'Not found')
    @langnamespace.response(403, 'Unauthorized')
    # @langnamespace.doc(parser=parselevel)
    @authorize
    def get(self, username):
        '''Return the languages stored for the given username'''
        # Fixed: an unknown username used to raise KeyError and surface as
        # HTTP 500; return the documented 404 instead.
        if username not in lang:
            return {'message': 'Not found'}, 404
        return lang[username], 200
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,098
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/resources/Injectors.py
|
from sqlAlchemy.resources.movies_ops import MovieOperations
from sqlAlchemy.models.utils.movies import MovieDbOperations
def ins_creation():
    """Build and return (MovieDbOperations, MovieOperations) instances.

    Fixed: the original read ``def ins_creation:`` (missing parentheses),
    which is a SyntaxError and made this module unimportable.
    """
    movies_ops_ins = MovieOperations()
    movies_db_ins = MovieDbOperations()
    return movies_db_ins, movies_ops_ins
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,099
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/REST-Plus-API/apis/__init__.py
|
from flask_restplus import Api
from apis.aws_ops import namespace as awsnamespace
from apis.todos import todonamespace
from apis.movies import namespace as moviesnamespace
# Aggregate Api: each feature area contributes one namespace.
api = Api(
    title='Learn-API',
    version='V1',
    description='Learning API'
)
api.add_namespace(awsnamespace)
api.add_namespace(todonamespace)
# Movies endpoints are mounted under an explicit versioned prefix.
api.add_namespace(moviesnamespace, path="/v1/movies")
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,100
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/AWS/apis/aws_api_endpoint.py
|
from flask_restplus import Resource, fields, reqparse, Namespace
from AWS.resources.aws import AWSlistInstance
namespace = Namespace('AWS', description='Try all AWS functions')
awsschema = namespace.model('AWS', {
'AwsRegion': fields.String(description='Select the aws region', required=True),
'State': fields.String(description='Select the aws server state', required=True),
'Ec2Instance': fields.String(description='Select the aws server state', required=True),
})
parselevel=reqparse.RequestParser()
parselevel.add_argument('Instance Name', type=str, required=True)
parselevel.add_argument('AWS Region', type=str, required=True)
@namespace.route('/aws-state')
#End point resource , inherting the resource to the stop instance
class GetInstanceState(Resource):
    """GET /aws-state: report the state of one EC2 instance by id."""
    @namespace.response(200, 'success')
    @namespace.response(400, 'Bad request')
    @namespace.response(500, 'Internal server error')
    @namespace.response(404, 'Not found')
    @namespace.response(403, 'Unauthorized')
    @namespace.doc(parser=parselevel)
    @namespace.doc(description="\n To get the status of the Instance")
    def get(self):
        # Query params (module-level parser): "AWS Region", "Instance Name".
        awsinfo = parselevel.parse_args()
        awsregion = awsinfo.get("AWS Region")
        awsinstace = awsinfo.get("Instance Name")
        # Uses the default (hardcoded) credentials in AWSlistInstance.
        instance = AWSlistInstance()
        instancedetails = instance.listAInstance(awsregion, awsinstace)
        if instancedetails is None:
            return {'error': 'No Instance Found with the name'}, 404
        else:
            return {'Instance': instancedetails}, 200
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,101
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/tests/unit_tests/resources/test_worker.py
|
from learnpy.resources.worker import Helper, WorkerService
import pytest
class FakeHelper:
    # Hand-rolled stub standing in for learnpy.resources.worker.Helper.
    def get_path(self):
        return "C:\\Program Files\\db"
class TestHelper:
    def test_helper(self, mocker):
        # Construction alone should succeed.
        a = Helper('db')
        assert isinstance(a, Helper)
    def test_get_path(self, mocker):
        # Pin os.getcwd so the joined path is deterministic.
        mocker.patch('learnpy.resources.worker.os.getcwd', return_value='C:\\Program Files')
        a = Helper('db')
        assert a.get_path() == 'C:\\Program Files\\db'
class TestWorker:
    def test_worker_instaance_creation(self, mocker):
        # Replace the Helper dependency with the hand-rolled stub.
        mocker.patch('learnpy.resources.worker.Helper', return_value=FakeHelper())
        w = WorkerService()
        assert isinstance(w, WorkerService)
    def test_worker_work(self,mocker):
        mocker.patch('learnpy.resources.worker.Helper', return_value=FakeHelper())
        w = WorkerService()
        assert w.work() == 'C:\\Program Files\\db'
    def test_worker_work_without_fake_helper(self,mocker):
        # Same scenario, but using an autospec'd MagicMock instead of the stub.
        mock_helper = mocker.patch('learnpy.resources.worker.Helper', autospec=True)
        mock_helper.return_value.get_path.return_value = 'C:\\Program Files\\db'
        w = WorkerService()
        assert w.work() == 'C:\\Program Files\\db'
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,102
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/learnpy/resources/learn-while.py
|
# Counting demo: prints "Computing i 1" through "Computing i 5".
# The loop deliberately leaves i == 6 afterwards, matching the original.
i = 1
while True:
    if i > 5:
        break
    print("Computing i {}".format(i))
    i = i + 1
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,103
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/app_file.py
|
import sys
import os
# Machine-specific path hack so the sqlAlchemy package resolves when run
# directly. NOTE(review): prefer installing the package / PYTHONPATH.
sys.path.append("/Users/ramesh/PycharmProjects/sqlAlchemy")
from flask import Flask
from sqlAlchemy.models.utils.base_db import db
def load_conf(app):
    """Load the local config module, then optionally overlay CONFFILE_PATH."""
    app.config.from_object('config.app_conf_local')
    if 'CONFFILE_PATH' in os.environ:
        app.config.from_envvar('CONFFILE_PATH')
app = Flask(__name__)
load_conf(app)
# Push an app context so db.create_all() can run at import time.
app.app_context().push()
db.init_app(app)
db.create_all()
# Imported late on purpose: the api module needs the db to be initialised.
from sqlAlchemy.apis import api
api.init_app(app)
@api.errorhandler(Exception)
def handle_exception(error):
    # Catch-all API error handler: surface any exception as a 400 JSON body.
    return {'message': str(error)}, 400
if __name__ == "__main__":
    app.run(debug=True)
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,104
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/tests/unit_tests/resources/test_sample_os_cmd.py
|
from learnpy.resources.sample_os_cmd import work
import pytest
import mock
class TestSampleOsCmd:
def test_work(self, mocker):
mocker.patch('learnpy.resources.sample_os_cmd.os.getcwd', return_value='C:\\Program Files')
assert work() == 'C:\\Program Files'
def test_work_other(self, mocker):
mock_getcwd = mocker.patch('learnpy.resources.sample_os_cmd.os.getcwd')
mock_getcwd.return_value = 'C:\\Program Files'
assert work() == 'C:\\Program Files'
@mock.patch('learnpy.resources.sample_os_cmd.os.getcwd')
def test_work_with_patch(self, mock_cwd):
mock_cwd.return_value = 'C:\\Program Files'
assert work() == 'C:\\Program Files'
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,105
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/REST-Plus-API/app_file.py
|
import sys
sys.path.append("c:\\Users\\ramesh.kg\\PycharmProjects\\Flask-REST-full\\")
from flask import Flask
from apis import api
app = Flask(__name__)
api.init_app(app)
@api.errorhandler(Exception)
def handle_exception(error):
return {'message': str(error)}, 400
if __name__ == "__main__":
app.run(debug=True)
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,106
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/dumyapi/apis/__init__.py
|
from flask_restplus import Api
from dumyapi.apis.language_api import langnamespace
# Swagger security scheme definition: HTTP Basic auth carried in the
# Authorization header; enforced by the @authorize decorator on the views.
authorization = {
    'Basic Auth': {'type': 'basic',
                   'in': 'header',
                   'name': 'authorization'
                   }
}
api = Api(
    title='Language-API',
    version='V1',
    # NOTE(review): "Lunguage" looks like a typo for "Language" — it is a
    # user-visible Swagger string, so it is left unchanged here.
    description='Lunguage API',
    security='Basic Auth',
    authorizations=authorization
)
api.add_namespace(langnamespace, path="/v1")
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,107
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/sqlAlchemy/models/utils/movies.py
|
from sqlAlchemy.models.utils.base_db import db
from sqlAlchemy.models.utils.database_session import Persistance
from sqlAlchemy.models.schema.movies import Movies
from sqlalchemy import text
class MovieDbOperations:
    """CRUD helpers over the Movies table, each using one Persistance session.

    SECURITY NOTE(review): the delete/update filters interpolate values into
    raw SQL via text(f"...") — that is SQL-injectable and, because string
    values are not quoted, likely fails even for benign input. Switch to
    bound parameters (text("... = :name").bindparams(...)) or column
    expressions (Movies.movie_name == name).
    """
    @staticmethod
    def create(dict_args):
        """Insert one row built from dict_args (committed by the session scope)."""
        #print(dict_args)
        movies_model_ins = Movies(dict_args)
        with Persistance().session as session:
            session.add(movies_model_ins)
    @staticmethod
    def delete(moviename):
        """Delete rows whose movie_name matches *moviename* (see SQL note above)."""
        with Persistance().session as session:
            session.query(Movies).filter(text(f"movies.movie_name={moviename}")).delete(synchronize_session=False)
        #try:
        #    db.session.query(Movies).filter(text(f"movies.movie_name={moviename}")).delete(synchronize_session=False)
        #    db.session.commit()
        #except Exception as Error:
        #    db.session.rollback()
        #    raise Exception(Error)
        #finally:
        #    db.session.close()
    @staticmethod
    def update(dict_args):
        """Update matching rows with dict_args.

        NOTE(review): the filter reads dict_args['moviename'] but other
        call sites use the key 'movie_name' — confirm which key callers send.
        """
        with Persistance().session as session:
            session.query(Movies).filter(text(f"movies.movie_name={dict_args.get('moviename')}")).update(dict_args,
                                                                                                         synchronize_session=False)
        # try:
        #     db.session.query(Movies).filter(text(f"movies.movie_name={moviename}")).update(dict_args,
        #                                                                                    synchronize_session=False)
        #     db.session.commit()
        # except Exception as Error:
        #     db.session.rollback()
        #     raise Exception(Error)
        # finally:
        #     db.session.close()
    def select(self, condition, all_row):
        """Dispatch to one-row or all-rows select; condition is raw SQL or falsy."""
        if all_row:
            return self._select_all(condition)
        else:
            return self._select_one(condition)
    def _select_one(self, condition):
        # Return at most one row as [dict], or [] when nothing matches.
        with Persistance().session as session:
            if condition:
                result = session.query(Movies).filter(text(condition)).first()
            else:
                result = session.query(Movies).first()
            if result:
                return [result.to_obj()]
            else:
                return []
    def _select_all(self, condition):
        # Return every matching row as a list of dicts ([] when none).
        with Persistance().session as session:
            if condition:
                result = session.query(Movies).filter(text(condition)).all()
            else:
                result = session.query(Movies).all()
            if result:
                return [each_row.to_obj() for each_row in result if each_row]
            else:
                return []
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,108
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/sqlAlchemy/apis/movies.py
|
from flask_restplus import Resource, fields, reqparse, Namespace
from sqlalchemy.resources.injectors import ins_creation
parselevel=reqparse.RequestParser()
parselevel.add_argument('directorname', type=str, required=True, help='Enter the director name')
namespace = Namespace('Movies', description='to movies from file')
movieschema = namespace.model('Movies', {
'Director': fields.String(description='Director name', required=True),
'Movie Name': fields.String(description='Movie name', required=True),
'Release Year': fields.Integer(description='Year of release', required=True)
})
@namespace.route('/movies')
class TodoSimple(Resource):
    """GET lists all movies; POST inserts one movie row.

    NOTE(review): the module imports ins_creation from
    'sqlalchemy.resources.injectors' (lowercase) while the package on disk
    is 'sqlAlchemy' with module 'Injectors' — confirm it resolves.
    """
    @namespace.response(200, 'success')
    @namespace.response(400, 'Bad request')
    @namespace.response(500, 'Internal server error')
    @namespace.response(404, 'Not found')
    @namespace.response(403, 'Unauthorized')
    # @namespace.doc(parser=awsparselevel)
    def get(self):
        # ins_creation returns (db-layer instance, ops-layer instance).
        mov_db_ins, mov_ins = ins_creation()
        result = mov_ins.getallmovies(movies_ins=mov_db_ins)
        if not result:
            return {"movies": "movies not found"}, 404
        else:
            return {"movies": result}, 200
    @namespace.response(201, 'success')
    @namespace.response(400, 'Bad request')
    @namespace.response(500, 'Internal server error')
    @namespace.response(404, 'Not found')
    @namespace.response(403, 'Unauthorized')
    @namespace.expect(movieschema)
    def post(self):
        payload = self.parsing_args()
        print(payload)
        movies_db_ins, movies_ops_ins = ins_creation()
        insert_db = movies_ops_ins.insertintodb(payload, movies_db_ins)
        # insertintodb currently always returns 'success'; a falsy result
        # would make this view return None (HTTP 500).
        if insert_db == 'success':
            return 'success', 201
    @staticmethod
    def parsing_args():
        # Local parser for the POST body fields.
        parselevel = reqparse.RequestParser()
        parselevel.add_argument('Director', type=str, required=True)
        parselevel.add_argument('Movie Name', type=str, required=True)
        parselevel.add_argument('Release Year', type=int, required=True)
        return parselevel.parse_args()
@namespace.route('/directorname')
class TodoSimpleDirector(Resource):
    """GET movies filtered by director name (query parameter 'directorname')."""
    @namespace.response(200, 'success')
    @namespace.response(400, 'Bad request')
    @namespace.response(500, 'Internal server error')
    @namespace.response(404, 'Not found')
    @namespace.response(403, 'Unauthorized')
    @namespace.doc(parser=parselevel)
    def get(self):
        payload = parselevel.parse_args()
        print(payload)
        directorname = payload.get('directorname')
        movies_db_ins, movies_ops_ins = ins_creation()
        result = movies_ops_ins.getmoviesbydir(directorname, movies_db_ins)
        if not result:
            return {"movies": "movies not found"}, 404
        else:
            return {"movies": result}, 200
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,109
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/venv/bin/utils/database_session.py
|
from sqlalchemy import create_engine
from contextlib import contextmanager
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlAlchemy.models.utils.base_db import db
def sql_engine(db_config: dict):
    """Build a PostgreSQL SQLAlchemy engine from a config dict (test helper).

    Expects keys: user, password, db_host, db_port, database.
    """
    uri = (
        "postgresql://{user}:{password}"
        "@{db_host}:{db_port}/{database}"
    ).format(**db_config)
    return create_engine(uri, convert_unicode=True)
engine=db.get_engine()
SESSION = scoped_session(sessionmaker(bind=engine, autocommit=True, expire_on_commit=False))
Base = declarative_base()
metadata = Base.metadata
metadata.bind = engine
Base.query = SESSION.query_property()
@contextmanager
def session_scope():
    '''Provide a transactional scope around a series of operations'''
    # Fresh session per scope; expire_on_commit=False keeps returned ORM
    # objects usable after the commit.
    session = scoped_session(sessionmaker(bind=engine, expire_on_commit=False))
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        # Fixed: previously `raise Exception(Error)`, which replaced the
        # original exception type and traceback with a bare Exception;
        # re-raise the original unchanged instead.
        raise
    finally:
        session.close()
class Persistance:
    '''Handle database releated actions/attributes'''
    # Thin facade over this module's globals (engine, Base, session_scope)
    # so callers can write `with Persistance().session as s:`.
    @property
    def engine(self):
        '''Database enginer property'''
        return engine
    @property
    def base(self):
        '''Database base propery'''
        return Base
    @property
    def session(self):
        '''Database session propery'''
        # Returns a fresh context manager each access — commit on success,
        # rollback on error, close always.
        return session_scope()
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,110
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/sqlAlchemy/models/schema/movies.py
|
from sqlAlchemy.models.utils.base_db import db
class Movies(db.Model):
    """ORM row for the movies table; movie_name is the primary key."""
    movie_name = db.Column(db.String(90), primary_key=True)
    year = db.Column(db.Integer)
    director = db.Column(db.String(80))
    def __init__(self, dict_args):
        # Expects keys 'director', 'movie_name', 'year'.
        self.director = dict_args.get('director')
        self.movie_name = dict_args.get('movie_name')
        # NOTE(review): int() raises TypeError when 'year' is absent (None)
        # — confirm callers always supply it.
        self.year = int(dict_args.get('year'))
    def to_obj(self):
        """Return the row as a plain JSON-serialisable dict."""
        return {
            "movie_name": self.movie_name,
            "year": self.year,
            "director": self.director
        }
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,111
|
crkg/sqlAlchemy
|
refs/heads/master
|
/FnD/fnd/apps/hello.py
|
from flask import Flask
from flask import make_response, render_template
from flask_bootstrap import Bootstrap
app = Flask(__name__)
bootstrap = Bootstrap(app)
class Someobject:
    """Tiny demo object handed to the Jinja templates below."""

    @staticmethod
    def somemethod():
        # Constant value rendered by the template views.
        return 5
#@app.route('/<path:name>')
def hello(name):
    # Registered below via add_url_rule instead of the decorator above.
    return f'<h1> Hellow {name} </h1>'
app.add_url_rule(endpoint='hello', view_func=hello, rule='/<path:name>')
@app.route('/')
def index():
    # Demonstrates building a response object and setting a cookie on it.
    response = make_response('<h1> This document is for index </h1>')
    response.set_cookie('answer', '42')
    return response
@app.route('/test/<name>')
def template_test(name):
    # Exercises template access to dicts, lists, indexing and object methods.
    return render_template('hello.html', var_name=name, mydict={'ac': 40}, mylist=[1, 2, 3, 4], myindex=1, myObject=Someobject())
@app.route('/inherit/<name>')
def inherit_html(name):
    # Same context as /test, rendered through a template that uses inheritance.
    return render_template('inherit_hello.html', var_name=name, mydict={'ac': 40}, mylist=[1, 2, 3, 4], myindex=1, myObject=Someobject())
@app.route('/body/<name>')
def body(name):
    return render_template('bodytemplate.html', var_name=name)
@app.route('/user')
def user():
    return render_template('user.html')
@app.route('/user_name/<name>')
def user_info(name):
    return render_template('user_info.html', var_name=name)
if __name__ == '__main__':
    # Development server only.
    app.run(debug=True, port=5000)
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,112
|
crkg/sqlAlchemy
|
refs/heads/master
|
/manage.py
|
import os
import sys
# Machine-specific path hack; see note in sqlAlchemy/app_file.py.
sys.path.append("/Users/ramesh/PycharmProjects/sqlAlchemy")
from flask_script import Manager
from flask_migrate import MigrateCommand, Migrate
from sqlAlchemy.app_file import app, db
# Wire flask-migrate and flask-script CLI commands onto the app.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def routes():
    '''show all routes available'''
    print(app.url_map)
@manager.command
def run():
    '''Run app'''
    # Prints the route map first, then starts the dev server.
    routes()
    app.run(host='0.0.0.0', port=5000, threaded=True, debug=True)
@manager.command
def rm_pyc():
    '''Remove all pyc on bash only'''
    # POSIX `find` only; no-op / error on Windows shells.
    os.system('find . -type f -name "*.py[co]" -delete -or -type d -name "__pycache__" -delete')
if __name__ == '__main__':
    manager.run()
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,113
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/sqlAlchemy/apis/__init__.py
|
from flask_restplus import Api
from sqlAlchemy.apis.movies import namespace as moviesnamespace
# Aggregate Api for the sqlAlchemy sample; movies endpoints live under /v1/movies.
api = Api(
    title='Learn-API',
    version='V1',
    description='Learning API'
)
api.add_namespace(moviesnamespace, path="/v1/movies")
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,114
|
crkg/sqlAlchemy
|
refs/heads/master
|
/api/tests/unit_test/resources/test_aws.py
|
from unittest.mock import Mock, patch
from boto.ec2.connection import EC2Connection
import boto
from moto import mock_ec2_deprecated, mock_ec2
from AWS.resources.aws import awsConnectRegion, AWSlistInstance
class Test_awsConnectRegion:
    def test_awsconnectresion_instatiation(self):
        # Construction with dummy credentials should succeed.
        awsins = awsConnectRegion(aws_id='xxx', aws_secret='asd12323')
        assert isinstance(awsins, awsConnectRegion)
    @mock_ec2_deprecated
    def test_connect_to_region(self):
        # moto intercepts the boto call, so no real AWS account is needed.
        awsins = awsConnectRegion(aws_id='xxx', aws_secret='asd12323')
        result = awsins.connect_to_region('us-east-1')
        assert isinstance(result, EC2Connection)
class TestAWSlistInstance:
    @mock_ec2_deprecated
    @mock_ec2
    @patch.object(AWSlistInstance, 'listAllInstance')
    def test_listAllInstance(self, mock_instances):
        # NOTE(review): the method under test is itself patched here, so the
        # assertion only checks the mock round-trip, not real behaviour.
        mock_instances.return_value = ['Instance:i-03117ab1abf9992e9', 'Instance:i-0c6aef3dfd797d6ac']
        aswlistins = AWSlistInstance(aws_id='xxx', aws_secret='yyy123')
        result = aswlistins.listAllInstance('us-east-1')
        assert result == ['Instance:i-03117ab1abf9992e9', 'Instance:i-0c6aef3dfd797d6ac']
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,115
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/sqlAlchemy/resources/movies_ops.py
|
import os
# Fixed: this module previously imported `Dboperations`, a name that does
# not exist in sqlAlchemy.models.utils.movies (the class defined there is
# MovieDbOperations), so importing this module raised ImportError.
from sqlAlchemy.models.utils.movies import MovieDbOperations
class MovieOperations:
    """Query/insert helpers between the API layer and the DB layer."""
    def getallmovies(self, movies_ins: MovieDbOperations):
        """Return every movie row as a list of dicts ([] when the table is empty)."""
        # fpath = os.path.abspath("resources\movies.csv")
        all_db_rows = movies_ins.select(condition=None, all_row=True)
        return all_db_rows if all_db_rows else []
    def getmoviesbydir(self, director, movie_ops_instance: MovieDbOperations):
        """Return all movies by *director* ([] when none match).

        SECURITY NOTE(review): the condition is built by string
        interpolation and is SQL-injectable — switch to bound parameters.
        """
        condition = f"movies.director = '{director}'"
        list_by_condition = movie_ops_instance.select(condition=condition, all_row=True)
        return list_by_condition if list_by_condition else []
    def insertintodb(self, payload, movie_ops_instance: MovieDbOperations):
        """Map the API payload keys onto column names and insert the row."""
        dict_args = {
            'director': payload.get('Director'),
            'movie_name': payload.get('Movie Name'),
            'year': payload.get('Release Year')
        }
        movie_ops_instance.create(dict_args)
        return 'success'
if __name__ == '__main__':
    movie = MovieOperations()
    #print(movie.getmoviesbydir('FrancisLawrence'))
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,116
|
crkg/sqlAlchemy
|
refs/heads/master
|
/sqlAlchemy/config/app_conf_local.py
|
import os
# NOTE(review): literal_eval is imported but never used in this file.
from ast import literal_eval
_basedir = os.path.abspath(os.path.dirname(__file__))
# Default database: a SQLite file next to the models package; overridable
# via the SQLALCHEMY_DATABASE_URI environment variable.
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../sqlAlchemy/models/test.db')
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI', "sqlite:///" + path)
SQLALCHEMY_TRACK_MODIFICATIONS = True
DEBUG = True
|
{"/manage.py": ["/sqlAlchemy/app_file.py"]}
|
16,118
|
palinnilap/happy-planet
|
refs/heads/master
|
/gameloop.py
|
from player import Player
from level import Level
from typing import Tuple
class GameLoop:
    """Core game state machine: maps the player's happiness to a level,
    a colour scheme, an emoji, and a won/lost status."""
    def __init__(self,
                 player : Player,
                 main_levels : Tuple[Level], start_lev,
                 lev_tutorial=None, lev_won=None, lev_lost=None):
        # NOTE(review): `start_lev` is accepted but never used — the loop
        # always begins on the tutorial level.
        self._levels = main_levels
        self._player = player
        self._cur_lev_num = 0
        self._cur_lev = lev_tutorial
        self._level_won = lev_won
        self._level_lost = lev_lost
        self._status = 0
        # 0 --> in progress
        # 1 --> won
        # -1 --> lost
    @property
    def level_rgbs(self):
        # Two RGBA colour lists for the current level/status.
        return self._get_rgbs()
    @property
    def status(self):
        return self._status
    @property
    def score(self):
        # Reaches into Player's private attribute; returned as str for the UI.
        return str(self._player._score)
    @property
    def emoji(self):
        return self._get_emoji()
    @property
    def happy(self):
        return str(self._player._happy)
    def get_next_challenge(self):
        """Delegate to whichever level is currently active."""
        return self._cur_lev.get_next_challenge()
    def submit_results(self, score : int, happy : int):
        """Apply a challenge outcome to the player, then re-evaluate the level."""
        self._player.add_score(score)
        self._player.add_happy(happy)
        self.check_happy()
    def check_happy(self):
        """Pick level / win / loss from the player's happiness.

        -2019 is the tutorial sentinel (see factory.create_gameloop).
        NOTE(review): happiness of exactly 100 matches no branch — nothing
        changes until the next submit; confirm whether 100 should win.
        """
        if self._player.happy == -2019:
            #you are in the tutorial, do nothing.
            return
        elif self._player.happy <= 0:
            self._change_status(-1)
            self._cur_lev = self._level_lost
        elif self._player.happy < 25:
            self._set_level(0)
        elif self._player.happy < 50:
            self._set_level(1)
        elif self._player.happy < 75:
            self._set_level(2)
        elif self._player.happy < 100:
            self._set_level(3)
        elif self._player.happy > 100:
            self._change_status(1)
            self._cur_lev = self._level_won
    def _set_level(self, lev_num : int):
        # Switch both the level index (drives colours) and the active level.
        self._cur_lev_num = lev_num
        self._cur_lev = self._levels[lev_num]
    def _change_status(self, to_what : int):
        # 100 / -1 are sentinel level numbers for the won / lost screens.
        self._status = to_what
        if to_what == 1:
            self._cur_lev_num = 100
            self._cur_lev = self._level_won
        elif to_what == -1:
            self._cur_lev_num = -1
            self._cur_lev = self._level_lost
    def _get_rgbs(self):
        # Returns (foreground, background) RGBA lists.
        # NOTE(review): falls off the end (returns None) for level numbers
        # outside 0-3 while still in progress — confirm unreachable.
        if self._status == -1: #lost
            return [.1,.1,.5,1], [0,0,.2,1]
        elif self._status == 1: #won
            return [1,.8,0,1], [.7,0,0,1]
        elif self._player.happy == -2019:
            return [1,.8,0,1], [.7,0,0,1]
        elif self._cur_lev_num == 0:
            return [.2,.2,.5,1], [.1,.1,.3,1]
        elif self._cur_lev_num == 1:
            return [1,.8,.8,1], [.6,.4,.4,1]
        elif self._cur_lev_num == 2:
            return [1,.4,.4,1], [.6,.3,.3,1]
        elif self._cur_lev_num == 3:
            return [1,.1,.1,1], [.2,.2,.2,1]
    def _get_emoji(self):
        # Happiness-to-face mapping; tutorial sentinel gets the neutral face.
        if self._player.happy == -2019:
            return '(•_•)'
        elif self._player._happy < 10:
            return r'.·´¯`(>_<)´¯`·.'
        elif self._player._happy < 25:
            return r'¯\_(•_•)_/¯'
        elif self._player._happy < 50:
            return '(•_•)'
        elif self._player._happy < 75:
            return '(^__^)'
        elif self._player._happy < 100:
            return r'*\(^__^)/*'
        elif self._player._happy >= 100:
            return r'*(0_o)*'
|
{"/gameloop.py": ["/player.py", "/level.py"], "/level.py": ["/challenge.py"], "/factory.py": ["/challenge.py", "/level.py", "/player.py", "/gameloop.py"], "/main.py": ["/factory.py"]}
|
16,119
|
palinnilap/happy-planet
|
refs/heads/master
|
/player.py
|
class Player():
    """Tracks a player's happiness level and cumulative score."""

    def __init__(self, start_happy):
        # Happiness drives level selection; score only accumulates.
        self._happy = start_happy
        self._score = 0

    def add_happy(self, val):
        """Adjust happiness by val (negative values allowed)."""
        self._happy = self._happy + val

    def add_score(self, val):
        """Accumulate score points."""
        self._score = self._score + val

    @property
    def happy(self):
        """Current happiness value."""
        return self._happy
|
{"/gameloop.py": ["/player.py", "/level.py"], "/level.py": ["/challenge.py"], "/factory.py": ["/challenge.py", "/level.py", "/player.py", "/gameloop.py"], "/main.py": ["/factory.py"]}
|
16,120
|
palinnilap/happy-planet
|
refs/heads/master
|
/challenge.py
|
from typing import Tuple
class Challenge:
    """One quiz item: a prompt, its answer choices, and per-choice outcomes.

    The four tuples are parallel: ans_vals[i] and ans_expl[i] belong to
    choices[i].
    """

    def __init__(
        self, prompt : Tuple[str],
        choices : Tuple[str],
        ans_vals : Tuple[int],
        ans_expl : Tuple[str]
    ):
        self._prompt = prompt
        self._choices = choices
        self._ans_vals = ans_vals
        self._ans_expl = ans_expl

    def get_prompt(self) -> tuple:
        """Return the prompt lines."""
        return self._prompt

    def get_choices(self) -> tuple:
        """Return the answer choices."""
        return self._choices

    def asses_choice(self, choice : int):
        """Return (value, explanation) for a 1-based choice number."""
        idx = choice - 1  # callers pass 1-based numbers; tuples are 0-based
        return self._ans_vals[idx], self._ans_expl[idx]
|
{"/gameloop.py": ["/player.py", "/level.py"], "/level.py": ["/challenge.py"], "/factory.py": ["/challenge.py", "/level.py", "/player.py", "/gameloop.py"], "/main.py": ["/factory.py"]}
|
16,121
|
palinnilap/happy-planet
|
refs/heads/master
|
/level.py
|
from random import randrange
from typing import Tuple
from challenge import Challenge
class Level:
    """A pool of challenges served in uniformly random order."""

    def __init__(self, challenges : Tuple[Challenge]):
        self._challenges = challenges

    def get_next_challenge(self):
        """Return a random challenge from the pool (repeats allowed)."""
        pick = randrange(0, len(self._challenges))
        return self._challenges[pick]


class LevelSequential(Level):
    """A level that serves its challenges in order, then repeats the last."""

    # Class-level default; the augmented assignment below creates a
    # per-instance counter on first advance.
    CURRENT = 0

    def get_next_challenge(self):
        """Return the current challenge and advance, clamped at the end."""
        challenge = self._challenges[self.CURRENT]
        if self.CURRENT < len(self._challenges) - 1:  # never index past the end
            self.CURRENT += 1
        return challenge
|
{"/gameloop.py": ["/player.py", "/level.py"], "/level.py": ["/challenge.py"], "/factory.py": ["/challenge.py", "/level.py", "/player.py", "/gameloop.py"], "/main.py": ["/factory.py"]}
|
16,122
|
palinnilap/happy-planet
|
refs/heads/master
|
/factory.py
|
from challenge import Challenge
from level import Level, LevelSequential
from player import Player
from gameloop import GameLoop
def create_gameloop():
    """Wire up the full game: player, the four mood levels, and the
    scripted tutorial / won / lost sequences.

    The player starts at -2019 happiness; the tutorial's 'Start Game'
    answer is worth 2019+25, leaving 25 when real play begins. The
    starting level index passed to GameLoop is 1.
    """
    player = Player(-2019)
    levels = (create_level_0(), create_level_1(), create_level_2(), create_level_3())
    return GameLoop(player, levels, 1, create_tut_lev(), create_won_lev(), create_lost_lev())
def create_level_0():
challenges = (
challenge00(), challenge01(), challenge02(), challenge03()
)
return Level(challenges)
def create_level_1():
challenges = (
challenge11(), challenge12(), challenge13()
)
return Level(challenges)
def create_level_2():
challenges = (
challenge20(), challenge21(), challenge22()
)
return Level(challenges)
def create_level_3():
challenges = (
challenge30(), challenge31(), challenge32(), challenge33()
)
return Level(challenges)
def create_tut_lev():
challenges = (
challenge_t0(), challenge_t1()
)
return LevelSequential(challenges)
def create_lost_lev():
challenges = (
challenge_lost0(), challenge_lost0()
)
return LevelSequential(challenges)
def create_won_lev():
challenges = (
challenge_won0(), challenge_won1(), challenge_won2(), challenge_won3(),
challenge_won4(), challenge_won5(), challenge_won6(), challenge_won7(),
challenge_won8(), challenge_won9(), challenge_won10()
)
return LevelSequential(challenges)
############ level 0 #############################
def challenge00():
prompt = '''Your mood is low.\n
You feel so sad even snuggling a baby bulldog wouldn't cheer you up.
What do you do?'''
choices = ('Call Stan', 'Eat something', 'Fetal position', 'Pep talk')
ans_vals = (-10,3,3,-10)
ans_expl = (
'Stan doesn\'t like you.',
'You feel your strength reviving...',
'What doesn\'t kill you makes you stronger?',
'Your best try quickly soured into you listing reasons mold is more likeable than you are.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge01():
prompt = '''Your mood is low.\n
MovieFlix just released all 4 seasons of "Pathetic People with Lives You Will Enjoy Judging".'''
choices = ('Sounds like a plan!', 'I\'m going to train for a marathon instead!',
'I\'ll take a walk first', 'I\'ll go outside. To do what, I don\'t know.')
ans_vals = (-8,-10,5,5)
ans_expl = (
'You have melted into a puddle on the couch. No one is quite sure where you end and butt-imprinted foam begins.',
'You put on running shoes, decided you would never be able to do it, and then watched movies with shoes on',
'Good choice!',
'Anything is better than nothing!'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge02():
prompt = '''Your mood is low.\n
You just realized you spent the last 3 hours thinking about how jealous you are of house plants. What in the world are you going to do next?'''
choices = ('Take it one day at a time', 'Take it one hour at a time',
'Take it one minute at a time', 'Take it 10 seconds at a time')
ans_vals = (1,2,3,-1)
ans_expl = (
'Let tomorrow worry about itself!',
'Small steps!',
'Just got through another minute!',
'You tried, but forgot the word for 7 and gave up.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge03():
prompt = '''Your mood is low.\n\nYou are always talking to yourself. What are you saying right now?'''
choices = ('I have the personality of a sponge',
'I\'m pretty sure the only reason my friends like me is because I have a pogo stick.',
'I wish my face was a pogo stick', 'Life is like my underwear. It doesn\'t change.')
ans_vals = (-1,-1,-1,-1)
ans_expl = (
'Oddly enough, so do sponges.',
'Imagine if you DIDN\'T have that pogo stick then.',
'That is the weirdest thing I have ever heard.',
'Bruh, change them drawers.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
############ level 1 #############################
def challenge11():
prompt1 = 'What a nice day!\n\nWhat would you like to do?'
choices1 = ('Smile', 'Laugh', 'Hugs', 'Cry')
ans_vals1 = (3,5,-10,-5)
ans_expl1 = (
"Show them pearls!",
'The best medicine!\n\n(this statement has not been approved by the FDA)',
'You chose a bear.\nFrom now on, your hugs will all be one-armed.',
'Well, you haven\'t quite gotten the hang of this game yet...'
)
return Challenge(prompt1, choices1, ans_vals1, ans_expl1)
def challenge12():
prompt1 = 'I KNOW. Let\'s do something for four hours straight!'
choices1 = ('Ski', 'Scream', 'Video Games', 'Read a biography, help orphans, and write well-informed letters to congress')
ans_vals1 = (-5,7,-10,-1)
ans_expl1 = (
"You had fun for 3 hours and 15 minutes. At 3 hours and 16 minutes you swerved to avoid a moose, went off a ledge, and landed on top of a CIA operative, thus blowing his cover. Oh. And you broke your collar bone.",
'Your shrieks were heard by a famous metal band. They offer you a lucrative contract.',
'You won the MMORPG battle, but lost the afternoon',
'No points awarded to the brown-noser.\n\nAnd, minus 1 points for lying.'
)
return Challenge(prompt1, choices1, ans_vals1, ans_expl1)
def challenge13():
prompt1 = 'Time to pick a name for your new band.'
choices1 = ('Shredder Kitties', 'AC/Defroster', 'Shotguns and Petunias', 'Steel Dirgible')
ans_vals1 = (5,3,-5,1)
ans_expl1 = (
'Solid choice.\n\nYou captured both your unbridled rage and cuddly inner soul.',
'Familiar but catchy.',
'Your first single: "Welcome to the Forest"\n\n was not a hit',
'Definitely better than your runner up choice:\n\n "The Stink Beatles"'
)
return Challenge(prompt1, choices1, ans_vals1, ans_expl1)
############ level 2 #############################
def challenge20():
prompt1 = 'Is it just me, or is it getting AWESOME in here?'
choices1 = ('It\'s just you', 'Did someone open up a can of YEEEHAAA?',
'I heard on the radio that today it will be sunny with a chance of SHABAM!',
'Let\'s blast some country music!!'
)
ans_vals1 = (-5,5,5,-10)
ans_expl1 = (
'Party pooper\n >:|',
'YOU\'RE DARN RIGHT I DID',
'Are you a tornado? \'Cause you raisin\' the roof!',
'How about let\'s not'
)
return Challenge(prompt1, choices1, ans_vals1, ans_expl1)
def challenge21():
prompt = ('''"Ridonculous" took the civil world by storm. "Classic" took a fine word and made it on point.\n
What's the next annoying phrase you will grace humanity with?''')
choices = ('Spectankulous', 'Counterfactual', 'Tiktoktakular', 'Typey')
ans_vals = (5,7,-10,8)
ans_expl = (
'Nerdy high schoolers everywhere thank you for giving them something they think will make them sound cool',
'PRENTENSION LEVEL UP. You sound crazy smart without actually having to know what the word means because you are using it "ironically."',
'Sorry, not that creative. Plus China is spying on you.',
'What\'s old? Typewriters. See that guy looking at a map? That is SO typey.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge22():
prompt = ('''You are about to get on an airplane to sign a disarmament treaty with North Korea
when you hear some CRANKED UP music and the sounds of people dancing in the distance.''')
choices = ('Treaty Smeaty. Dance is life.', 'Call me Kim Jung Una FIESTA DE BAILE',
'“Without delayed gratification, there is no power over self.”',
'The greatest joy is a job well done'
)
ans_vals = (3,2,-10,0)
ans_expl = (
'NO JOY IS REAL BUT THE PRESENT!',
'Is that confetti? Oh, it\'s nuclear ash? HEY SARAH, WE DON\'T NEED GLOWSTICKS ANYMORE. YEAH, wE ArE ABOuT to BECOme GLoW hUMANS',
'Did you go to college? Because it sounds like you have a bachelor\'s degree in BORING.',
'Life\'s uncertain. Eat dessert first.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
#People are having so much fun, but they are hipsters. What will you do to get in?
############ level 3 #############################
def challenge30():
prompt = ('''Man, you are SO FREAKIN\' EXCITED\n
How are you going to take this to the next level?''')
choices = ('Eat cake', 'Punch a clown', 'Eat ice cream ALL DAY', 'Be Yourself')
ans_vals = (2,7,-10,-20)
ans_expl = (
"SUGAR RUSH!!!!!!!!!!",
'How can something that feels so right be wrong?',
'Diarrhea\n :`(',
'Not to be offensive, but everyone you know is worse off for having known you.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge31():
prompt = ('''ARE. YOU. HAPPY. YET?!''')
choices = ('Yes', 'YHEEESSSS', 'No', 'I\'m only getting started')
ans_vals = (1,-10,5,-5)
ans_expl = (
'WHERE IS THE PASSION?',
'CAN YOU EVEN SPELL?!',
'THAT\'S RIGHT. NEVER BE CONTENT! Compared to how happy you GONNA BE, THIS IS NOTHING YET!!!!!',
'THEN START FOR REAL, BRO.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge32():
prompt = ('''WHAT IS THE PURPOSE OF LIFE?!!!''')
choices = ('Be happy','Make a meaningful impact on the world',
'Become famous', 'DIE WITH MORE MONEY THAN GOD.')
ans_vals = (5,-5,-5,-5)
ans_expl = (
'A THOUSAND TIMES YES. NOTHING ELSE MATTERS.',
'But what if you are crying while you do it? Then what is it worth?!',
'FAMOUS PEOPLE DON\'T LOOK HAPPY TO ME',
'IF YOU ARE GOING TO DIE, IT WILL ONLY BE FROM BEING TOO HAPPY.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge33():
prompt = '''You are RUNNING through the streets SHOUTING FOR JOY and you see someone NOT looking COMPLETELY ENTHUSED about life.'''
choices = ('Give him a high-five',
'SCREAM in his face like A DRILL SERGEANT OF HAPPINESS until FEELS THE HAPPYS to the core of his middle-aged being',
'Punch him on the shoulder and say, "JOY BUGGY"',
'Keep walking. Don\'t let him vampire your joy.')
ans_vals = (-5,5,2,-10)
ans_expl = (
'He did not give you a high five back. Your face burns with the indignation of unrequited love.',
'DON\'T YOU SEE THE HAPPYS YOU ARE MISSING?! QUIT YOUR JOB. EAT TWINKIES UNTIL YOU PUKE. DO WHATEVER YOU MUST TO ACQUIRE LOLS',
'Great idea, but it didn\'t do enough. PUNCH HIM AGAIN UNTIL HE FEELS THE LOVE',
'NO. NO ONE CAN MISS OUT ON THIS. THEY MUST FEEL THE JOY.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
############ Tutorial #############################
def challenge_t0():
prompt = ('''H A P P Y \n\nP L A N E T''')
choices = ('', '', '', 'continue')
ans_vals = (0,0,0,0)
ans_expl = (
'',
'',
'',
'by palinnilap and dragongirl'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_t1():
prompt = ('''You will be given 25 mood to start.\n\n
Don\'t screw this up.''')
choices = ('', '', 'Start Game', '')
ans_vals = (0,0,2019+25,0)
ans_expl = (
'',
'',
'Time to make the world a happy place',
''
)
return Challenge(prompt, choices, ans_vals, ans_expl)
############ Lost #############################
def challenge_lost0():
prompt = ('''Congratulations. You lost.''')
choices = ('', '', '', 'I want to try again.')
ans_vals = (0,0,0,0)
ans_expl = (
'',
'',
'',
'What about "you lost" was hard to understand?'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
############ Won #############################
def challenge_won0():
prompt = ('''You HaVE DONE IT! YOu HAvE REACHED PeaK hAPPINESS! HWAAHHAHA ''')
choices = ('', '', '', 'I am the best.')
ans_vals = (0,0,0,1000)
ans_expl = (
'',
'',
'',
'Yes, our jolly monarch, you are.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won1():
prompt = ('''BuT LOOK AT THEM, UNHAPPY mAsseS. ''')
choices = ('', '', 'SOMEthing must be DONE!', '')
ans_vals = (0,0,1324,1000)
ans_expl = (
'',
'',
'WhY DO YOU nOT SeE WHAT HAPpYS ARe FOR YOUR TAKINg, PEOpLES OF THE WORLD?!',
'.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won2():
prompt = ('''SHOULDN\'T YoU DO SOmEtHING ABOUT THEM? theIR MISERy MUsTN\'T ToUCh YOu! ''')
choices = ('YES!', 'No', '', '')
ans_vals = (9999,9999,1000,0)
ans_expl = (
'OF COURSE WE SHOULD! WHY WOULDN\'T WE?!',
'THAT IS CRAZY TALK MY FRIEND, WE MUST GIVE THEM OUR GIFT',
'',
'.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won3():
prompt = ('''BUT HOw CaN WE GIVE OuR gIFT?''')
choices = ('USE THE SKY', 'A ROCKET OF GOOD TIDINGS', 'VIRUS OF JOY', '')
ans_vals = (89764,89764,89764,89764)
ans_expl = (
'YES, OF COURSE, WE cAN SpREAD THE JOY THROUGH THE hEAVENS',
'bOMBs HAvE DeSTRoYED BUt OURs CAN GIVE LIFE!',
'HAPPINESS GOES VIRAL',
'.'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won4():
    """Endgame beat: the rocket is ready; both live choices launch it."""
    prompt = ('''Sir, the ROCKET is READY. WE WILL SPREAD OUR JOY THROUGHOUT THE EARTH''')
    # BUG FIX: the original read 'I\m ...' -- '\m' is not a recognized escape,
    # so the button rendered a literal backslash ("I\m"). Apostrophe intended.
    choices = ('HIt thE BUTToN', 'I\'m having second thoughts', '', '')
    ans_vals = (1001,1010,1001,1001)
    ans_expl = (
        '3... 2... 1... Isn\'t it beautiful, sir?',
        'WE HAVE COME TOO FAR. THERE IS NO BACKING OUT NOW!!\n\n*hits button for you',
        '',
        ''
    )
    return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won5():
    """Endgame beat: driving out to watch the rocket hit the city."""
    prompt = ('''You drive out to the city to see what happens.
    For a long time nothing, then blinding light, a ferocious wind, and ...''')
    # BUG FIX: '\m' was an invalid escape rendering as a literal backslash.
    # NOTE(review): these choices look copy-pasted from challenge_won4 --
    # only ans_expl[0] ('Continue') seems intended here; confirm with design.
    choices = ('HIt thE BUTToN', 'I\'m having second thoughts', '', '')
    ans_vals = (1000,1000,1000,1000)
    ans_expl = (
        'Continue',
        '',
        '',
        ''
    )
    return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won6():
prompt = ('''It\'s working! The initial shock has washed off and people push themselves off the ground and back to their feet.
And they are... happy.. smiling... laughing ''')
choices = ('', 'This laughter is truly inFECTIous!', '', '')
ans_vals = (1000,9999,1000,1000)
ans_expl = (
'',
'IT WORKED! UtOPIA HAs cOME!!!!!',
'',
''
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won7():
prompt = ('''bUT WAIT. PeOPLE can\'t stop laughing. They can't talk. They can\'t eat.\n
The uproar of the city takes on an ominous tone.''')
choices = ('', '', '', 'HAHAHAHA!!')
ans_vals = (1000,9999,100000,1000)
ans_expl = (
'',
'',
'',
'Sir?'
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won8():
prompt = ('''Sir, you haven't slept or ate for days!!!''')
choices = ('', 'HAHAHAHAH!!!!!', '', '')
ans_vals = (1000,9999,100000,1000)
ans_expl = (
'',
'Sir, you should.... heh.. ha... HAHAHAHA!!',
'',
''
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won9():
prompt = ('''HAHAHAH hahaha h...''')
choices = ('', '', 'ha ... ha .... h', '')
ans_vals = (1000,9999,100000,1000)
ans_expl = (
'',
'',
'h',
''
)
return Challenge(prompt, choices, ans_vals, ans_expl)
def challenge_won9b():
    """Penultimate silence screen ('...').

    BUG FIX: this function was originally also named challenge_won10, so it
    was silently shadowed by the 'GAME OVER' definition below and could never
    run. Renamed so both screens exist; create_won_lev still resolves
    challenge_won10 to 'GAME OVER', preserving existing behavior.
    """
    prompt = ('''...''')
    choices = ('...', '', '', '')
    ans_vals = (10000000,9999,100000,1000)
    ans_expl = (
        '',
        '',
        '',
        ''
    )
    return Challenge(prompt, choices, ans_vals, ans_expl)


def challenge_won10():
    """Final screen: GAME OVER, no active choices."""
    prompt = ('''GAME OVER''')
    choices = ('', '', '', '')
    ans_vals = (1000,9999,100000,1000)
    ans_expl = (
        '',
        '',
        '',
        ''
    )
    return Challenge(prompt, choices, ans_vals, ans_expl)
|
{"/gameloop.py": ["/player.py", "/level.py"], "/level.py": ["/challenge.py"], "/factory.py": ["/challenge.py", "/level.py", "/player.py", "/gameloop.py"], "/main.py": ["/factory.py"]}
|
16,123
|
palinnilap/happy-planet
|
refs/heads/master
|
/main.py
|
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import (
ObjectProperty, StringProperty
)
from kivy.clock import Clock
import factory
import time
class HappyGame(Widget):
    """Root Kivy widget: polled ~60x/s by `update`, it mirrors the GameLoop
    state onto four answer buttons, a prompt label, and score displays.

    NOTE(review): PRESSED/CONTINUE/BUTTON_ENBLD and `gameloop` are class
    attributes; plain assignment in methods shadows them per instance, but a
    second HappyGame instance would share the same gameloop object.
    """
    #color management
    BUTTON_DN = []        # RGBA for a pressed button (set per level in set_rgbs)
    BUTTON_UP = []        # RGBA for an available button
    BUTTON_DISABLED = [0,0,0,0]   # fully transparent = hidden/disabled
    #uix elements
    score = ObjectProperty(None)
    emoji = ObjectProperty(None)
    happy_meter = ObjectProperty(None)
    score_updater = ObjectProperty(None)
    prompt = ObjectProperty(None)
    button1 = ObjectProperty(None)
    button2 = ObjectProperty(None)
    button3 = ObjectProperty(None)
    button4 = ObjectProperty(None)
    #class stats
    PRESSED = 0        # 1-4 = button pressed this frame, 0 = none
    CONTINUE = 0       # 1 = buttons currently mean "continue", not an answer
    BUTTON_ENBLD = 1   # 0 while the answer explanation is being shown
    #setup gameloop
    gameloop = factory.create_gameloop()
    cur_challenge = gameloop.get_next_challenge()

    def update(self, dt):
        '''main cycle for reacting to user input'''
        self.set_rgbs()
        self.check_status()
        self.check_for_user_input()
        self.set_scores()

    def check_for_user_input(self):
        """Dispatch on the PRESSED/CONTINUE flags set by on_touch_down."""
        if self.prompt.text == 'error':
            #need to set up intial values
            self.populate_challenge()
        if self.CONTINUE and self.PRESSED == 1:
            #if user hit continue, present next challenge
            self.cur_challenge = self.gameloop.get_next_challenge()
            self.score_updater.text = ''
            self.populate_challenge()
            self.CONTINUE = 0
            self.PRESSED = 0
        if self.PRESSED and not self.CONTINUE:
            #user choice an answer, process it
            self.hide_unpicked_buttons()
            self.prompt.text = ''
            self.BUTTON_ENBLD = 0
            self.CONTINUE = 1
            self.process_choice()
            self.PRESSED = 0 #this can't come earlier

    def process_choice(self) -> None:
        """Score the chosen answer and stage the timed reveal sequence."""
        val, expl = self.cur_challenge.asses_choice(self.PRESSED)
        self.gameloop.submit_results(1, val) #score always increments by 1
        # Staggered reveal: value at 0.5s, explanation at 1s, continue at 2s.
        Clock.schedule_once(lambda dt: self.display_score_update(val), .5)
        Clock.schedule_once(lambda dt: self.display_expl(expl), 1)
        Clock.schedule_once(lambda dt: self.set_buttons_to_continue(), 2)
        Clock.schedule_once(lambda dt: self.enable_buttons(), 2)

    def hide_unpicked_buttons(self):
        """Blank every button except the one the player pressed."""
        if not self.PRESSED == 1:
            self.button1.text = ''
            self.button1.background_color = self.BUTTON_DISABLED
        if not self.PRESSED == 2:
            self.button2.text = ''
            self.button2.background_color = self.BUTTON_DISABLED
        if not self.PRESSED == 3:
            self.button3.text = ''
            self.button3.background_color = self.BUTTON_DISABLED
        if not self.PRESSED == 4:
            self.button4.text = ''
            self.button4.background_color = self.BUTTON_DISABLED

    def enable_buttons(self):
        # Re-enable touch handling after the reveal sequence finishes.
        self.BUTTON_ENBLD = 1

    def display_score_update(self, val):
        self.score_updater.text = self.format_val(val)

    def display_expl(self, expl):
        self.prompt.text = expl

    def set_buttons_to_continue(self):
        """Turn button1 into a lone 'Continue' button; hide the rest."""
        self.button1.text = "Continue"
        self.button1.background_color = self.BUTTON_DISABLED
        self.button2.text = ''
        self.button2.background_color = self.BUTTON_DISABLED
        self.button3.text = ''
        self.button3.background_color = self.BUTTON_DISABLED
        self.button4.text = ''
        self.button4.background_color = self.BUTTON_DISABLED

    def format_val(self, val : int) -> str:
        """Render a score delta with an explicit sign (e.g. '+3', '-10')."""
        if val >= 0:
            return '+' + str(val)
        else:
            return str(val)

    def set_scores(self) -> None:
        """Mirror the gameloop's display strings onto the labels."""
        self.score.text = self.gameloop.score
        self.emoji.text = self.gameloop.emoji
        self.happy_meter.text = self.gameloop.happy

    def populate_challenge(self) -> None:
        """Load the current challenge's prompt and four choice labels;
        buttons with an empty label are rendered disabled."""
        self.prompt.text = self.cur_challenge.get_prompt()
        self.button1.text = self.cur_challenge.get_choices()[0]
        self.button2.text = self.cur_challenge.get_choices()[1]
        self.button3.text = self.cur_challenge.get_choices()[2]
        self.button4.text = self.cur_challenge.get_choices()[3]
        if self.cur_challenge.get_choices()[0] == '':
            self.button1.background_color = self.BUTTON_DISABLED
        else:
            self.button1.background_color = self.BUTTON_UP
        if self.cur_challenge.get_choices()[1] == '':
            self.button2.background_color = self.BUTTON_DISABLED
        else:
            self.button2.background_color = self.BUTTON_UP
        if self.cur_challenge.get_choices()[2] == '':
            self.button3.background_color = self.BUTTON_DISABLED
        else:
            self.button3.background_color = self.BUTTON_UP
        if self.cur_challenge.get_choices()[3] == '':
            self.button4.background_color = self.BUTTON_DISABLED
        else:
            self.button4.background_color = self.BUTTON_UP

    def on_touch_down(self, touch):
        '''lets self.update know which button was pressed. changes color'''
        # this code is not DRY. kv language made simpler code hard to write
        if not self.BUTTON_ENBLD:
            return
        if self.button1.collide_point(*touch.pos):
            self.PRESSED = 1
            self.button1.background_color = self.BUTTON_DN
        elif self.button2.collide_point(*touch.pos):
            self.PRESSED = 2
            self.button2.background_color = self.BUTTON_DN
        elif self.button3.collide_point(*touch.pos):
            self.PRESSED = 3
            self.button3.background_color = self.BUTTON_DN
        elif self.button4.collide_point(*touch.pos):
            self.PRESSED = 4
            self.button4.background_color = self.BUTTON_DN

    def on_touch_up(self, touch):
        pass

    def check_status(self):
        # NOTE(review): win/lose transitions are unhandled stubs; the
        # gameloop presumably swaps in the won/lost levels itself -- confirm.
        if self.gameloop.status == 1:
            pass # you won!
        elif self.gameloop.status == -1:
            pass # you lost
    def set_rgbs(self):
        """Pull the current level's colour pair and recolour labels/buttons."""
        a, b = self.gameloop.level_rgbs
        self.BUTTON_UP, self.BUTTON_DN = a, b
        self.score.color = self.happy_meter.color = a
class HappyApp(App):
    """Kivy app wrapper: builds the game widget and starts the 60 fps clock."""
    def build(self):
        game = HappyGame()
        # Drive HappyGame.update at ~60 Hz for the whole app lifetime.
        Clock.schedule_interval(game.update, 1.0/60.0)
        return game


if __name__ == '__main__':
    HappyApp().run()
|
{"/gameloop.py": ["/player.py", "/level.py"], "/level.py": ["/challenge.py"], "/factory.py": ["/challenge.py", "/level.py", "/player.py", "/gameloop.py"], "/main.py": ["/factory.py"]}
|
16,124
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/target/dist/capstone-django-1.0.dev0/build/lib/capstone/diagnostic/views.py
|
from django.shortcuts import render
import os
from django.core.files.storage import FileSystemStorage
from django.conf import settings
# Create your views here.
def diagnostic(request):
    """Render the diagnostic page; on POST, launch the object-detection script.

    Improvements over the original:
    - os.path.join with separate components instead of hard-coded Windows
      backslashes, so the path also resolves on POSIX;
    - subprocess.run with an argument list (shell=False) instead of
      os.system string concatenation, so a BASE_DIR containing spaces or
      shell metacharacters cannot break or alter the command;
    - sys.executable keeps the same interpreter that runs Django rather
      than whatever 'python' happens to resolve to on PATH.
    """
    if request.method == 'POST':
        import subprocess
        import sys  # local imports: only needed for this POST action
        script = os.path.join(settings.BASE_DIR, 'models', 'research',
                              'object_detection', 'object_detection_tutorial.py')
        # Blocks until the script exits, matching the original os.system call.
        subprocess.run([sys.executable, script], check=False)
    return render(request, 'diagnostic.html')
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,125
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/target/dist/capstone-django-1.0.dev0/build/lib/capstone/preventative/views.py
|
from django.shortcuts import render
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from django.core.files.storage import FileSystemStorage
from django.conf import settings
# Create your views here.
def preventative(request):
    """Upload an Excel workbook and render scatter plots plus summary stats.

    POST with 'myfile': saves the upload, reads the main sheet and the
    'sum' sheet, drops known junk rows, renames the unnamed columns,
    writes five scatter plots under static/, and renders the results
    page with max/min aggregates. GET (or no file) renders the bare
    form; any failure falls through to an error message.
    """
    try:
        if request.method == 'POST' and request.FILES['myfile']:
            myfile = request.FILES['myfile']
            fs = FileSystemStorage()
            filename = fs.save(myfile.name, myfile)
            uploaded_file_url = fs.url(filename)
            dataset = pd.read_excel(settings.BASE_DIR + uploaded_file_url)
            # Rows 0, 1 and 95 are header/junk rows in the expected workbook
            # layout -- assumption inherited from the original; confirm with
            # a sample file.
            dataset = dataset.drop([0])
            dataset = dataset.drop([1])
            dataset = dataset.drop([95])
            dataset = dataset.rename(columns={
                'Type': 'Row Labels',
                'TRUE': 'Count of Doctors',
                'Unnamed: 2': 'Centers',
                'Unnamed: 3': 'Population',
                'Unnamed: 4': 'Areas',
                'Unnamed: 5': 'Zones',
            })
            # NOTE(review): this Imputer is constructed but never fitted or
            # applied (dropna below removes the NaNs instead) -- dead code.
            imputer = Imputer(missing_values = 'nan', strategy = 'mean', axis = 0)
            dataset = dataset.dropna(how='any')
            maximum_doc = dataset['Count of Doctors'].groupby(dataset['Zones']).sum().max()
            max_dict = dict(dataset['Count of Doctors'].groupby(dataset['Zones']).sum())
            max_zone = list(max_dict.keys())[list(max_dict.values()).index(maximum_doc)]
            plt.scatter(dataset['Count of Doctors'], dataset['Zones'])
            plt.suptitle('Count of Doctors vs Zones')
            plt.xlabel('Count of Doctors')
            plt.ylabel('Zones')
            plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_1.png', bbox_inches = 'tight')
            plt.clf()
            # Second pass: the 'sum' summary sheet of the same workbook.
            sheets = pd.read_excel(settings.BASE_DIR + uploaded_file_url, sheet_name=['sum'])
            dataset = pd.concat(sheets[frame] for frame in sheets.keys())
            dataset = dataset.drop([2])
            dataset = dataset.rename(columns={
                'Unnamed: 0': 'Row Labels',
                'Unnamed: 1': 'Area',
                'Unnamed: 2': 'Count of center',
                'Unnamed: 3': 'doctors',
                'Unnamed: 4': 'pop',
                'Unnamed: 5': 'Pop/center',
                'Unnamed: 6': 'Pop/Dr',
                'Unnamed: 7': 'Dr/cr',
            })
            imputer = Imputer(missing_values = 'nan', strategy = 'mean', axis = 0)
            dataset = dataset.dropna(how='any')
            plt.scatter(dataset['Count of center'], dataset['Area'])
            plt.suptitle('Count of center vs Area')
            plt.xlabel('Count of center')
            plt.ylabel('Area')
            plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_2.png', bbox_inches = 'tight')
            plt.clf()
            plt.scatter(dataset['doctors'], dataset['Area'])
            plt.suptitle('doctors vs Area')
            plt.xlabel('doctors')
            plt.ylabel('Area')
            plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_3.png', bbox_inches = 'tight')
            plt.clf()
            plt.scatter(dataset['Pop/center'], dataset['Area'])
            plt.suptitle('Pop/center vs Area')
            plt.xlabel('Pop/center')
            plt.ylabel('Area')
            plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_4.png', bbox_inches = 'tight')
            plt.clf()
            minimum_pop = dataset['Pop/Dr'].groupby(dataset['Area']).sum().min()
            min_dict = dict(dataset['Pop/Dr'].groupby(dataset['Area']).sum())
            # Areas whose population-per-doctor exceeds 5000 are flagged.
            areas_list = []
            for i in min_dict:
                if min_dict[i] > 5000:
                    areas_list.append(list(min_dict.keys())[list(min_dict.values()).index(min_dict[i])])
            min_area = list(min_dict.keys())[list(min_dict.values()).index(minimum_pop)]
            plt.scatter(dataset['Pop/Dr'], dataset['Area'])
            plt.suptitle('Pop/Dr vs Area')
            plt.xlabel('Pop/Dr')
            plt.ylabel('Area')
            plt.savefig(settings.BASE_DIR + '/static/' + uploaded_file_url + '_5.png', bbox_inches = 'tight')
            plt.clf()
            return render(request, 'preventative.html', {'uploaded_file_url': uploaded_file_url, 'maximum_doc': maximum_doc, 'max_zone': max_zone, 'minimum_pop': int(minimum_pop), 'areas_list': areas_list, 'min_area': min_area})
        return render(request, 'preventative.html')
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Any data/IO error now (as before)
        # shows the generic error page.
        return render(request, 'preventative.html', {'error': 'Please Select A File'})
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,126
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/target/dist/capstone-django-1.0.dev0/capstone/insurance/urls.py
|
from django.conf.urls import url
from . import views

# URL routing for the insurance app.
app_name = 'insurance'

urlpatterns = [
    # NOTE(review): legacy regex-style url() without ^/$ anchors -- this
    # matches 'insurance/' anywhere in the path; confirm that is intended.
    url('insurance/', views.insurance, name = 'insurance'),
]
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,127
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/src/main/python/capstone/preventative/urls.py
|
from django.conf.urls import url
from . import views

# URL routing for the preventative app.
app_name = 'preventative'

urlpatterns = [
    # NOTE(review): legacy regex-style url() without ^/$ anchors -- this
    # matches 'preventative/' anywhere in the path; confirm that is intended.
    url('preventative/', views.preventative, name = 'preventative'),
]
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,128
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/src/main/python/capstone/preventative/apps.py
|
from django.apps import AppConfig


class PreventativeConfig(AppConfig):
    """Django application configuration for the preventative app."""
    name = 'preventative'
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,129
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/src/main/python/capstone/preventative/tests.py
|
import datetime
from django.test import TestCase
from django.utils import timezone
from .models import Document
# Create your tests here.
class DocumentModelTests(TestCase):
    """Unit tests for Document.was_published_recently()."""

    def test_was_published_recently_with_future_document(self):
        # A document 'uploaded' in the future must not count as recent.
        time = timezone.now() + datetime.timedelta(days=30)
        future_document = Document(uploaded_at=time)
        self.assertIs(future_document.was_published_recently(), False)

    def test_was_published_recently_with_old_document(self):
        # One second outside the 24-hour window: not recent.
        time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        old_document = Document(uploaded_at=time)
        self.assertIs(old_document.was_published_recently(), False)

    def test_was_published_recently_with_recent_document(self):
        # One second inside the window: recent.
        time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        recent_document = Document(uploaded_at=time)
        self.assertIs(recent_document.was_published_recently(), True)
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,130
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/src/main/python/capstone/preventative/models.py
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Document(models.Model):
    """An uploaded file, stored under documents/YYYY/MM/DD/."""
    document = models.FileField(upload_to='documents/%Y/%m/%d/')
    uploaded_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # BUG FIX: __str__ must return a str, but self.document is a
        # FieldFile; the original raised "TypeError: __str__ returned
        # non-string" whenever the object was rendered. str() yields the
        # stored file name.
        return str(self.document)

    def was_published_recently(self):
        """Return True if uploaded within the last 24h and not in the future."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.uploaded_at <= now
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,131
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/src/main/python/capstone/models/research/object_detection/object_detection_tutorial.py
|
import os
import pathlib
import numpy as np
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from IPython.display import display
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
cap = cv2.VideoCapture(0)
if "models" in pathlib.Path.cwd().parts:
while "models" in pathlib.Path.cwd().parts:
os.chdir('..')
utils_ops.tf = tf.compat.v1
tf.gfile = tf.io.gfile
def load_model(model_name):
    """Download (if not cached) and load a TF object-detection SavedModel.

    Fetches <model_name>.tar.gz from the TensorFlow model zoo via
    tf.keras.utils.get_file (cached across runs) and returns the
    'serving_default' signature, callable on a batched image tensor.
    """
    base_url = 'http://download.tensorflow.org/models/object_detection/'
    model_file = model_name + '.tar.gz'
    model_dir = tf.keras.utils.get_file(
        fname=model_name,
        origin=base_url + model_file,
        untar=True
    )
    model_dir = pathlib.Path(model_dir)/"saved_model"
    model = tf.saved_model.load(str(model_dir))
    model = model.signatures['serving_default']
    return model
PATH_TO_LABELS = 'models/research/object_detection/data/mscoco_label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
PATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images')
TEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob("*.jpg")))
TEST_IMAGE_PATHS
model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
detection_model = load_model(model_name)
print(detection_model.inputs)
detection_model.output_dtypes
detection_model.output_shapes
def run_inference_for_single_image(model, image):
    """Run one detection pass and return a dict of numpy outputs.

    Converts the image (an HxWxC array -- assumed uint8 BGR/RGB frame,
    TODO confirm) to a batched tensor, calls the serving signature,
    strips the batch axis, and, when the model emits masks, reframes
    them from box-relative to full-image coordinates, thresholded at 0.5.
    """
    image = np.asarray(image)
    input_tensor = tf.convert_to_tensor(image)
    # The serving signature expects a batch: add a leading axis.
    input_tensor = input_tensor[tf.newaxis,...]
    output_dict = model(input_tensor)
    num_detections = int(output_dict.pop('num_detections'))
    # Drop the batch axis and keep only the valid detections.
    output_dict = {key:value[0, :num_detections].numpy() for key,value in output_dict.items()}
    output_dict['num_detections'] = num_detections
    # Class ids must be ints for the label-map lookup during visualization.
    output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)
    if 'detection_masks' in output_dict:
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            output_dict['detection_masks'], output_dict['detection_boxes'],
            image.shape[0], image.shape[1]
        )
        detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5, tf.uint8)
        output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
    return output_dict
def show_inference(model, image_path):
    """Run detection on one frame and display it in an OpenCV window.

    NOTE(review): despite the name, `image_path` receives an image array
    here -- the callers pass frames from cap.read(), and the original
    Image.open path is commented out below.
    """
    # image_np = np.array(Image.open(image_path))
    image_np = np.array(image_path)
    output_dict = run_inference_for_single_image(model, image_np)
    # Draws boxes/labels (and masks, if present) onto image_np in place.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8
    )
    # display(Image.fromarray(image_np))
    cv2.imshow('image', image_np)
# for image_path in TEST_IMAGE_PATHS:
# show_inference(detection_model, image_path)
while True:
ret, image_path = cap.read()
show_inference(detection_model, image_path)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
sys.exit()
model_name = "mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28"
masking_model = load_model("mask_rcnn_inception_resnet_v2_atrous_coco_2018_01_28")
masking_model.output_shapes
# for image_path in TEST_IMAGE_PATHS:
# show_inference(masking_model, image_path)
while True:
ret, image_path = cap.read()
show_inference(masking_model, image_path)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
sys.exit()
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,132
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/target/dist/capstone-django-1.0.dev0/build/lib/capstone/diagnostic/urls.py
|
# URL routes for the `diagnostic` Django app.
from django.conf.urls import url
from . import views

# Namespace used by reverse() / {% url %} lookups, e.g. 'diagnostic:diagnostic'.
app_name = 'diagnostic'
urlpatterns = [
    url('diagnostic/', views.diagnostic, name = 'diagnostic'),
]
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,133
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/target/dist/capstone-django-1.0.dev0/build/lib/capstone/insurance/views.py
|
from django.shortcuts import render
# Create your views here.
def insurance(request):
    """Render the static insurance page."""
    template = 'insurance.html'
    return render(request, template)
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,134
|
Siddhant111/Capstone-Django
|
refs/heads/master
|
/target/dist/capstone-django-1.0.dev0/setup.py
|
#!/usr/bin/env python
from setuptools import setup
from setuptools.command.install import install as _install
class install(_install):
    """Custom setuptools install command (pybuilder scaffold) that wraps the
    standard install between pre/post hooks."""

    def pre_install_script(self):
        # Hook executed before the standard install; intentionally a no-op.
        pass

    def post_install_script(self):
        # Hook executed after the standard install; intentionally a no-op.
        pass

    def run(self):
        # Delegate to the stock setuptools install between the two hooks.
        self.pre_install_script()
        _install.run(self)
        self.post_install_script()
if __name__ == '__main__':
    # pybuilder-generated setup script.  The long `packages` list below is
    # emitted by the build and mirrors the vendored TensorFlow `models`
    # checkout — regenerate via the build rather than editing by hand.
    setup(
        name = 'Capstone-Django',
        version = '1.0.dev0',
        description = '',
        long_description = '',
        author = '',
        author_email = '',
        license = '',
        url = '',
        scripts = [],
        # Generated package manifest (do not hand-edit).
        packages = [
            'capstone.capstone',
            'capstone.diagnostic',
            'capstone.insurance',
            'capstone.preventative',
            'capstone.diagnostic.migrations',
            'capstone.insurance.migrations',
            'capstone.models.official',
            'capstone.models.tutorials',
            'capstone.models.official.benchmark',
            'capstone.models.official.mnist',
            'capstone.models.official.modeling',
            'capstone.models.official.nlp',
            'capstone.models.official.r1',
            'capstone.models.official.recommendation',
            'capstone.models.official.staging',
            'capstone.models.official.transformer',
            'capstone.models.official.utils',
            'capstone.models.official.vision',
            'capstone.models.official.benchmark.models',
            'capstone.models.official.modeling.activations',
            'capstone.models.official.modeling.hyperparams',
            'capstone.models.official.modeling.training',
            'capstone.models.official.nlp.bert',
            'capstone.models.official.nlp.xlnet',
            'capstone.models.official.r1.boosted_trees',
            'capstone.models.official.r1.resnet',
            'capstone.models.official.r1.utils',
            'capstone.models.official.r1.wide_deep',
            'capstone.models.official.r1.utils.data',
            'capstone.models.official.staging.shakespeare',
            'capstone.models.official.transformer.model',
            'capstone.models.official.transformer.utils',
            'capstone.models.official.transformer.v2',
            'capstone.models.official.utils.accelerator',
            'capstone.models.official.utils.flags',
            'capstone.models.official.utils.logs',
            'capstone.models.official.utils.misc',
            'capstone.models.official.utils.testing',
            'capstone.models.official.vision.detection',
            'capstone.models.official.vision.image_classification',
            'capstone.models.official.vision.detection.configs',
            'capstone.models.official.vision.detection.dataloader',
            'capstone.models.official.vision.detection.evaluation',
            'capstone.models.official.vision.detection.executor',
            'capstone.models.official.vision.detection.modeling',
            'capstone.models.official.vision.detection.utils',
            'capstone.models.official.vision.detection.modeling.architecture',
            'capstone.models.official.vision.detection.utils.object_detection',
            'capstone.models.research.adversarial_text',
            'capstone.models.research.autoencoder',
            'capstone.models.research.cognitive_mapping_and_planning',
            'capstone.models.research.cognitive_planning',
            'capstone.models.research.cvt_text',
            'capstone.models.research.deeplab',
            'capstone.models.research.deep_speech',
            'capstone.models.research.domain_adaptation',
            'capstone.models.research.feelvos',
            'capstone.models.research.learning_unsupervised_learning',
            'capstone.models.research.lstm_object_detection',
            'capstone.models.research.morph_net',
            'capstone.models.research.object_detection',
            'capstone.models.research.real_nvp',
            'capstone.models.research.sentiment_analysis',
            'capstone.models.research.slim',
            'capstone.models.research.adversarial_logit_pairing.datasets',
            'capstone.models.research.adversarial_text.data',
            'capstone.models.research.attention_ocr.python.datasets',
            'capstone.models.research.autoencoder.autoencoder_models',
            'capstone.models.research.build.lib.object_detection',
            'capstone.models.research.build.lib.object_detection.anchor_generators',
            'capstone.models.research.build.lib.object_detection.box_coders',
            'capstone.models.research.build.lib.object_detection.builders',
            'capstone.models.research.build.lib.object_detection.core',
            'capstone.models.research.build.lib.object_detection.dataset_tools',
            'capstone.models.research.build.lib.object_detection.data_decoders',
            'capstone.models.research.build.lib.object_detection.inference',
            'capstone.models.research.build.lib.object_detection.legacy',
            'capstone.models.research.build.lib.object_detection.matchers',
            'capstone.models.research.build.lib.object_detection.meta_architectures',
            'capstone.models.research.build.lib.object_detection.metrics',
            'capstone.models.research.build.lib.object_detection.models',
            'capstone.models.research.build.lib.object_detection.predictors',
            'capstone.models.research.build.lib.object_detection.protos',
            'capstone.models.research.build.lib.object_detection.tpu_exporters',
            'capstone.models.research.build.lib.object_detection.utils',
            'capstone.models.research.build.lib.object_detection.models.keras_models',
            'capstone.models.research.build.lib.object_detection.predictors.heads',
            'capstone.models.research.build.lib.object_detection.tpu_exporters.testdata',
            'capstone.models.research.cognitive_mapping_and_planning.cfgs',
            'capstone.models.research.cognitive_mapping_and_planning.datasets',
            'capstone.models.research.cognitive_mapping_and_planning.render',
            'capstone.models.research.cognitive_mapping_and_planning.scripts',
            'capstone.models.research.cognitive_mapping_and_planning.src',
            'capstone.models.research.cognitive_mapping_and_planning.tfcode',
            'capstone.models.research.cognitive_planning.envs',
            'capstone.models.research.cognitive_planning.preprocessing',
            'capstone.models.research.compression.entropy_coder',
            'capstone.models.research.compression.entropy_coder.all_models',
            'capstone.models.research.compression.entropy_coder.lib',
            'capstone.models.research.compression.entropy_coder.model',
            'capstone.models.research.compression.entropy_coder.progressive',
            'capstone.models.research.cvt_text.base',
            'capstone.models.research.cvt_text.corpus_processing',
            'capstone.models.research.cvt_text.model',
            'capstone.models.research.cvt_text.task_specific',
            'capstone.models.research.cvt_text.training',
            'capstone.models.research.cvt_text.task_specific.word_level',
            'capstone.models.research.deeplab.core',
            'capstone.models.research.deeplab.datasets',
            'capstone.models.research.deeplab.deprecated',
            'capstone.models.research.deeplab.evaluation',
            'capstone.models.research.deeplab.utils',
            'capstone.models.research.deep_speech.data',
            'capstone.models.research.delf.delf',
            'capstone.models.research.delf.delf.protos',
            'capstone.models.research.delf.delf.python',
            'capstone.models.research.delf.delf.python.detect_to_retrieve',
            'capstone.models.research.delf.delf.python.examples',
            'capstone.models.research.domain_adaptation.datasets',
            'capstone.models.research.domain_adaptation.domain_separation',
            'capstone.models.research.efficient-hrl.agents',
            'capstone.models.research.efficient-hrl.context',
            'capstone.models.research.efficient-hrl.environments',
            'capstone.models.research.efficient-hrl.utils',
            'capstone.models.research.feelvos.datasets',
            'capstone.models.research.feelvos.utils',
            'capstone.models.research.fivo.fivo',
            'capstone.models.research.fivo.fivo.data',
            'capstone.models.research.fivo.fivo.models',
            'capstone.models.research.gan.mnist',
            'capstone.models.research.learning_unsupervised_learning.architectures',
            'capstone.models.research.learning_unsupervised_learning.datasets',
            'capstone.models.research.learning_unsupervised_learning.meta_objective',
            'capstone.models.research.lstm_object_detection.builders',
            'capstone.models.research.lstm_object_detection.inputs',
            'capstone.models.research.lstm_object_detection.lstm',
            'capstone.models.research.lstm_object_detection.meta_architectures',
            'capstone.models.research.lstm_object_detection.metrics',
            'capstone.models.research.lstm_object_detection.models',
            'capstone.models.research.lstm_object_detection.protos',
            'capstone.models.research.lstm_object_detection.utils',
            'capstone.models.research.maskgan.data',
            'capstone.models.research.maskgan.losses',
            'capstone.models.research.maskgan.models',
            'capstone.models.research.maskgan.model_utils',
            'capstone.models.research.maskgan.nas_utils',
            'capstone.models.research.maskgan.regularization',
            'capstone.models.research.morph_net.framework',
            'capstone.models.research.morph_net.network_regularizers',
            'capstone.models.research.morph_net.op_regularizers',
            'capstone.models.research.morph_net.testing',
            'capstone.models.research.object_detection.anchor_generators',
            'capstone.models.research.object_detection.box_coders',
            'capstone.models.research.object_detection.builders',
            'capstone.models.research.object_detection.core',
            'capstone.models.research.object_detection.dataset_tools',
            'capstone.models.research.object_detection.data_decoders',
            'capstone.models.research.object_detection.inference',
            'capstone.models.research.object_detection.legacy',
            'capstone.models.research.object_detection.matchers',
            'capstone.models.research.object_detection.meta_architectures',
            'capstone.models.research.object_detection.metrics',
            'capstone.models.research.object_detection.models',
            'capstone.models.research.object_detection.predictors',
            'capstone.models.research.object_detection.protos',
            'capstone.models.research.object_detection.tpu_exporters',
            'capstone.models.research.object_detection.utils',
            'capstone.models.research.object_detection.models.keras_models',
            'capstone.models.research.object_detection.predictors.heads',
            'capstone.models.research.object_detection.tpu_exporters.testdata',
            'capstone.models.research.qa_kg.model_n2nmn',
            'capstone.models.research.qa_kg.util',
            'capstone.models.research.sentiment_analysis.data',
            'capstone.models.research.seq2species.protos',
            'capstone.models.research.skip_thoughts.skip_thoughts',
            'capstone.models.research.skip_thoughts.skip_thoughts.data',
            'capstone.models.research.skip_thoughts.skip_thoughts.ops',
            'capstone.models.research.slim.datasets',
            'capstone.models.research.slim.deployment',
            'capstone.models.research.slim.nets',
            'capstone.models.research.slim.preprocessing',
            'capstone.models.research.slim.nets.mobilenet',
            'capstone.models.research.slim.nets.nasnet',
            'capstone.models.research.syntaxnet.dragnn',
            'capstone.models.research.syntaxnet.dragnn.config_builder',
            'capstone.models.research.vid2depth.dataset',
            'capstone.models.research.vid2depth.ops',
            'capstone.models.samples.cookbook.regression',
            'capstone.models.tutorials.embedding',
            'capstone.models.tutorials.image',
            'capstone.models.tutorials.rnn',
            'capstone.models.tutorials.image.alexnet',
            'capstone.models.tutorials.image.cifar10',
            'capstone.models.tutorials.image.cifar10_estimator',
            'capstone.models.tutorials.image.mnist',
            'capstone.models.tutorials.rnn.ptb',
            'capstone.preventative.migrations'
        ],
        namespace_packages = [],
        py_modules = [],
        classifiers = [
            'Development Status :: 3 - Alpha',
            'Programming Language :: Python'
        ],
        entry_points = {},
        data_files = [],
        package_data = {},
        install_requires = [],
        dependency_links = [],
        zip_safe = True,
        # Route installs through the hook-wrapping command class above.
        cmdclass = {'install': install},
        keywords = '',
        python_requires = '',
        obsoletes = [],
    )
|
{"/src/main/python/capstone/preventative/tests.py": ["/src/main/python/capstone/preventative/models.py"]}
|
16,145
|
bubbleqi/xlm_roberta_NMT
|
refs/heads/master
|
/utils/data_utils.py
|
import os
import logging
import torch
from torch.utils.data import TensorDataset
from torch.nn.utils.rnn import pad_sequence
class InputFeatures(object):
    """Container for one (source, target) pair of token-id sequences."""

    def __init__(self, input_ids):
        # input_ids is a two-element sequence: [source_ids, target_ids].
        source_ids, target_ids = input_ids[0], input_ids[1]
        self.src_tensor = source_ids
        self.target_tensor = target_ids
class en_fr_processor:
    """Loads the aligned EN-FR news-commentary parallel corpus."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._read_file(data_dir)

    def _read_file(self, data_dir):
        """Read aligned sentence pairs from *data_dir*.

        Returns at most the first 1000 pairs, each as ``[english, french]``
        with surrounding whitespace stripped.
        """
        en_path = os.path.join(data_dir, "news-commentary-v9.fr-en.en")
        fr_path = os.path.join(data_dir, "news-commentary-v9.fr-en.fr")
        pairs = []
        with open(en_path) as en_file, open(fr_path) as fr_file:
            # zip stops at the shorter file, keeping the corpora aligned.
            for fr_line, en_line in zip(fr_file, en_file):
                if fr_line and en_line:
                    pairs.append([en_line.strip(), fr_line.strip()])
        return pairs[:1000]
def convert_examples_to_features(examples, max_seq_length, encode_method):
    """Encode each sentence pair into an InputFeatures instance.

    Every sentence is tokenized with *encode_method*, then wrapped with the
    BOS id (1) at the front and the EOS id (0) at the end.

    NOTE(review): `max_seq_length` is accepted but never applied — no
    truncation or padding happens here; confirm whether that is intended.
    """
    def _encode(sentence):
        ids = encode_method(sentence.strip())
        ids.insert(0, 1)   # BOS id
        ids.append(0)      # EOS id
        return ids

    features = []
    for ex_index, example in enumerate(examples):
        if not example:
            continue  # skip empty rows
        token_ids = [_encode(word) for word in example]
        if ex_index == 0:
            logging.info("token ids = ")
            logging.info(token_ids)
        logging.debug("token ids = ")
        logging.debug(token_ids)
        if token_ids:
            features.append(
                InputFeatures(input_ids=token_ids))
    return features
def create_dataset(features):
    """Pad the variable-length id sequences and bundle them as a TensorDataset.

    Sequences are right-padded with zeros to the batch maximum, batch-first.
    """
    src_batch = pad_sequence(
        [torch.tensor(f.src_tensor) for f in features], batch_first=True)
    target_batch = pad_sequence(
        [torch.tensor(f.target_tensor) for f in features], batch_first=True)
    return TensorDataset(src_batch, target_batch)
|
{"/train.py": ["/model/xlmr_mt.py", "/utils/train_utils.py", "/utils/data_utils.py"]}
|
16,146
|
bubbleqi/xlm_roberta_NMT
|
refs/heads/master
|
/train.py
|
from __future__ import absolute_import, division, print_function

import argparse
import logging
import os
import random
import sys

import numpy as np
import torch
import torch.nn.functional as F
from pytorch_transformers import AdamW, WarmupLinearSchedule
from torch import nn
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from tqdm.notebook import tqdm
from tqdm import trange

# fixed typo: the module defines XLMR_Decoder, not XLM_Decoder
from model.xlmr_mt import XLMR_Encoder, XLMR_Decoder
from model.xlmr_mt import XLMR_Encoder_Decoder  # used by main(); was missing
from utils.train_utils import add_xlmr_args
from utils.data_utils import en_fr_processor, create_dataset, convert_examples_to_features
# Module-level logging: timestamped INFO-level output for training progress.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Parse CLI args, build the XLM-R encoder/decoder pair and run the
    (currently encoder-only) training loop over the EN-FR corpus."""
    parser = argparse.ArgumentParser()
    parser = add_xlmr_args(parser)
    args = parser.parse_args()

    # Seed every RNG we use so runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    processor = en_fr_processor()
    train_examples = processor.get_train_examples(args.data_dir)

    # preparing model configs
    hidden_size = 768 if 'base' in args.pretrained_path else 1024  # TODO: move this inside model.__init__
    device = 'cuda' if (torch.cuda.is_available() and not args.no_cuda) else 'cpu'

    # creating model — fixed: XLMR_Encoder_Decoder takes `enc_pretrained_path`
    # and `dropout`; the old call passed `pretrained_path=` / `dropout_p=`,
    # which raised TypeError.
    model = XLMR_Encoder_Decoder(
        enc_pretrained_path=args.pretrained_path,
        hidden_size=hidden_size,
        dropout=args.dropout,
        device=device,
    )
    model.encoder.to(device)
    model.decoder.to(device)

    # fixed: named_parameters() returns generators, which cannot be joined
    # with `+`; materialize them as lists first.
    params = (list(model.encoder.named_parameters()) +
              list(model.decoder.named_parameters()))
    optimizer_grouped_parameters = [
        {'params': [p for n, p in params]}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(
        optimizer, warmup_steps=1, t_total=1)

    train_features = convert_examples_to_features(
        train_examples, args.max_seq_length, model.encoder.encode_word)

    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_examples))
    logger.info(" Batch size = %d", args.train_batch_size)

    train_data = create_dataset(train_features)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(
        train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    for _ in tqdm(range(args.num_train_epochs), desc="Epoch"):
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        tbar = tqdm(train_dataloader, desc="Iteration")
        model.encoder.train()
        for step, batch in enumerate(tbar):
            batch = tuple(t.to(device) for t in batch)
            src_tensor, target_tensor = batch
            enc_out = model.encoder(src_tensor)
            # NOTE(review): no loss is computed and backward() is never
            # called, so optimizer.step() has no gradients to apply — the
            # training objective still needs to be wired in.
            torch.nn.utils.clip_grad_norm_(
                model.encoder.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.encoder.zero_grad()
    model.encoder.to(device)


if __name__ == "__main__":
    main()
|
{"/train.py": ["/model/xlmr_mt.py", "/utils/train_utils.py", "/utils/data_utils.py"]}
|
16,147
|
bubbleqi/xlm_roberta_NMT
|
refs/heads/master
|
/model/xlmr_mt.py
|
from fairseq.models.roberta import XLMRModel
from fairseq.models import FairseqDecoder
from fairseq import utils
import torch
import torch.nn as nn
import torch.nn.functional as F
class XLMR_Encoder(nn.Module):
    """Source-side encoder wrapping a pretrained XLM-R transformer."""

    def __init__(self, pretrained_path, hidden_size, dropout_p, device='cuda'):
        super().__init__()
        self.xlmr = XLMRModel.from_pretrained(pretrained_path)
        self.model = self.xlmr.model
        self.dropout = nn.Dropout(dropout_p)
        self.device = device

    def forward(self, src_tensor):
        """Return the transformer output for a batch of source token ids."""
        features, _ = self.model(src_tensor)
        return features

    def encode_word(self, s):
        """Tokenize a string and return its token ids as a plain list,
        with the leading <s> and trailing </s> ids removed."""
        ids = self.xlmr.encode(s)
        return ids.cpu().numpy().tolist()[1:-1]
class XLMR_Decoder(FairseqDecoder):
    """Single-layer LSTM decoder conditioned on the encoder's final hidden
    state (fairseq-tutorial-style architecture)."""

    def __init__(
        self, dictionary, encoder_hidden_dim=768, embed_dim=768, hidden_dim=768,
        dropout=0.1
    ):
        super().__init__(dictionary)
        # Our decoder will embed the inputs before feeding them to the LSTM.
        self.embed_tokens = nn.Embedding(
            num_embeddings=len(dictionary),
            embedding_dim=embed_dim,
            padding_idx=dictionary.pad(),
        )
        self.dropout = nn.Dropout(p=dropout)
        # We'll use a single-layer, unidirectional LSTM for simplicity.
        self.lstm = nn.LSTM(
            # For the first layer we'll concatenate the Encoder's final hidden
            # state with the embedded target tokens.
            input_size=encoder_hidden_dim + embed_dim,
            hidden_size=hidden_dim,
            num_layers=1,
            bidirectional=False,
        )
        # Define the output projection.
        self.output_projection = nn.Linear(hidden_dim, len(dictionary))

    # During training Decoders are expected to take the entire target sequence
    # (shifted right by one position) and produce logits over the vocabulary.
    # The *prev_output_tokens* tensor begins with the end-of-sentence symbol,
    # ``dictionary.eos()``, followed by the target sequence.
    def forward(self, prev_output_tokens, encoder_out):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
        Returns:
            tuple:
                - the last decoder layer's output of shape
                  `(batch, tgt_len, vocab)`
                - the last decoder layer's attention weights of shape
                  `(batch, tgt_len, src_len)`
        """
        bsz, tgt_len = prev_output_tokens.size()
        # Extract the final hidden state from the Encoder.
        # NOTE(review): this treats encoder_out as a single (batch, dim)
        # hidden state, but XLMR_Encoder.forward returns the full transformer
        # output — confirm the intended handoff between the two modules.
        final_encoder_hidden = encoder_out
        # Embed the target sequence, which has been shifted right by one
        # position and now starts with the end-of-sentence symbol.
        x = self.embed_tokens(prev_output_tokens)
        # Apply dropout.
        x = self.dropout(x)
        # Concatenate the Encoder's final hidden state to *every* embedded
        # target token.
        x = torch.cat(
            [x, final_encoder_hidden.unsqueeze(1).expand(bsz, tgt_len, -1)],
            dim=2,
        )
        # Using PackedSequence objects in the Decoder is harder than in the
        # Encoder, since the targets are not sorted in descending length order,
        # which is a requirement of ``pack_padded_sequence()``. Instead we'll
        # feed nn.LSTM directly.
        initial_state = (
            final_encoder_hidden.unsqueeze(0),  # hidden
            torch.zeros_like(final_encoder_hidden).unsqueeze(0),  # cell
        )
        output, _ = self.lstm(
            x.transpose(0, 1),  # convert to shape `(tgt_len, bsz, dim)`
            initial_state,
        )
        x = output.transpose(0, 1)  # convert to shape `(bsz, tgt_len, hidden)`
        # Project the outputs to the size of the vocabulary.
        x = self.output_projection(x)
        # Return the logits and ``None`` for the attention weights
        return x, None
class XLMR_Encoder_Decoder():
    """Bundles an XLM-R encoder with an LSTM decoder.

    Not an nn.Module itself; callers move ``.encoder`` / ``.decoder`` to a
    device themselves.
    """

    def __init__(self, enc_pretrained_path=None, hidden_size=768, dec_embed_dim=768,
                 dropout=0.1, task=None, device=None):
        # fixed: `task` was never stored, so `self.task.target_dictionary`
        # below raised AttributeError on every construction.
        self.task = task
        self.device = device
        self.encoder = XLMR_Encoder(pretrained_path=enc_pretrained_path,
                                    hidden_size=hidden_size, dropout_p=dropout)
        # NOTE(review): `task` must provide a `target_dictionary`; the
        # default of None still fails here — confirm callers always pass one.
        self.decoder = XLMR_Decoder(self.task.target_dictionary)
|
{"/train.py": ["/model/xlmr_mt.py", "/utils/train_utils.py", "/utils/data_utils.py"]}
|
16,148
|
bubbleqi/xlm_roberta_NMT
|
refs/heads/master
|
/utils/train_utils.py
|
from torch.utils.data import SequentialSampler, DataLoader
from tqdm import tqdm
from seqeval.metrics import f1_score, classification_report
import torch
import torch.nn.functional as F
def add_xlmr_args(parser):
    """Attach the training / validation command-line options to *parser*.

    Flags, defaults and help texts are identical to the historical inline
    version; they are simply registered from a declarative table.
    Returns the same parser for chaining.
    """
    _options = [
        # Required arguments.
        ("--data_dir", dict(
            default=None, type=str, required=True,
            help="The input data dir. Should contain the .tsv files (or other data files) for the task.")),
        ("--pretrained_path", dict(
            default=None, type=str, required=True,
            help="pretrained XLM-Roberta model path")),
        ("--output_dir", dict(
            default=None, type=str, required=True,
            help="The output directory where the model predictions and checkpoints will be written.")),
        # Other parameters.
        ("--cache_dir", dict(
            default="", type=str,
            help="Where do you want to store the pre-trained models downloaded from s3")),
        ("--max_seq_length", dict(
            default=128, type=int,
            help="The maximum total input sequence length after WordPiece tokenization. \n"
                 "Sequences longer than this will be truncated, and sequences shorter \n"
                 "than this will be padded.")),
        ("--train_batch_size", dict(
            default=32, type=int,
            help="Total batch size for training.")),
        ("--learning_rate", dict(
            default=5e-5, type=float,
            help="The initial learning rate for Adam.")),
        ("--num_train_epochs", dict(
            default=3, type=int,
            help="Total number of training epochs to perform.")),
        ("--weight_decay", dict(
            default=0.01, type=float,
            help="Weight deay if we apply some.")),
        ("--adam_epsilon", dict(
            default=1e-8, type=float,
            help="Epsilon for Adam optimizer.")),
        ("--max_grad_norm", dict(
            default=1.0, type=float,
            help="Max gradient norm.")),
        ("--no_cuda", dict(
            action='store_true',
            help="Whether not to use CUDA when available")),
        ("--seed", dict(
            type=int, default=42,
            help="random seed for initialization")),
        ("--gradient_accumulation_steps", dict(
            type=int, default=1,
            help="Number of updates steps to accumulate before performing a backward/update pass.")),
        ("--loss_scale", dict(
            type=float, default=0,
            help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                 "0 (default value): dynamic loss scaling.\n"
                 "Positive power of 2: static loss scaling value.\n")),
        ("--dropout", dict(
            type=float, default=0.3,
            help="training dropout probability")),
    ]
    for flag, kwargs in _options:
        parser.add_argument(flag, **kwargs)
    return parser
|
{"/train.py": ["/model/xlmr_mt.py", "/utils/train_utils.py", "/utils/data_utils.py"]}
|
16,163
|
gakonst/raiden-contracts
|
refs/heads/master
|
/raiden_contracts/tests/test_channel_settle.py
|
import pytest
from copy import deepcopy
from random import randint
from raiden_contracts.constants import (
EVENT_CHANNEL_SETTLED,
TEST_SETTLE_TIMEOUT_MIN,
)
from raiden_contracts.utils.events import check_channel_settled
from raiden_contracts.tests.fixtures.channel_test_values import channel_settle_test_values
from raiden_contracts.tests.fixtures.channel import call_settle
from raiden_contracts.tests.fixtures.config import fake_hex, fake_bytes
from raiden_contracts.tests.utils import (
MAX_UINT256,
get_settlement_amounts,
get_onchain_settlement_amounts,
)
def test_max_safe_uint256(token_network, token_network_test):
    """The MAX_SAFE_UINT256 constant must agree between the test contract,
    the production contract, and the Python-side MAX_UINT256."""
    chain_value = token_network_test.functions.get_max_safe_uint256().call()
    assert token_network.functions.MAX_SAFE_UINT256().call() == chain_value
    assert chain_value == MAX_UINT256
def test_settle_no_bp_success(
        web3,
        token_network,
        create_channel_and_deposit,
        get_accounts,
        create_balance_proof,
):
    """A channel closed without any balance proof can be settled with
    all-zero transferred/locked amounts after the settlement window."""
    (A, B) = get_accounts(2)
    deposit_A = 10
    deposit_B = 6
    settle_timeout = TEST_SETTLE_TIMEOUT_MIN
    locksroot = fake_bytes(32)
    additional_hash = fake_bytes(32)
    create_channel_and_deposit(A, B, deposit_A, deposit_B)

    # Close channel with no balance proof
    token_network.functions.closeChannel(
        B,
        locksroot,
        0,
        additional_hash,
        fake_bytes(64),
    ).transact({'from': A})

    # Do not call updateNonClosingBalanceProof

    # Settlement window must be over before settling the channel
    web3.testing.mine(settle_timeout)

    # Settling the channel should work with no balance proofs
    token_network.functions.settleChannel(
        A,
        0,
        0,
        locksroot,
        B,
        0,
        0,
        locksroot,
    ).transact({'from': A})
@pytest.mark.parametrize('channel_test_values', channel_settle_test_values)
def test_settle_channel_state(
        web3,
        get_accounts,
        custom_token,
        token_network,
        create_channel_and_deposit,
        withdraw_channel,
        close_and_update_channel,
        settle_state_tests,
        channel_test_values,
):
    """Settlement outcomes must be invariant under shifting value between the
    `transferred` and `locked` components of the balance proofs.

    NOTE(review): consecutive channels share a participant
    (accounts[no] / accounts[no + 1]) even though 2 * number_of_channels
    accounts are fetched — confirm this overlap is intentional.
    """
    number_of_channels = 5
    accounts = get_accounts(2 * number_of_channels)
    (vals_A0, vals_B0) = channel_test_values

    # We mimic old balance proofs here, with a high locked amount and lower transferred amount
    # We expect to have the same settlement values as the original values
    def equivalent_transfers(balance_proof):
        # Randomly move value from `transferred` into `locked` while keeping
        # transferred + locked constant.
        new_balance_proof = deepcopy(balance_proof)
        new_balance_proof.locked = randint(
            balance_proof.locked,
            balance_proof.transferred + balance_proof.locked,
        )
        new_balance_proof.transferred = (
            balance_proof.transferred +
            balance_proof.locked -
            new_balance_proof.locked
        )
        return new_balance_proof

    # Extreme variant: swap transferred and locked entirely.
    vals_A_reversed = deepcopy(vals_A0)
    vals_A_reversed.locked = vals_A0.transferred
    vals_A_reversed.transferred = vals_A0.locked
    vals_B_reversed = deepcopy(vals_B0)
    vals_B_reversed.locked = vals_B0.transferred
    vals_B_reversed.transferred = vals_B0.locked

    new_values = [
        (vals_A0, vals_B0),
        (vals_A_reversed, vals_B_reversed),
    ] + [
        sorted(
            [
                equivalent_transfers(vals_A0),
                equivalent_transfers(vals_B0),
            ],
            key=lambda x: x.transferred + x.locked,
            reverse=False,
        ) for no in range(0, number_of_channels - 1)
    ]

    # Calculate how much A and B should receive
    settlement = get_settlement_amounts(vals_A0, vals_B0)
    # Calculate how much A and B receive according to onchain computation
    settlement2 = get_onchain_settlement_amounts(vals_A0, vals_B0)

    for no in range(0, number_of_channels + 1):
        A = accounts[no]
        B = accounts[no + 1]
        (vals_A, vals_B) = new_values[no]
        vals_A.locksroot = fake_bytes(32, '02')
        vals_B.locksroot = fake_bytes(32, '03')
        create_channel_and_deposit(A, B, vals_A.deposit, vals_B.deposit)
        withdraw_channel(A, vals_A.withdrawn, B)
        withdraw_channel(B, vals_B.withdrawn, A)
        close_and_update_channel(
            A,
            vals_A,
            B,
            vals_B,
        )
        # Let the settlement window expire before settling.
        web3.testing.mine(TEST_SETTLE_TIMEOUT_MIN)

        pre_balance_A = custom_token.functions.balanceOf(A).call()
        pre_balance_B = custom_token.functions.balanceOf(B).call()
        pre_balance_contract = custom_token.functions.balanceOf(token_network.address).call()

        call_settle(token_network, A, vals_A, B, vals_B)

        # We do the balance & state tests here for each channel and also compare with
        # the expected settlement amounts
        settle_state_tests(
            A,
            vals_A,
            B,
            vals_B,
            pre_balance_A,
            pre_balance_B,
            pre_balance_contract,
        )

        # We compute again the settlement amounts here to compare with the other channel
        # settlement test values, which should be equal

        # Calculate how much A and B should receive
        settlement_equivalent = get_settlement_amounts(vals_A, vals_B)
        assert (
            settlement.participant1_balance +
            settlement.participant2_locked == settlement_equivalent.participant1_balance +
            settlement_equivalent.participant2_locked
        )
        assert (
            settlement.participant2_balance +
            settlement.participant1_locked == settlement_equivalent.participant2_balance +
            settlement_equivalent.participant1_locked
        )

        # Calculate how much A and B receive according to onchain computation
        settlement2_equivalent = get_onchain_settlement_amounts(vals_A, vals_B)
        assert (
            settlement2.participant1_balance +
            settlement2.participant2_locked == settlement2_equivalent.participant1_balance +
            settlement2_equivalent.participant2_locked
        )
        assert (
            settlement2.participant2_balance +
            settlement2.participant1_locked == settlement2_equivalent.participant2_balance +
            settlement2_equivalent.participant1_locked
        )
def test_settle_channel_event(
        web3,
        get_accounts,
        token_network,
        create_channel,
        channel_deposit,
        create_balance_proof,
        create_balance_proof_update_signature,
        event_handler,
):
    """Settling a closed-and-updated channel emits EVENT_CHANNEL_SETTLED
    with the expected final amounts (5 / 5)."""
    ev_handler = event_handler(token_network)
    (A, B) = get_accounts(2)
    deposit_A = 10
    settle_timeout = TEST_SETTLE_TIMEOUT_MIN
    locksroot = fake_hex(32, '00')

    channel_identifier = create_channel(A, B)[0]
    channel_deposit(A, deposit_A, B)

    # A claims 10 transferred (nonce 1); B claims 5 transferred (nonce 3).
    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 1, locksroot)
    balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, locksroot)
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )

    token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
    token_network.functions.updateNonClosingBalanceProof(
        A, B,
        *balance_proof_A,
        balance_proof_update_signature_B,
    ).transact({'from': B})

    # Settlement window must be over before settling.
    web3.testing.mine(settle_timeout)
    txn_hash = token_network.functions.settleChannel(
        B,
        5,
        0,
        locksroot,
        A,
        10,
        0,
        locksroot,
    ).transact({'from': A})

    ev_handler.add(txn_hash, EVENT_CHANNEL_SETTLED, check_channel_settled(
        channel_identifier,
        5,
        5,
    ))
    ev_handler.check()
|
{"/raiden_contracts/cm_test/test_contract_manager.py": ["/raiden_contracts/contract_manager.py"]}
|
16,164
|
gakonst/raiden-contracts
|
refs/heads/master
|
/raiden_contracts/tests/test_channel_update_transfer.py
|
import pytest
from eth_tester.exceptions import TransactionFailed
from raiden_contracts.constants import (
EVENT_CHANNEL_BALANCE_PROOF_UPDATED,
CHANNEL_STATE_OPENED,
CHANNEL_STATE_NONEXISTENT,
)
from raiden_contracts.utils.events import check_transfer_updated
from .fixtures.config import fake_bytes, empty_address
def test_update_call(
        get_accounts,
        token_network,
        create_channel,
        channel_deposit,
        create_balance_proof,
        create_balance_proof_update_signature,
):
    """updateNonClosingBalanceProof must reject malformed arguments:
    zero addresses, zeroed signatures, a tampered balance hash, and a
    zero nonce."""
    (A, B, C) = get_accounts(3)
    channel_identifier = create_channel(A, B)[0]
    channel_deposit(A, 15, B)
    token_network.functions.closeChannel(
        B,
        fake_bytes(32),
        0,
        fake_bytes(32),
        fake_bytes(64),
    ).transact({'from': A})

    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    (balance_hash, nonce, additional_hash, closing_signature) = balance_proof_A

    # Zero address as the closing participant
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            empty_address,
            B,
            *balance_proof_A,
            balance_proof_update_signature_B,
        ).transact({'from': C})
    # Zero address as the non-closing participant
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            empty_address,
            *balance_proof_A,
            balance_proof_update_signature_B,
        ).transact({'from': C})
    # Zeroed non-closing signature
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A,
            fake_bytes(64),
        ).transact({'from': C})
    # Tampered (zeroed) balance hash no longer matching the signatures
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            fake_bytes(32),
            nonce,
            additional_hash,
            closing_signature,
            balance_proof_update_signature_B,
        ).transact({'from': C})
    # Zero nonce
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            balance_hash,
            0,
            additional_hash,
            closing_signature,
            balance_proof_update_signature_B,
        ).transact({'from': C})
    # Zeroed closing signature
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            balance_hash,
            nonce,
            additional_hash,
            fake_bytes(64),
            balance_proof_update_signature_B,
        ).transact({'from': C})
def test_update_nonexistent_fail(
        get_accounts,
        token_network,
        create_balance_proof,
        create_balance_proof_update_signature,
):
    """updateNonClosingBalanceProof must revert when the channel does not exist."""
    (A, B, C) = get_accounts(3)
    (_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
    # Sanity check: no channel has been opened between A and B.
    assert settle_block_number == 0
    assert state == CHANNEL_STATE_NONEXISTENT
    channel_identifier = token_network.functions.getChannelIdentifier(A, B).call()
    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A,
            balance_proof_update_signature_B,
        ).transact({'from': C})
def test_update_notclosed_fail(
        get_accounts,
        token_network,
        create_channel,
        channel_deposit,
        create_balance_proof,
        create_balance_proof_update_signature,
):
    """updateNonClosingBalanceProof must revert while the channel is still open."""
    (A, B, C) = get_accounts(3)
    channel_identifier = create_channel(A, B)[0]
    channel_deposit(A, 25, B)
    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    (_, settle_block_number, state) = token_network.functions.getChannelInfo(A, B).call()
    # Sanity check: channel exists and is still open (closeChannel was never called).
    assert settle_block_number > 0
    assert state == CHANNEL_STATE_OPENED
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A,
            balance_proof_update_signature_B,
        ).transact({'from': C})
def test_update_wrong_nonce_fail(
        token_network,
        create_channel,
        channel_deposit,
        get_accounts,
        create_balance_proof,
        create_balance_proof_update_signature,
        updateBalanceProof_state_tests,
):
    """A second update with the same or a lower nonce must revert.

    After a successful update, the stored state must be unchanged by the
    failed attempts (checked via updateBalanceProof_state_tests).
    """
    (A, B, Delegate) = get_accounts(3)
    settle_timeout = 6
    deposit_A = 20
    channel_identifier = create_channel(A, B, settle_timeout)[0]
    channel_deposit(A, deposit_A, B)
    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
    balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    txn_hash1 = token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
    # First update succeeds (nonce 5).
    token_network.functions.updateNonClosingBalanceProof(
        A,
        B,
        *balance_proof_A,
        balance_proof_update_signature_B,
    ).transact({'from': Delegate})
    # Replaying the exact same balance proof (same nonce) must fail.
    balance_proof_A_same_nonce = balance_proof_A
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A_same_nonce,
            balance_proof_update_signature_B,
        ).transact({'from': Delegate})
    # A balance proof with a lower nonce (4 < 5) must also fail.
    balance_proof_A_lower_nonce = create_balance_proof(
        channel_identifier,
        A,
        10,
        0,
        4,
        fake_bytes(32, '02'),
    )
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A_lower_nonce,
    )
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A_lower_nonce,
            balance_proof_update_signature_B,
        ).transact({'from': A})
    # Stored channel state still reflects the first, successful update.
    updateBalanceProof_state_tests(
        A, balance_proof_A,
        B, balance_proof_B,
        settle_timeout,
        txn_hash1,
    )
def test_update_wrong_signatures(
        token_network,
        create_channel,
        channel_deposit,
        get_accounts,
        create_balance_proof,
        create_balance_proof_update_signature,
):
    """updateNonClosingBalanceProof must revert when either signature is from the wrong signer."""
    (A, B, C) = get_accounts(3)
    channel_identifier = create_channel(A, B)[0]
    channel_deposit(A, 25, B)
    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
    # Same balance proof data, but signed by C instead of A.
    balance_proof_A_fake = create_balance_proof(
        channel_identifier,
        A,
        10,
        0,
        5,
        fake_bytes(32, '02'),
        signer=C,
    )
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    # Update signature produced by C instead of B.
    balance_proof_update_signature_B_fake = create_balance_proof_update_signature(
        C,
        channel_identifier,
        *balance_proof_A,
    )
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A_fake,
            balance_proof_update_signature_B,
        ).transact({'from': C})
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A,
            balance_proof_update_signature_B_fake,
        ).transact({'from': C})
def test_update_channel_state(
        token_network,
        create_channel,
        channel_deposit,
        get_accounts,
        create_balance_proof,
        create_balance_proof_update_signature,
        updateBalanceProof_state_tests,
):
    """A valid update by a third-party delegate stores the expected channel state."""
    (A, B, Delegate) = get_accounts(3)
    settle_timeout = 6
    deposit_A = 20
    channel_identifier = create_channel(A, B, settle_timeout)[0]
    channel_deposit(A, deposit_A, B)
    balance_proof_A = create_balance_proof(channel_identifier, A, 10, 0, 5, fake_bytes(32, '02'))
    balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3, fake_bytes(32, '02'))
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    txn_hash1 = token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
    # Delegate submits B's counter-signed balance proof on B's behalf.
    token_network.functions.updateNonClosingBalanceProof(
        A,
        B,
        *balance_proof_A,
        balance_proof_update_signature_B,
    ).transact({'from': Delegate})
    updateBalanceProof_state_tests(
        A, balance_proof_A,
        B, balance_proof_B,
        settle_timeout,
        txn_hash1,
    )
def test_update_channel_fail_no_offchain_transfers(
        get_accounts,
        token_network,
        create_channel,
        create_balance_proof,
        create_balance_proof_update_signature,
):
    """Updating with an empty balance proof (no off-chain transfers) must revert."""
    (A, B) = get_accounts(2)
    channel_identifier = create_channel(A, B)[0]
    # Balance proof with zero transferred amount, zero locked amount, zero nonce.
    balance_proof_A = create_balance_proof(channel_identifier, A, 0, 0, 0)
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    token_network.functions.closeChannel(
        B,
        fake_bytes(32),
        0,
        fake_bytes(32),
        fake_bytes(64),
    ).transact({'from': A})
    # Fully zeroed-out arguments must be rejected.
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            fake_bytes(32),
            0,
            fake_bytes(32),
            fake_bytes(64),
            fake_bytes(64),
        ).transact({'from': B})
    # A signed-but-empty balance proof must be rejected as well.
    with pytest.raises(TransactionFailed):
        token_network.functions.updateNonClosingBalanceProof(
            A,
            B,
            *balance_proof_A,
            balance_proof_update_signature_B,
        ).transact({'from': B})
def test_update_channel_event(
        get_accounts,
        token_network,
        create_channel,
        channel_deposit,
        create_balance_proof,
        create_balance_proof_update_signature,
        event_handler,
):
    """A successful update emits EVENT_CHANNEL_BALANCE_PROOF_UPDATED for the closing side."""
    ev_handler = event_handler(token_network)
    (A, B) = get_accounts(2)
    deposit_A = 10
    deposit_B = 10
    channel_identifier = create_channel(A, B)[0]
    channel_deposit(A, deposit_A, B)
    channel_deposit(B, deposit_B, A)
    balance_proof_B = create_balance_proof(channel_identifier, B, 5, 0, 3)
    balance_proof_A = create_balance_proof(channel_identifier, A, 2, 0, 1)
    balance_proof_update_signature_B = create_balance_proof_update_signature(
        B,
        channel_identifier,
        *balance_proof_A,
    )
    token_network.functions.closeChannel(B, *balance_proof_B).transact({'from': A})
    txn_hash = token_network.functions.updateNonClosingBalanceProof(
        A,
        B,
        *balance_proof_A,
        balance_proof_update_signature_B,
    ).transact({'from': B})
    # The event must reference the channel and the closing participant A.
    ev_handler.add(
        txn_hash,
        EVENT_CHANNEL_BALANCE_PROOF_UPDATED,
        check_transfer_updated(channel_identifier, A),
    )
    ev_handler.check()
|
{"/raiden_contracts/cm_test/test_contract_manager.py": ["/raiden_contracts/contract_manager.py"]}
|
16,165
|
gakonst/raiden-contracts
|
refs/heads/master
|
/raiden_contracts/cm_test/test_contract_manager.py
|
import pytest
from raiden_contracts.contract_manager import (
ContractManager,
CONTRACTS_SOURCE_DIRS,
)
from raiden_contracts.constants import (
CONTRACT_TOKEN_NETWORK,
EVENT_CHANNEL_CLOSED,
)
# Pre-compiled ABI/bytecode bundle shipped with the package; relative to the repo root.
PRECOMPILED_CONTRACTS_PATH = 'raiden_contracts/data/contracts.json'
def contract_manager_meta(contracts_path):
    """Exercise a ContractManager built from *contracts_path*.

    Checks ABI lookup for a known contract and event, plus the error raised
    for unknown contract and event names.
    """
    cm = ContractManager(contracts_path)

    contract_abi = cm.get_contract_abi(CONTRACT_TOKEN_NETWORK)
    assert isinstance(contract_abi, list)
    with pytest.raises(KeyError):
        cm.get_contract_abi('SomeName')

    event_abi = cm.get_event_abi(CONTRACT_TOKEN_NETWORK, EVENT_CHANNEL_CLOSED)
    assert isinstance(event_abi, dict)
    with pytest.raises(ValueError):
        cm.get_event_abi(CONTRACT_TOKEN_NETWORK, 'NonExistant')
def test_contract_manager_compile():
    """Build a ContractManager by compiling the Solidity sources (requires solc)."""
    contract_manager_meta(CONTRACTS_SOURCE_DIRS)
def test_contract_manager_json():
    """Build a ContractManager from the precompiled JSON bundle (no solc needed)."""
    # try to load contracts from a precompiled file
    contract_manager_meta(PRECOMPILED_CONTRACTS_PATH)
|
{"/raiden_contracts/cm_test/test_contract_manager.py": ["/raiden_contracts/contract_manager.py"]}
|
16,166
|
gakonst/raiden-contracts
|
refs/heads/master
|
/raiden_contracts/contract_manager.py
|
import os
import json
import logging
from typing import Union, List, Dict
from solc import compile_files
from web3.utils.contracts import find_matching_event_abi
log = logging.getLogger(__name__)

# Precompiled ABI/bytecode bundle shipped inside the package.
CONTRACTS_DIR = os.path.join(os.path.dirname(__file__), 'data/contracts.json')

# Solidity source directories, keyed by the solc import-remapping prefix.
CONTRACTS_SOURCE_DIRS = {
    'raiden': os.path.join(os.path.dirname(__file__), 'contracts/'),
    'test': os.path.join(os.path.dirname(__file__), 'contracts/test'),
}
# Normalize the paths (e.g. strip the trailing slash) so they are usable as
# consistent remapping targets.
CONTRACTS_SOURCE_DIRS = {
    k: os.path.normpath(v) for k, v in CONTRACTS_SOURCE_DIRS.items()
}
def fix_contract_key_names(input: Dict) -> Dict:
    """Strip the source-file prefix from solc output keys.

    solc names each compiled contract '<source path>:<ContractName>'; return
    a new mapping keyed by the bare contract name only.
    """
    return {key.split(':')[1]: value for key, value in input.items()}
class ContractManager:
    """Loads contract ABIs either by compiling Solidity sources or from a
    precompiled JSON bundle, and answers contract/event ABI lookups."""

    def __init__(self, path: Union[str, List[str]]) -> None:
        """Params:
        path: either path to a precompiled contract JSON file, a directory of
        solidity files, or a mapping {remap_prefix: source_dir} to compile.

        NOTE(review): the annotation says List[str] but the first branch
        expects a dict — confirm the intended public type.
        """
        # NOTE(review): stays None when loading from a JSON file, which would
        # make get_contract_path()/get_mappings() fail in that mode — confirm.
        self.contracts_source_dirs = None
        self.abi = dict()
        if isinstance(path, dict):
            self.contracts_source_dirs = path
            for dir_path in path.values():
                self.abi.update(
                    ContractManager.precompile_contracts(dir_path, self.get_mappings()),
                )
        elif os.path.isdir(path):
            # A bare directory is wrapped in a one-entry mapping and re-dispatched.
            ContractManager.__init__(self, {'smart_contracts': path})
        else:
            # Treat anything else as a path to the precompiled JSON bundle.
            with open(path, 'r') as json_file:
                self.abi = json.load(json_file)

    def compile_contract(self, contract_name: str, libs=None, *args):
        """Compile contract and return JSON containing abi and bytecode.

        NOTE(review): `libs` and `*args` are accepted but never used.
        """
        contract_json = compile_files(
            [self.get_contract_path(contract_name)[0]],
            output_values=('abi', 'bin', 'ast'),
            import_remappings=self.get_mappings(),
            optimize=False,
        )
        # solc keys results by '<path>:<name>'; reduce to the bare file stem.
        contract_json = {
            os.path.basename(key).split('.', 1)[0]: value
            for key, value in contract_json.items()
        }
        return contract_json.get(contract_name, None)

    def get_contract_path(self, contract_name: str):
        """Return all matching source-file paths across the configured source dirs."""
        return sum(
            (self.list_contract_path(contract_name, x)
             for x in self.contracts_source_dirs.values()),
            [],
        )

    @staticmethod
    def list_contract_path(contract_name: str, directory: str):
        """Get contract source file for a specified contract"""
        return [
            os.path.join(directory, x)
            for x in os.listdir(directory)
            if os.path.basename(x).split('.', 1)[0] == contract_name
        ]

    def get_mappings(self) -> List[str]:
        """Return list of 'prefix=dir' remappings to use as solc argument."""
        return ['%s=%s' % (k, v) for k, v in self.contracts_source_dirs.items()]

    @staticmethod
    def precompile_contracts(contracts_dir: str, map_dirs: List) -> Dict:
        """
        Compile solidity contracts into ABI. This requires solc somewhere in the $PATH
        and also ethereum.tools python library.
        Parameters:
            contracts_dir: directory where the contracts are stored.
            All files with .sol suffix will be compiled.
            The method won't recurse into subdirectories.
        Return:
            map (contract_name => ABI)
        """
        files = []
        for contract in os.listdir(contracts_dir):
            contract_path = os.path.join(contracts_dir, contract)
            # Skip subdirectories and non-Solidity files.
            if not os.path.isfile(contract_path) or not contract_path.endswith('.sol'):
                continue
            files.append(contract_path)
        try:
            res = compile_files(
                files,
                output_values=('abi', 'bin', 'ast'),
                import_remappings=map_dirs,
                optimize=False,
            )
            return fix_contract_key_names(res)
        except FileNotFoundError:
            # compile_files raises FileNotFoundError when the solc binary is missing.
            raise Exception('Could not compile the contract. Check that solc is available.')

    def get_contract(self, contract_name: str) -> Dict:
        """Return bin+abi of the contract"""
        return self.abi[contract_name]

    def get_contract_abi(self, contract_name: str) -> Dict:
        """ Returns the ABI for a given contract. Raises KeyError for unknown names. """
        return self.abi[contract_name]['abi']

    def get_event_abi(self, contract_name: str, event_name: str) -> Dict:
        """ Returns the ABI for a given event. Raises ValueError for unknown events. """
        contract_abi = self.get_contract_abi(contract_name)
        return find_matching_event_abi(contract_abi, event_name)
# Module-level singleton: prefer the precompiled bundle when it exists,
# otherwise fall back to compiling from sources (requires solc on $PATH).
if os.path.isfile(CONTRACTS_DIR):
    CONTRACT_MANAGER = ContractManager(CONTRACTS_DIR)
else:
    CONTRACT_MANAGER = ContractManager(CONTRACTS_SOURCE_DIRS)
|
{"/raiden_contracts/cm_test/test_contract_manager.py": ["/raiden_contracts/contract_manager.py"]}
|
16,174
|
Duskamo/goopies_gui
|
refs/heads/master
|
/listeners/GoopieConsumerListener.py
|
import threading
import time
class GoopieConsumerListener(threading.Thread):
    """Background thread that feeds goopies when they overlap pellets and
    recolors them according to their health band.

    Consumed pellet indices are pushed onto `pelletConsumedQueue` so the
    goopie update path can remove them from the screen.
    """

    def __init__(self, goopies, pellets, pelletConsumedQueue):
        super(GoopieConsumerListener, self).__init__()
        self.goopies = goopies
        self.pellets = pellets
        self.pelletConsumedQueue = pelletConsumedQueue

    def run(self):
        # Poll forever at ~4 Hz; the thread lives for the lifetime of the game.
        while True:
            for i, goopie in enumerate(self.goopies):
                # Feed the goopie if it consumed any pellet this tick.
                if self.goopieConsumedPellet(i):
                    goopie.health += 20
                # NOTE: passive health decay is currently disabled:
                # goopie.health -= 1

                # Recolor by health band: healthy / hungry / starving.
                if goopie.health >= 70:
                    goopie.color = 'Blue'
                elif 30 <= goopie.health < 70:
                    goopie.color = 'Yellow'
                elif goopie.health < 30:
                    goopie.color = 'Red'
            time.sleep(0.25)

    def goopieConsumedPellet(self, i):
        """Return True if goopie *i* overlaps any pellet.

        Every overlapping pellet's index is put on `pelletConsumedQueue`
        (overlap = both coordinate deltas under 20 pixels).
        """
        isPelletConsumed = False
        for j, pellet in enumerate(self.pellets):
            if abs(self.goopies[i].x - pellet.x) < 20 and abs(self.goopies[i].y - pellet.y) < 20:
                isPelletConsumed = True
                self.pelletConsumedQueue.put(j)
        return isPelletConsumed
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,175
|
Duskamo/goopies_gui
|
refs/heads/master
|
/Config.py
|
class Config:
    """Global tuning knobs for the goopies simulation."""

    # Population Config: how many of each object StateCreator spawns.
    GOOPIE_POPULATION = 1
    ZAPPER_POPULATION = 5
    PELLET_POPULATION = 2

    # Movement Config: movement strategy selected by name.
    ZAPPER_MOVEMENT = "None"  # None | Random | Nearest
    GOOPIE_MOVEMENT = "Fluid"  # None | Cardinal | Fluid
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,176
|
Duskamo/goopies_gui
|
refs/heads/master
|
/models/Zapper.py
|
import random
import math
from libs.graphics import *
from managers.ZapperUpdateManager import *
from Config import *
class Zapper:
    """Hazard object: a cyan ring with a square core that may chase goopies.

    Drawing uses the bundled graphics library; movement strategy is chosen
    by Config.ZAPPER_MOVEMENT and delegated to ZapperUpdateManager.
    """

    def __init__(self, win, x, y):
        self.win = win          # GraphWin to draw into
        self.x = x              # center x
        self.y = y              # center y
        self.radius = 50
        self.updateManager = ZapperUpdateManager(self)
        self.speed = 0.1        # movement step used by the update manager
        self.initState()

    def initState(self):
        """Build the graphics primitives (outer ring + square center)."""
        self.body = Circle(Point(self.x, self.y), self.radius)
        self.body.setWidth(10)
        self.body.setOutline('Cyan')
        self.center = Rectangle(Point(self.x - 20, self.y - 20), Point(self.x + 20, self.y + 20))
        self.center.setFill('Cyan')

    def draw(self):
        """Render both primitives to the window."""
        self.body.draw(self.win)
        self.center.draw(self.win)

    def undraw(self):
        """Remove both primitives from the window."""
        self.body.undraw()
        self.center.undraw()

    def move(self, dx, dy):
        """Shift the zapper and its primitives by (dx, dy)."""
        self.x = self.x + dx
        self.y = self.y + dy
        self.body.move(dx, dy)
        self.center.move(dx, dy)

    def update(self):
        """Per-frame step: dispatch to the configured movement strategy."""
        # move zapper randomly
        if Config.ZAPPER_MOVEMENT == "Random":
            self.updateManager.zapperMoveRandom()
        # move zapper with knowledge of goopies whereabouts
        elif Config.ZAPPER_MOVEMENT == "Nearest":
            self.updateManager.zapperMoveNearestGoopie()
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,177
|
Duskamo/goopies_gui
|
refs/heads/master
|
/data/TempGoopieThrusterData.py
|
class TempGoopieThrusterData:
    """Canned thruster command sequence for exercising goopie movement."""

    def __init__(self):
        self.thrusterData = []
        self.addData()

    def addData(self):
        """Append the fixed command list (thrusters to fire + burn time)."""
        commands = [
            {"thruster": ['SW'], "time": "5"},
            {"thruster": ['SE'], "time": "5"},
            {"thruster": ['SW', 'SE'], "time": "10"},
        ]
        self.thrusterData.extend(commands)

    def getThrusterData(self):
        """Return the accumulated command list."""
        return self.thrusterData
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,178
|
Duskamo/goopies_gui
|
refs/heads/master
|
/listeners/KeyboardListener.py
|
import time
from pynput import keyboard
from functools import partial
from Config import *
class KeyboardListener:
    """Translates pynput key presses into movement commands on keyboardQueue.

    The command format depends on Config.GOOPIE_MOVEMENT:
    - "Cardinal": plain direction strings ("Up", "Down", ...).
    - "Fluid": dicts naming the thrusters to fire and their on/off status.
    """

    def __init__(self, keyboardQueue):
        self.listener = None
        self.keyboardQueue = keyboardQueue
        self.current = set()  # NOTE(review): never used — candidate for removal

    def join(self):
        # Collect events until released
        # NOTE(review): references self.on_release, which is commented out
        # below — calling join() would raise AttributeError; confirm intent.
        with keyboard.Listener(
                on_press=self.on_press,
                on_release=self.on_release) as listener:
            listener.join()

    def start(self):
        # ...or, in a non-blocking fashion:
        self.listener = keyboard.Listener(
            on_press=partial(self.on_press))
        self.listener.start()

    def on_press(self, key):
        """pynput callback: map arrow keys to queue commands per movement mode."""
        if Config.GOOPIE_MOVEMENT == "Cardinal":
            try:
                if key == keyboard.Key.up:
                    self.keyboardQueue.put("Up")
                elif key == keyboard.Key.down:
                    self.keyboardQueue.put("Down")
                elif key == keyboard.Key.left:
                    self.keyboardQueue.put("Left")
                elif key == keyboard.Key.right:
                    self.keyboardQueue.put("Right")
            except AttributeError:
                print('special key {0} pressed'.format(key))
        elif Config.GOOPIE_MOVEMENT == "Fluid":
            # Fluid mode fires the thrusters opposite to the travel direction.
            try:
                if key == keyboard.Key.up:
                    self.keyboardQueue.put({'thruster': ['SW', 'SE'], 'status': 'On'})
                elif key == keyboard.Key.down:
                    self.keyboardQueue.put({'thruster': ['NW', 'NE'], 'status': 'On'})
                elif key == keyboard.Key.left:
                    self.keyboardQueue.put({'thruster': ['NE'], 'status': 'On'})
                elif key == keyboard.Key.right:
                    self.keyboardQueue.put({'thruster': ['NW'], 'status': 'On'})
            except AttributeError:
                print('special key {0} pressed'.format(key))

    # Disabled key-release handler, kept for reference.
    """
    def on_release(self, key):
        if Config.GOOPIE_MOVEMENT == "Fluid":
            try:
                if key == keyboard.Key.up:
                    self.keyboardQueue.put({'thruster': ['SW', 'SE'], 'status': 'Off'})
                elif key == keyboard.Key.down:
                    self.keyboardQueue.put({'thruster': ['NW', 'NE'], 'status': 'Off'})
                elif key == keyboard.Key.left:
                    self.keyboardQueue.put({'thruster': ['NE'], 'status': 'Off'})
                elif key == keyboard.Key.right:
                    self.keyboardQueue.put({'thruster': ['NW'], 'status': 'Off'})
            except AttributeError:
                print('special key {0} pressed'.format(key))
    """
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,179
|
Duskamo/goopies_gui
|
refs/heads/master
|
/main.py
|
import time
from libs.graphics import *
from queue import Queue
from models.Tank import *
from models.Zapper import *
from models.Goopie import *
from models.Pellet import *
from helpers.StateCreator import *
from listeners.GoopieConsumerListener import *
from listeners.KeyboardListener import *
class main():
    """Game entry point: builds the world, starts listener threads, and runs
    the frame loop until isRunning is cleared."""

    def __init__(self):
        self.initialize()

    def initialize(self):
        """One-shot setup, then blocks inside run() until the game ends."""
        # Setup Background, game objects, and initial states
        self.win = GraphWin('Goopies', 1200, 800)
        self.win.setBackground('black')
        self.tank = Tank(self.win)
        self.tank.draw()
        self.stateCreator = StateCreator(self.win)
        self.stateCreator.createGame()
        self.goopies = self.stateCreator.getGoopies()
        self.pellets = self.stateCreator.getPellets()
        self.zappers = self.stateCreator.getZappers()
        self.isRunning = True
        # Setup Queues, Listeners, and off threads.
        # Only the first goopie is wired to keyboard input for now.
        self.keyboardQueue = Queue(maxsize=0)
        self.pelletConsumedQueue = Queue(maxsize=0)
        self.goopies[0].pellets = self.pellets
        self.goopies[0].keyboardQueue = self.keyboardQueue  # TEMP
        self.goopies[0].pelletConsumedQueue = self.pelletConsumedQueue
        goopieConsumerListener = GoopieConsumerListener(self.goopies, self.pellets, self.pelletConsumedQueue)
        goopieConsumerListener.start()
        keyboardListener = KeyboardListener(self.keyboardQueue)
        keyboardListener.start()
        # Setup Game Loop (blocks until isRunning becomes False)
        self.run()
        # Pause and Close
        self.win.getMouse()
        self.win.close()

    def run(self):
        """Frame loop: process events, update every object, then sleep."""
        # Game Loop
        while self.isRunning:
            # Process Events - Process inputs and other things
            self.processEvents()
            # Update - Update all objects that needs updating, ex position changes, physics
            for i in range(len(self.goopies)):
                self.goopies[i].update()
            for i in range(len(self.zappers)):
                # Zappers need a fresh reference to the goopie list each frame.
                self.zappers[i].goopies = self.goopies
                self.zappers[i].update()
            # Draw - Render things on screen
            # Pause thread for framerate
            time.sleep(0.0017)

    def processEvents(self):
        """Placeholder for win/lose detection; currently a no-op."""
        # Check if game is complete or not
        ""
if __name__ == "__main__":
    # NOTE(review): rebinds the class name 'main' to the created instance;
    # harmless at script exit, but a distinct variable name would be clearer.
    main = main()
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,180
|
Duskamo/goopies_gui
|
refs/heads/master
|
/models/Tank.py
|
from libs.graphics import *
class Tank:
    """The arena: a fixed cyan circular border all objects live inside."""

    def __init__(self, win):
        self.win = win  # GraphWin to draw into
        self.initState()

    def initState(self):
        """Build the border circle (fixed center and radius)."""
        self.border = Circle(Point(600, 400), 400)
        self.border.setWidth(10)
        self.border.setOutline('Cyan')

    def draw(self):
        """Render the border to the window."""
        self.border.draw(self.win)
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,181
|
Duskamo/goopies_gui
|
refs/heads/master
|
/models/Goopie.py
|
from libs.graphics import *
from Config import *
from managers.GoopieUpdateManager import *
class Goopie:
    """Player creature: a circle body with a gland, four sensors, and four
    diagonal thrusters. Health drives color; updates are delegated to
    GoopieUpdateManager.

    NOTE(review): keyboardQueue / pelletConsumedQueue / pellets are attached
    externally (by main/StateCreator) before update() is called — confirm.
    """

    def __init__(self, win, x, y):
        self.win = win
        self.x = x
        self.y = y
        self.radius = 15
        self.updateManager = GoopieUpdateManager(self)
        self.health = 100   # 0..100; color bands applied elsewhere
        self.color = 'blue'
        self.initState()

    def initState(self):
        """Build all graphics primitives relative to (x, y)."""
        self.body = Circle(Point(self.x, self.y), self.radius)
        self.body.setWidth(1)
        self.body.setOutline(self.color)
        self.gland = Oval(Point(self.x - 5, self.y - 13), Point(self.x + 5, self.y + 13))
        self.gland.setWidth(1)
        self.gland.setFill('purple')
        # Four short sensor stubs pointing out of the body.
        self.sensorNorth = Line(Point(self.x, self.y - 20), Point(self.x, self.y - 15))
        self.sensorNorth.setFill('red')
        self.sensorSouth = Line(Point(self.x, self.y + 20), Point(self.x, self.y + 15))
        self.sensorSouth.setFill('red')
        self.sensorWest = Line(Point(self.x - 20, self.y), Point(self.x - 15, self.y))
        self.sensorWest.setFill('red')
        self.sensorEast = Line(Point(self.x + 20, self.y), Point(self.x + 15, self.y))
        self.sensorEast.setFill('red')
        # Four diagonal thrusters; recolored red while firing.
        self.thrusterNE = Line(Point(self.x, self.y - 15), Point(self.x + 15, self.y))
        self.thrusterNE.setFill('light grey')
        self.thrusterNW = Line(Point(self.x, self.y - 15), Point(self.x - 15, self.y))
        self.thrusterNW.setFill('light grey')
        self.thrusterSE = Line(Point(self.x, self.y + 15), Point(self.x + 15, self.y))
        self.thrusterSE.setFill('light grey')
        self.thrusterSW = Line(Point(self.x, self.y + 15), Point(self.x - 15, self.y))
        self.thrusterSW.setFill('light grey')

    def draw(self):
        """Render every primitive to the window."""
        self.body.draw(self.win)
        self.gland.draw(self.win)
        self.sensorNorth.draw(self.win)
        self.sensorSouth.draw(self.win)
        self.sensorWest.draw(self.win)
        self.sensorEast.draw(self.win)
        self.thrusterNE.draw(self.win)
        self.thrusterSE.draw(self.win)
        self.thrusterNW.draw(self.win)
        self.thrusterSW.draw(self.win)

    def undraw(self):
        """Remove every primitive from the window."""
        self.body.undraw()
        self.gland.undraw()
        self.sensorNorth.undraw()
        self.sensorSouth.undraw()
        self.sensorWest.undraw()
        self.sensorEast.undraw()
        self.thrusterNE.undraw()
        self.thrusterSE.undraw()
        self.thrusterNW.undraw()
        self.thrusterSW.undraw()

    def move(self, dx, dy):
        """Shift the goopie and all primitives by (dx, dy)."""
        self.x += dx
        self.y += dy
        self.body.move(dx, dy)
        self.gland.move(dx, dy)
        self.sensorNorth.move(dx, dy)
        self.sensorSouth.move(dx, dy)
        self.sensorWest.move(dx, dy)
        self.sensorEast.move(dx, dy)
        self.thrusterNE.move(dx, dy)
        self.thrusterSE.move(dx, dy)
        self.thrusterNW.move(dx, dy)
        self.thrusterSW.move(dx, dy)

    def update(self):
        """Per-frame step: skin, death, keyboard movement, pellet cleanup."""
        # Update Skin of goopie by its health
        if self.health > 0:
            self.updateManager.updateSkin()
        # Turn goopie to corpse if health drops to 0
        if self.health == 0:
            self.updateManager.goopieCorpse()
        # Move goopie on keyboard input in basic cardinal direction pattern
        if not self.keyboardQueue.empty() and Config.GOOPIE_MOVEMENT == "Cardinal":
            self.updateManager.goopieKeyboardInput()
        # Move goopie on keyboard input in fluid direction pattern
        if not self.keyboardQueue.empty() and Config.GOOPIE_MOVEMENT == "Fluid":
            self.updateManager.goopieKeyboardInputFluid()
        # Clear pellet from screen when collided
        if not self.pelletConsumedQueue.empty():
            self.updateManager.clearPelletOnContact()
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,182
|
Duskamo/goopies_gui
|
refs/heads/master
|
/models/Pellet.py
|
from libs.graphics import *
class Pellet:
    """Food item: a yellow circle with a black square core."""

    def __init__(self, win, x, y):
        self.win = win  # GraphWin to draw into
        self.x = x      # center x
        self.y = y      # center y
        self.radius = 10
        self.initState()

    def initState(self):
        """Build the two graphics primitives (body + core)."""
        self.body = Circle(Point(self.x, self.y), self.radius)
        self.body.setWidth(1)
        self.body.setFill('yellow')
        self.core = Rectangle(Point(self.x - 5, self.y - 5), Point(self.x + 5, self.y + 5))
        self.core.setWidth(1)
        self.core.setFill('black')

    def draw(self):
        """Render both primitives to the window."""
        self.body.draw(self.win)
        self.core.draw(self.win)

    def undraw(self):
        """Remove both primitives from the window."""
        self.body.undraw()
        self.core.undraw()
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,183
|
Duskamo/goopies_gui
|
refs/heads/master
|
/helpers/StateCreator.py
|
import random
from queue import Queue
from Config import *
from models.Tank import *
from models.Zapper import *
from models.Goopie import *
from models.Pellet import *
class StateCreator:
    """Spawns goopies, zappers, and pellets at random non-overlapping spots."""

    def __init__(self, win):
        self.win = win  # GraphWin to draw into

    def createGame(self):
        """Place, draw, and cross-wire all game objects.

        Retries the whole random placement until no two distinct objects are
        within 125px of each other on both axes. Note the separation check
        counts a coordinate against itself (i == j always "touches"), so the
        accepted case is objCount == len(randCoords): only self-pairs matched.
        NOTE(review): with crowded configs this rejection loop may take many
        iterations (or never finish) — confirm population sizes stay small.
        """
        # make sure objects dont touch
        objectsDontTouch = False
        while not objectsDontTouch:
            # init game object lists
            self.zappers = []
            self.goopies = []
            self.pellets = []
            # generate random coordinates to place objects in tank
            randCoords = []
            for i in range(Config.GOOPIE_POPULATION + Config.ZAPPER_POPULATION + Config.PELLET_POPULATION):
                randCoords.append({'x': random.randint(350, 850), 'y': random.randint(150, 650)})
            # check to see if all objects are separate
            objCount = 0
            for i in range(len(randCoords)):
                for j in range(len(randCoords)):
                    if ((abs(randCoords[i]['x'] - randCoords[j]['x']) < 125) and (abs(randCoords[i]['y'] - randCoords[j]['y']) < 125)):
                        objCount += 1
            if objCount == len(randCoords):
                objectsDontTouch = True
        # place objects in tank, consuming coordinates front-to-back
        for i in range(Config.GOOPIE_POPULATION):
            self.goopies.append(Goopie(self.win, randCoords[0]['x'], randCoords[0]['y']))
            randCoords.pop(0)
        for i in range(Config.ZAPPER_POPULATION):
            self.zappers.append(Zapper(self.win, randCoords[0]['x'], randCoords[0]['y']))
            randCoords.pop(0)
        for i in range(Config.PELLET_POPULATION):
            self.pellets.append(Pellet(self.win, randCoords[0]['x'], randCoords[0]['y']))
            randCoords.pop(0)
        # print objects to tank
        for i in range(len(self.zappers)):
            self.zappers[i].draw()
        for i in range(len(self.goopies)):
            self.goopies[i].draw()
        for i in range(len(self.pellets)):
            self.pellets[i].draw()
        # let goopies know about pellets, and zappers know about goopies
        for i in range(len(self.zappers)):
            self.zappers[i].goopies = self.goopies
        for i in range(len(self.goopies)):
            self.goopies[i].pellets = self.pellets

    def getZappers(self):
        """Return the spawned zapper list (valid after createGame)."""
        return self.zappers

    def getGoopies(self):
        """Return the spawned goopie list (valid after createGame)."""
        return self.goopies

    def getPellets(self):
        """Return the spawned pellet list (valid after createGame)."""
        return self.pellets
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,184
|
Duskamo/goopies_gui
|
refs/heads/master
|
/managers/GoopieUpdateManager.py
|
from libs.graphics import *
from models.Corpse import *
class GoopieUpdateManager:
    """Per-goopie update logic: redrawing, death, keyboard-driven movement,
    and pellet consumption. Holds only a reference to the goopie it manages."""

    def __init__(self, goopie):
        self.goopie = goopie

    def updateSkin(self):
        """Redraw the goopie's body at its current position and idle the thrusters."""
        self.goopie.body.undraw()
        self.goopie.body = Circle(Point(self.goopie.x, self.goopie.y), self.goopie.radius)
        self.goopie.body.setWidth(1)
        self.goopie.body.setOutline(self.goopie.color)
        self.goopie.body.draw(self.goopie.win)
        # Thrusters return to the idle colour between keyboard events.
        self.goopie.thrusterSE.setFill('light grey')
        self.goopie.thrusterSW.setFill('light grey')
        self.goopie.thrusterNE.setFill('light grey')
        self.goopie.thrusterNW.setFill('light grey')

    def goopieCorpse(self):
        """Replace the goopie with a drawn Corpse and drop the reference."""
        self.goopie.undraw()
        corpse = Corpse(self.goopie.win, self.goopie.x, self.goopie.y)
        corpse.draw()
        self.goopie = None

    def goopieKeyboardInput(self):
        """Consume one key event from the keyboard queue and move accordingly.

        BUG FIX: the original called keyboardQueue.get() in every elif branch,
        so each comparison consumed a *different* queue item (and could block
        on an empty queue). The event is now read exactly once.
        """
        event = self.goopie.keyboardQueue.get()
        if event == "Up":
            self.goopie.move(0, -3)
        elif event == "Down":
            self.goopie.move(0, 5)
        elif event == "Left":
            self.goopie.move(-6, 0)
        elif event == "Right":
            self.goopie.move(6, 0)
        self.goopie.keyboardQueue.queue.clear()

    def goopieKeyboardInputFluid(self):
        """Thruster-highlighting variant of keyboard movement.

        Same single-read fix as goopieKeyboardInput: one get() per event.
        The dead commented-out "Off" handling from the original was removed.
        """
        event = self.goopie.keyboardQueue.get()
        if event['thruster'] == ['SW', 'SE'] and event['status'] == "On":
            self.goopie.thrusterSE.setFill('red')
            self.goopie.thrusterSW.setFill('red')
            self.goopie.move(0, -3)
        elif event['thruster'] == ['NW', 'NE'] and event['status'] == "On":
            self.goopie.thrusterNE.setFill('red')
            self.goopie.thrusterNW.setFill('red')
            self.goopie.move(0, 5)
        elif event['thruster'] == ['NE'] and event['status'] == "On":
            self.goopie.thrusterNE.setFill('red')
            self.goopie.move(-6, 0)
        elif event['thruster'] == ['NW'] and event['status'] == "On":
            self.goopie.thrusterNW.setFill('red')
            self.goopie.move(6, 0)
        self.goopie.keyboardQueue.queue.clear()

    def clearPelletOnContact(self):
        """Undraw and remove the pellet whose index arrives on the consumed queue."""
        removedPellet = self.goopie.pelletConsumedQueue.get()
        self.goopie.pellets[removedPellet].undraw()
        self.goopie.pellets.pop(removedPellet)
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,185
|
Duskamo/goopies_gui
|
refs/heads/master
|
/managers/ZapperUpdateManager.py
|
import random
import math
class ZapperUpdateManager:
    """Movement policies for a zapper: random wander or pursuit of the
    nearest goopie."""

    def __init__(self, zapper):
        self.zapper = zapper

    def zapperMoveRandom(self):
        """Take one step of size `speed` in a uniformly random compass
        direction (possibly standing still when both offsets are zero)."""
        x = random.randint(-1, 1)
        y = random.randint(-1, 1)
        self.zapper.move(x * self.zapper.speed, y * self.zapper.speed)

    def zapperMoveNearestGoopie(self):
        """Step one unit of `speed` toward the closest goopie.

        Fixes/improvements over the original:
        - uses min() with a distance key instead of building and sorting a
          full list (O(n) vs O(n log n));
        - collapses the eight-direction elif chain into a per-axis sign step
          (identical direction table, including "no move" when co-located);
        - tolerates an empty goopie list instead of raising IndexError.
        """
        goopies = self.zapper.goopies
        if not goopies:
            return  # nothing to chase
        target = min(
            goopies,
            key=lambda g: math.hypot(g.x - self.zapper.x, g.y - self.zapper.y),
        )
        # Sign of the offset on each axis: -1, 0 or +1.
        dx = (target.x > self.zapper.x) - (target.x < self.zapper.x)
        dy = (target.y > self.zapper.y) - (target.y < self.zapper.y)
        if dx or dy:
            self.zapper.move(dx * self.zapper.speed, dy * self.zapper.speed)
|
{"/models/Zapper.py": ["/managers/ZapperUpdateManager.py", "/Config.py"], "/listeners/KeyboardListener.py": ["/Config.py"], "/main.py": ["/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py", "/helpers/StateCreator.py", "/listeners/GoopieConsumerListener.py", "/listeners/KeyboardListener.py"], "/models/Goopie.py": ["/Config.py", "/managers/GoopieUpdateManager.py"], "/helpers/StateCreator.py": ["/Config.py", "/models/Tank.py", "/models/Zapper.py", "/models/Goopie.py", "/models/Pellet.py"]}
|
16,190
|
lauranewland/gsdc-webapp-flask
|
refs/heads/master
|
/model.py
|
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin
# Shared SQLAlchemy handle; bound to the Flask app later by connect_to_db().
db = SQLAlchemy()
def connect_to_db(flask_app):
    """Bind the module-level ``db`` handle to *flask_app* and create all tables.

    Mutates global state: the URI must be set before ``init_app`` and the app
    must be bound before ``create_all``, so statement order here matters.
    """
    flask_app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///./gsdc.db"
    db.app = flask_app
    db.init_app(flask_app)
    # Creates database tables (no-op for tables that already exist)
    db.create_all()
    print('Connected to the db!')
class Users(db.Model, UserMixin):
    """Data Model for a User (one row per club member)."""

    # Creates a table of users
    __tablename__ = 'users'

    # Defines the Schema for the users table
    user_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    fname = db.Column(db.String(50), nullable=False)
    lname = db.Column(db.String(50), nullable=False)
    # Login identity; uniqueness enforced at the DB level.
    email = db.Column(db.String(100), nullable=False, unique=True)
    address = db.Column(db.String(100), nullable=True)
    city = db.Column(db.String(50), nullable=True)
    zip_code = db.Column(db.String(10), nullable=True)
    phone = db.Column(db.String(15), nullable=True)
    pref_communication = db.Column(db.String(50), nullable=True)
    print_permissions = db.Column(db.String(5), nullable=True)
    # NOTE(review): crud.create_user stores a werkzeug password *hash* here,
    # which is longer than 50 chars — harmless on SQLite (lengths are not
    # enforced) but verify before moving to a stricter backend.
    password = db.Column(db.String(50), nullable=True)
    member_type = db.Column(db.String(100), nullable=True)
    member_standing = db.Column(db.String(25), default='Good')
    other_orgs = db.Column(db.Text)
    num_of_gsd = db.Column(db.Integer)
    num_breedings = db.Column(db.Integer)

    def get_id(self):
        # Flask-Login hook: UserMixin.get_id expects ``self.id``; this model's
        # primary key is ``user_id``, so the override is required.
        return self.user_id

    # app_date = db.Column(Date)
    # # co_app_fname = db.Column(db.String(50))
    # # co_app_lname = db.Column(db.String(50))
    # # co_app_email = db.Column(db.String(100))

    def __repr__(self):
        return f'<user_id={self.user_id}, fname={self.fname}, lname={self.lname}>'
class Interest(db.Model):
    """Data Model for User Interest (one row of activity flags per user)."""

    # Creates a table of user interests
    __tablename__ = 'interests'

    # Defines the Schema for the users interest table
    interest_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'))
    # One boolean per club activity, taken from the signup-form checkboxes.
    obedience = db.Column(db.Boolean)
    rally = db.Column(db.Boolean)
    conformation = db.Column(db.Boolean)
    agility = db.Column(db.Boolean)
    herding = db.Column(db.Boolean)
    scentwork = db.Column(db.Boolean)
    fun_match = db.Column(db.Boolean)
    shep_o_gram = db.Column(db.Boolean)
    training = db.Column(db.Boolean)
    hospitality = db.Column(db.Boolean)
    fundraising = db.Column(db.Boolean)
    gsd_fun_day = db.Column(db.Boolean)
    demo_mn_fair = db.Column(db.Boolean)
    annual_banquet = db.Column(db.Boolean)
    breeding = db.Column(db.Boolean)
    # Free-text "other" interest (not a checkbox).
    other = db.Column(db.String(100))

    def __repr__(self):
        return f'<interest_id={self.interest_id}, obedience={self.obedience}, training={self.training}>'
|
{"/server.py": ["/model.py", "/crud.py"], "/crud.py": ["/model.py", "/server.py"]}
|
16,191
|
lauranewland/gsdc-webapp-flask
|
refs/heads/master
|
/server.py
|
from flask import (Flask, render_template, request, flash, session, redirect, jsonify)
from werkzeug.security import check_password_hash
from model import connect_to_db, Users
from jinja2 import StrictUndefined
import crud
from flask_login import LoginManager, login_user, login_required, current_user, logout_user
# Creates an instance of Flask
app = Flask(__name__)
# NOTE(review): hard-coded development secret key — move to config/env before
# any non-development deployment.
app.secret_key = "dev"
# Fail loudly on undefined template variables instead of rendering blanks.
app.jinja_env.undefined = StrictUndefined

# Creates an instance of Flask LoginManager
login_manager = LoginManager()
# Endpoint that @login_required redirects anonymous users to.
login_manager.login_view = 'app.login'
login_manager.init_app(app)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a stored session id to a Users row."""
    numeric_id = int(user_id)
    return Users.query.get(numeric_id)
@app.route('/')
def homepage():
    """Serve the public landing page."""
    page = render_template('homepage.html')
    return page
@app.route('/users', methods=['POST'])
def register_user():
    """Create a new user from the membership signup form.

    Checks whether the submitted email already exists; if so flashes a
    message and returns to the signup page, otherwise creates the Users row
    and its companion Interest row.

    Refactor: the original read ~30 form fields one assignment at a time;
    the fields are now pulled through two name-driven dict comprehensions
    and forwarded as keyword arguments (same field names, same effects).
    """
    # Free-text fields, passed through to crud.create_user unchanged.
    profile_fields = (
        'fname', 'lname', 'email', 'address', 'city', 'zip_code', 'phone',
        'pref_communication', 'print_permissions', 'member_type', 'password',
        'other_orgs', 'num_of_gsd', 'num_breedings',
    )
    profile = {name: request.form.get(name) for name in profile_fields}

    # Checkbox fields, coerced to bool exactly as before (missing -> False).
    interest_fields = (
        'obedience', 'rally', 'conformation', 'agility', 'herding',
        'scentwork', 'fun_match', 'shep_o_gram', 'training', 'hospitality',
        'fundraising', 'gsd_fun_day', 'demo_mn_fair', 'annual_banquet',
        'breeding',
    )
    interests = {name: bool(request.form.get(name)) for name in interest_fields}
    interests['other'] = request.form.get('other')  # free-text, not a checkbox

    # Queries database on the email address; reject duplicates.
    if crud.get_user_by_email(profile['email']):
        flash('Email Already Exists.')
        return redirect('/signup')

    new_user = crud.create_user(**profile)
    crud.create_user_interest(new_user.user_id, **interests)
    flash('Membership Application Submitted.')
    return redirect('/')
@app.route('/signup')
def signup_page():
    """Serve the membership application form."""
    page = render_template('membership_signup.html')
    return page
@app.route('/user')
def all_users():
    """List every registered user."""
    everyone = crud.get_all_users()
    # Template expects the collection under the (singular) name "user".
    return render_template('all_users.html', user=everyone)
@app.route('/search', methods=["GET", "POST"])
def search_database():
    """Search members: interest-column match first, then general field search.

    Fixes: removed leftover debug print()s (they echoed raw user input to
    stdout) and replaced ``len(users) != 0`` with idiomatic truthiness.
    On a plain GET there is no form data, so user_input is None and
    crud.get_user_interest returns an empty list.
    """
    user_input = request.form.get('meminput')
    users = crud.get_user_interest(user_input)
    if not users:
        # No interest-column match: fall back to matching user fields.
        users = crud.get_user(user_input)
    return render_template('search.html', name=current_user.fname, users=users)
@app.route('/login')
def login():
    """Render the login form.

    NOTE(review): '/login' is also registered by login_post with
    methods=['GET', 'POST'] — confirm which endpoint Flask serves for GET.
    """
    page = render_template('login.html')
    return page
@app.route('/login', methods=['GET', 'POST'])
def login_post():
    """Validate login credentials and start a session.

    On success, creates a Flask-Login session and renders the landing page;
    on a wrong password or unknown email, flashes a message and redirects
    back to the login form.

    Fixes: removed the debug print()s — one of them wrote the user's
    *plaintext password* to stdout, a credential leak in server logs.
    """
    email = request.form.get('useremail')
    password = request.form.get('upassword')
    remember = True if request.form.get('remember') else False
    # Placeholder so login_landing.html always receives a users list.
    users = []
    try:
        # Queries database on the email address and stores all data in user
        user = crud.get_user_by_email(email)
        if check_password_hash(user.password, password):
            # Creates Session for the logged in user
            login_user(user, remember=remember)
            flash('Successful Login')
        else:
            flash('Password Incorrect')
            return redirect('/login')
    except AttributeError:
        # get_user_by_email returned None -> no .password attribute.
        flash('Email not found')
        return redirect('/login')
    return render_template('login_landing.html', name=current_user.fname, users=users)
@app.route('/login_landing')
@login_required
def login_landing():
    """Render the post-login landing page for the authenticated user."""
    first_name = current_user.fname
    return render_template('login_landing.html', name=first_name)
@app.route('/isLoggedIn')
def logged_in():
    """AJAX endpoint: report whether the current session is authenticated."""
    is_authed = current_user.is_authenticated
    return jsonify(is_authed)
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the login page."""
    logout_user()
    return redirect('/login')
# @app.route('/interest', methods=["GET", "POST"])
# def search_user_interest():
# """Takes in a request from Search.html and returns results"""
#
# # Takes in the search input
# user_input = request.form.get('memberInput')
# print(user_input)
#
# # Queries the users input against the database
# intresults = crud.get_user_interest(user_input)
# print(intresults)
#
# # Passes the query results back to Search.html
# return render_template('interest.html', intresults=intresults)
if __name__ == '__main__':
    connect_to_db(app)
    # 0.0.0.0 exposes the dev server on all interfaces; debug mode is for
    # local development only.
    app.run(host='0.0.0.0', debug=True)
|
{"/server.py": ["/model.py", "/crud.py"], "/crud.py": ["/model.py", "/server.py"]}
|
16,192
|
lauranewland/gsdc-webapp-flask
|
refs/heads/master
|
/crud.py
|
import sqlalchemy
from flask import flash
from model import Users, Interest, db, connect_to_db
from werkzeug.security import generate_password_hash
def create_user(fname, lname, email, address, city, zip_code, phone, pref_communication, print_permissions,
                member_type, password, other_orgs, num_of_gsd, num_breedings):
    """Insert a new Users row (hashing the password) and return it."""
    hashed = generate_password_hash(password, method='sha256')
    new_user = Users(fname=fname, lname=lname, email=email, address=address,
                     city=city, zip_code=zip_code, phone=phone,
                     pref_communication=pref_communication,
                     print_permissions=print_permissions, member_type=member_type,
                     password=hashed, other_orgs=other_orgs,
                     num_of_gsd=num_of_gsd, num_breedings=num_breedings)
    # Persist, then refresh so the autoincrement user_id is populated.
    db.session.add(new_user)
    db.session.commit()
    db.session.refresh(new_user)
    return new_user
def get_user_by_email(email):
    """Return the Users row matching *email*, or None when absent."""
    return Users.query.filter_by(email=email).first()
def get_all_users():
    """Return every Users row."""
    all_rows = Users.query.all()
    return all_rows
def get_user(user_input):
    """Return all Users rows where any searchable column equals *user_input*."""
    searchable = (
        Users.fname, Users.lname, Users.email, Users.city, Users.phone,
        Users.pref_communication, Users.print_permissions, Users.member_type,
        Users.other_orgs, Users.num_of_gsd, Users.num_breedings,
    )
    # Build one big OR condition across every searchable column.
    condition = searchable[0] == user_input
    for column in searchable[1:]:
        condition = condition | (column == user_input)
    return Users.query.filter(condition).all()
def create_user_interest(user_id, obedience, rally, conformation, agility, herding, scentwork, fun_match, shep_o_gram,
                         training, hospitality, fundraising, gsd_fun_day, demo_mn_fair,
                         annual_banquet, breeding, other):
    """Insert an Interest row for *user_id* and return it."""
    new_interest = Interest(
        user_id=user_id, obedience=obedience, rally=rally,
        conformation=conformation, agility=agility, herding=herding,
        scentwork=scentwork, fun_match=fun_match, shep_o_gram=shep_o_gram,
        training=training, hospitality=hospitality, fundraising=fundraising,
        gsd_fun_day=gsd_fun_day, demo_mn_fair=demo_mn_fair,
        annual_banquet=annual_banquet, breeding=breeding, other=other,
    )
    # Persist, then refresh so the autoincrement interest_id is populated.
    db.session.add(new_interest)
    db.session.commit()
    db.session.refresh(new_interest)
    return new_interest
def get_user_interest(user_input):
    """Return users rows whose Interest row has the *user_input* flag set.

    SECURITY FIX: the original interpolated *user_input* directly into the
    SQL string (f-string), allowing SQL injection from the search box. The
    input is now validated against a whitelist of the boolean columns
    declared on the Interest model before being used as a column name
    (column names cannot be bound as parameters). Unknown input returns []
    — the same observable result the old OperationalError handler produced.
    """
    # Boolean columns of the interests table (see the Interest model).
    allowed_columns = {
        'obedience', 'rally', 'conformation', 'agility', 'herding',
        'scentwork', 'fun_match', 'shep_o_gram', 'training', 'hospitality',
        'fundraising', 'gsd_fun_day', 'demo_mn_fair', 'annual_banquet',
        'breeding',
    }
    # user_input is None on the first render of the search page.
    if user_input is None or user_input not in allowed_columns:
        return []
    try:
        query = ("SELECT * FROM users WHERE user_id IN "
                 f"(SELECT user_id FROM interests WHERE {user_input} = true)")
        # Executes the Query
        db_cursor = db.session.execute(query)
        return db_cursor.fetchall()
    # Kept as a safety net for schema drift; whitelisting should prevent it.
    except sqlalchemy.exc.OperationalError:
        return []
if __name__ == '__main__':
    # Allow running this module directly for ad-hoc DB work; the import is
    # done here (not at module top) because server.py also imports crud.
    from server import app
    connect_to_db(app)
|
{"/server.py": ["/model.py", "/crud.py"], "/crud.py": ["/model.py", "/server.py"]}
|
16,195
|
rongliangzi/CrowdCountingBaseline
|
refs/heads/master
|
/modeling/__init__.py
|
from .utils import *
from .m_vgg import M_VGG
from .Res50_C3 import Res50
from .sanet import SANet
from .inceptionv3_cc import Inception3CC
from .def_ccnet import DefCcNet
from .cannet import CANNet
from .csrnet import CSRNet
from .u_vgg import U_VGG
|
{"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]}
|
16,196
|
rongliangzi/CrowdCountingBaseline
|
refs/heads/master
|
/modeling/csrnet.py
|
import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
from .utils import *
class CSRNet(nn.Module):
    """CSRNet crowd-counting model: VGG-16 front end, dilated back end, and a
    1x1 density-map head.

    Args:
        load_model: path to a full checkpoint to restore; when empty, the
            network is random-initialised and then overlaid with matching
            parameters from a hard-coded VGG-16 ImageNet checkpoint.
        downsample: stored but not read anywhere in this class — presumably
            used by callers; TODO confirm.
        bn: selects the batch-norm VGG checkpoint path. NOTE(review): the
            feature layers are built WITHOUT batch norm regardless of this
            flag, so the bn checkpoint's extra keys are skipped by the
            name/size filter in _init_weights — confirm this is intended.
    """
    def __init__(self, load_model='', downsample=1, bn=False):
        super(CSRNet, self).__init__()
        self.downsample = downsample
        self.bn = bn
        # VGG-16 layout up to conv4_3 (three max-pools -> 1/8 resolution).
        self.features_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
        self.features = make_layers(self.features_cfg)
        # Dilated (rate-2) back end built by make_layers(dilation=True).
        self.backend_cfg = [512, 512, 512, 256, 128, 64]
        self.backend = make_layers(self.backend_cfg, in_channels=512, dilation=True)
        self.output_layer = nn.Conv2d(64, 1, kernel_size=1)
        self.load_model = load_model
        self._init_weights()
    def forward(self, x_in):
        """Map an image batch to a non-negative density map (abs of the head)."""
        x = self.features(x_in)
        x = self.backend(x)
        x = self.output_layer(x)
        x = torch.abs(x)
        return x
    def _random_init_weights(self):
        # Gaussian init for convs; unit weight / zero bias for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _init_weights(self):
        # Either restore a full checkpoint, or random-init and copy over the
        # pretrained VGG-16 parameters whose names AND shapes both match.
        if not self.load_model:
            pretrained_dict = dict()
            model_dict = self.state_dict()
            # NOTE(review): hard-coded, machine-specific checkpoint paths.
            path = "/home/datamining/Models/vgg16_bn-6c64b313.pth" if self.bn else '/home/datamining/Models/vgg16-397923af.pth'
            pretrained_model = torch.load(path)
            print(path,' loaded!')
            self._random_init_weights()
            # load the pretrained vgg16 parameters
            for k, v in pretrained_model.items():
                if k in model_dict and model_dict[k].size() == v.size():
                    pretrained_dict[k] = v
                    print(k, ' parameters loaded!')
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)
        else:
            self.load_state_dict(torch.load(self.load_model))
            print(self.load_model,' loaded!')
|
{"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]}
|
16,197
|
rongliangzi/CrowdCountingBaseline
|
refs/heads/master
|
/modeling/utils.py
|
import torch.nn as nn
import torch
class conv_act(nn.Module):
    '''
    basic module for conv-(bn-)activation: Conv2d -> optional BatchNorm2d ->
    activation ('relu', 'prelu' or 'swish').

    same_padding pads so that stride-1 output keeps the input spatial size,
    accounting for dilation.
    '''
    def __init__(self, in_channels, out_channels, kernel_size, NL='relu', dilation=1, stride=1, same_padding=True, use_bn=False):
        super(conv_act, self).__init__()
        # "same" padding for the effective (dilated) kernel size.
        padding = (kernel_size + (dilation - 1) * (kernel_size - 1) - 1) // 2 if same_padding else 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation)
        self.use_bn = use_bn
        if self.use_bn:
            self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True)
        if NL == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif NL == 'prelu':
            self.activation = nn.PReLU()
        elif NL == 'swish':
            self.activation = Swish()
        else:
            # FIX: the original silently skipped the assignment for an unknown
            # NL, deferring the failure to an AttributeError inside forward();
            # fail fast with a clear message instead.
            raise ValueError(f"unsupported non-linearity: {NL!r}")

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        x = self.activation(x)
        return x
def make_layers(cfg, in_channels=3, batch_norm=False, dilation=False, NL='relu', se=False):
    """Build a VGG-style nn.Sequential from *cfg*.

    Each int in cfg adds a 3x3 conv (+ optional BatchNorm) + activation
    (+ optional SEModule); the sentinel 'M' adds a 2x2 max-pool. When
    dilation is True, convs use dilation/padding 2.
    """
    d_rate = 2 if dilation else 1
    layers = []
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate, dilation=d_rate)
        if NL == 'prelu':
            act = nn.PReLU()
        elif NL == 'swish':
            act = Swish()
        else:
            act = nn.ReLU(inplace=True)
        layers.append(conv2d)
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(act)
        if se:
            layers.append(SEModule(v))
        in_channels = v
    return nn.Sequential(*layers)
class DilationPyramid(nn.Module):
    '''
    aggregate different dilations: four parallel 1x1 -> dilated-3x3 branches,
    concatenated and fused by a final 1x1 conv.
    '''
    def __init__(self, in_channels, out_channels, dilations=[1,2,3,6], NL='relu'):
        super(DilationPyramid, self).__init__()
        assert len(dilations)==4, 'length of dilations must be 4'
        def branch(rate):
            # 1x1 channel reduction followed by a dilated 3x3 conv.
            return nn.Sequential(conv_act(in_channels, out_channels, 1, NL),
                                 conv_act(out_channels, out_channels, 3, NL, dilation=rate))
        self.conv1 = branch(dilations[0])
        self.conv2 = branch(dilations[1])
        self.conv3 = branch(dilations[2])
        self.conv4 = branch(dilations[3])
        self.conv5 = conv_act(4 * out_channels, 4 * out_channels, 1, NL)
    def forward(self, x):
        pieces = [self.conv1(x), self.conv2(x), self.conv3(x), self.conv4(x)]
        return self.conv5(torch.cat(pieces, 1))
class SizePyramid(nn.Module):
    '''
    aggregate different filter sizes, [1,3,5,7] like ADCrowdNet's decoder
    '''
    def __init__(self, in_channels, out_channels, NL='relu'):
        super(SizePyramid, self).__init__()
        def branch(k):
            # 1x1 reduction followed by a kxk conv; k=None keeps reduction only.
            if k is None:
                return nn.Sequential(conv_act(in_channels, out_channels, 1, NL))
            return nn.Sequential(conv_act(in_channels, out_channels, 1, NL),
                                 conv_act(out_channels, out_channels, k, NL))
        self.conv1 = branch(3)
        self.conv2 = branch(5)
        self.conv3 = branch(7)
        self.conv4 = branch(None)
        self.conv5 = conv_act(4 * out_channels, 4 * out_channels, 1, NL)
    def forward(self, x):
        merged = torch.cat([self.conv1(x), self.conv2(x), self.conv3(x), self.conv4(x)], 1)
        return self.conv5(merged)
class DepthPyramid(nn.Module):
    '''
    aggregate different depths, like TEDNet's decoder
    '''
    def __init__(self, in_channels, out_channels, NL='relu'):
        super(DepthPyramid, self).__init__()
        def branch(depth):
            # 1x1 reduction followed by `depth` stacked 3x3 convs.
            mods = [conv_act(in_channels, out_channels, 1, NL)]
            mods.extend(conv_act(out_channels, out_channels, 3, NL) for _ in range(depth))
            return nn.Sequential(*mods)
        self.conv1 = branch(1)
        self.conv2 = branch(2)
        self.conv3 = branch(3)
        self.conv4 = branch(0)
        self.conv5 = conv_act(4 * out_channels, 4 * out_channels, 1, NL)
    def forward(self, x):
        pieces = [self.conv1(x), self.conv2(x), self.conv3(x), self.conv4(x)]
        return self.conv5(torch.cat(pieces, 1))
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""
    def forward(self, x):
        gate = torch.sigmoid(x)
        return gate * x
class SPModule(nn.Module):
    """Scale pyramid: four parallel dilated 3x3 convs (rates 2/4/8/12) whose
    concatenated outputs are fused back to in_channels by a 1x1 conv."""
    def __init__(self, in_channels, branch_out=None):
        super(SPModule, self).__init__()
        if not branch_out:
            # ensure the in and out have the same channels.
            branch_out = in_channels
        def dilated_branch(rate):
            return nn.Sequential(
                nn.Conv2d(in_channels, branch_out, 3, padding=rate, dilation=rate),
                nn.ReLU(True))
        self.dilated1 = dilated_branch(2)
        self.dilated2 = dilated_branch(4)
        self.dilated3 = dilated_branch(8)
        self.dilated4 = dilated_branch(12)
        self.down_channels = nn.Sequential(nn.Conv2d(branch_out * 4, in_channels, 1), nn.ReLU(True))
    def forward(self, x):
        branches = [self.dilated1(x), self.dilated2(x), self.dilated3(x), self.dilated4(x)]
        return self.down_channels(torch.cat(branches, 1))
class SEModule(nn.Module):
    """Squeeze-and-excitation block: global average pool -> channel bottleneck
    (Swish) -> expansion, applied as a sigmoid gate on the input channels."""
    def __init__(self, in_, reduction=16):
        super().__init__()
        squeeze_ch = in_ // reduction
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_, squeeze_ch, kernel_size=1, stride=1, padding=0, bias=True),
            Swish(),
            nn.Conv2d(squeeze_ch, in_, kernel_size=1, stride=1, padding=0, bias=True),
        )
    def forward(self, x):
        gate = torch.sigmoid(self.se(x))
        return gate * x
|
{"/modeling/__init__.py": ["/modeling/utils.py", "/modeling/m_vgg.py", "/modeling/Res50_C3.py", "/modeling/sanet.py", "/modeling/csrnet.py", "/modeling/u_vgg.py"], "/modeling/csrnet.py": ["/modeling/utils.py"], "/modeling/Res50_C3.py": ["/modeling/utils.py"], "/modeling/u_vgg.py": ["/modeling/utils.py"], "/train_generic.py": ["/modeling/__init__.py", "/utils/functions.py"], "/modeling/m_vgg.py": ["/modeling/utils.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.