index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
22,155
|
nh-99/DigitalTA
|
refs/heads/master
|
/config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by every environment."""

    APP_NAME = "AutoHomeGrade"
    UPLOAD_FOLDER = 'uploads/'
    # NOTE(review): the hard-coded fallback secret key must not be used in
    # production — require SECRET_KEY from the environment there.
    SECRET_KEY = os.environ.get("SECRET_KEY") or "asdjfklj23k409UULKJKL#J$jkljflkajsdf"
class DevConfig(Config):
    """Development configuration: debug server, local SQLite database."""

    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, "test.db")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProdConfig(Config):
    """Production configuration.

    Debug mode must be disabled in production: the Flask debugger exposes an
    interactive console that permits arbitrary code execution.
    """

    # FIX: was True — copy/paste from DevConfig; a serious bug in production.
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, "prod.db")
    SQLALCHEMY_TRACK_MODIFICATIONS = False
# Registry mapping an environment name to its configuration class.
config = {
    "default": DevConfig,
    "dev": DevConfig,
    "prod": ProdConfig,
}
|
{"/models.py": ["/config.py"], "/app.py": ["/models.py", "/config.py"]}
|
22,156
|
nh-99/DigitalTA
|
refs/heads/master
|
/models.py
|
import datetime, uuid
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
# Stand-alone Flask app used only to bind the shared SQLAlchemy instance;
# app.py builds its own app and re-attaches `db` via db.init_app().
app = Flask(__name__)
app.config.from_object(config['dev'])  # NOTE(review): hard-codes dev config — TODO confirm for prod
db = SQLAlchemy(app)
class Assignment(db.Model):
    """An uploaded homework submission and its grading state."""

    __tablename__ = "assignments"

    id = db.Column(db.String(50), primary_key=True)   # UUID assigned at upload
    name = db.Column(db.String(100))                  # original uploaded file name
    # FIX: pass the callable, not its result — `utcnow()` was evaluated once
    # at import time, stamping every row with the same timestamp.
    time = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    output = db.Column(db.String(1000))               # captured program output
    graded = db.Column(db.Boolean, default=False)
    score = db.Column(db.Float)
class HomeworkCheck(db.Model):
    """A stored stdin fixture used to exercise submitted homework programs."""

    __tablename__ = 'checks'

    # FIX: wrap uuid4 in a lambda — `str(uuid.uuid4())` was evaluated once at
    # class-definition time, giving every row the same default primary key.
    id = db.Column(db.String(50), primary_key=True, default=lambda: str(uuid.uuid4()))
    stdin = db.Column(db.String(2000))
|
{"/models.py": ["/config.py"], "/app.py": ["/models.py", "/config.py"]}
|
22,157
|
nh-99/DigitalTA
|
refs/heads/master
|
/app.py
|
# 3rd Party Imports
from flask import Flask, render_template, session, request, redirect, url_for, jsonify
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import secure_filename
import os, uuid, subprocess
# Local imports
import models
from models import Assignment, HomeworkCheck
from config import config
# Application setup: build the Flask app with the dev configuration and
# attach the shared SQLAlchemy instance defined in models.py.
app = Flask(__name__)
app.config.from_object(config['dev'])
models.db.init_app(app)
# Index page
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
# File upload route
@app.route('/upload', methods=['POST'])
def homework_upload():
    """Persist an uploaded jar + source zip and register the assignment."""
    jar_file = request.files['homework']
    src_file = request.files['homeworksrc']

    # One UUID names both artifacts on disk and keys the database row.
    upload_id = str(uuid.uuid4())
    upload_dir = app.config['UPLOAD_FOLDER']
    jar_file.save(os.path.join(upload_dir, secure_filename(upload_id + '.jar')))
    src_file.save(os.path.join(upload_dir, secure_filename(upload_id + '.zip')))

    # Record the submission so it can be checked and graded later.
    record = Assignment(id=upload_id, name=jar_file.filename)
    models.db.session.add(record)
    models.db.session.commit()
    return redirect('/')
@app.route('/check', methods=['GET'])
def create_check():
    """Create a HomeworkCheck from the `stdin` query parameter.

    NOTE(review): this mutates the database on a GET request — confirm
    whether it should instead be a POST.
    """
    new_check = HomeworkCheck(stdin=request.args.get('stdin'))
    models.db.session.add(new_check)
    models.db.session.commit()
    return redirect('/')
@app.route('/check/<string:id>')
def check_homework(id):
    """Run an assignment's jar against a stored check and save its output.

    :param id: Assignment primary key (UUID assigned at upload).
    """
    check_id = request.args.get('check')
    check = HomeworkCheck.query.filter_by(id=check_id).first()
    assignment = Assignment.query.filter_by(id=id).first()
    # SECURITY FIX: the previous implementation interpolated user-supplied
    # stdin into a shell string executed via os.popen(), allowing arbitrary
    # shell command injection. Run java directly (no shell) and feed the
    # check's stdin through the process's standard input instead. The
    # trailing newline matches what `echo` previously appended.
    proc = subprocess.run(
        ['java', '-jar', 'uploads/' + assignment.id + '.jar'],
        input=check.stdin + '\n',
        capture_output=True,
        text=True,
    )
    # Save the output to the assignment
    assignment.output = proc.stdout
    models.db.session.add(assignment)
    models.db.session.commit()
    return redirect('/grade/' + assignment.id)
@app.route('/grade/<string:id>')
def grade_homework(id):
    """Show the grading page for a single assignment."""
    record = Assignment.query.filter_by(id=id).first()
    return render_template('grade.html', assignment=record)
@app.route('/grade/finish', methods=['POST'])
def finish_grading():
    """Record a final score for an assignment and mark it as graded."""
    assignment_id = request.form.get('id')
    points = request.form.get('points')

    record = Assignment.query.filter_by(id=assignment_id).first()
    record.graded = True
    record.score = float(points)
    models.db.session.add(record)
    models.db.session.commit()
    return redirect('/')
if __name__ == '__main__':
    # Create tables on first run, then serve on all interfaces (dev server;
    # not suitable as-is for production deployment).
    models.db.create_all()
    app.run(host='0.0.0.0', port=5000)
|
{"/models.py": ["/config.py"], "/app.py": ["/models.py", "/config.py"]}
|
22,185
|
hysds/chimera
|
refs/heads/develop
|
/tests/test_run_pge_docker.py
|
import json
import os
from swot_chimera import run_pge_docker
if __name__ == '__main__':
    """
    This is for testing of PGE Job Submission JSON
    Comment out from hysds.celery import app in query_util.py
    Comment out from hysds_commons.job_utils import resolve_hysds_job in run_pge_docker.py
    In commons/query_util.py, overwrite GRQ URL with:
    ES_URL = "http://127.0.0.1:9300"
    In run_pge_docker.construct_job_payload()
    make the following change:
    if test_mode is False:
    #job = resolve_hysds_job(job_type, job_queue, params=params, job_name= job_name, enable_dedup= True, tags=tags, payload_hash= payload_hash)
    job = {}
    Update the following sample files:
    sf_context: should be the sciflo context of an actual workflow run
    runconfig: output from the input preprocessor
    job_json: this is optional, it is used to determine if the workflow has been retried.
    This is currently only used at step PGE_L0B_Radiometer.
    It should be the _job.json of an actual workflow run
    If not testing for an L0B run please update:
    pge_config_file: path to PGE's config file
    run this script
    """
    # Directory containing this test module; sample files live beside it.
    # (FIX: the previous version computed this once as a dead statement and
    # then re-evaluated the same expression for every path.)
    here = os.path.dirname(os.path.realpath(__file__))

    # Testing L0B PGE job submission
    sf_context = here + "/test-files/L1B_HR_SLC-sfcontext.json"
    # FIX: use a context manager so the runconfig file handle is closed;
    # the previous bare open() leaked it.
    with open(here + "/test-files/L1B_HR_SLC-runconfig.json", "r") as f:
        runconfig = json.load(f)
    pge_config_file = os.path.abspath(os.path.join(
        os.path.realpath(__file__), "../../",
        "swot_chimera/configs/pge_configs/examples/PGE_L1B_HR_SLC.json"))
    sys_config_file = os.path.abspath(os.path.join(
        os.path.realpath(__file__), "../..",
        "swot_chimera/configs/sys.config.json"))
    # NOTE(review): job_json is computed but never passed to submit_pge_job;
    # per the docstring it is only needed when testing an L0B retry.
    job_json = here + "/test-files/L0B_job.json"
    run_pge_docker.submit_pge_job(sf_context, runconfig, pge_config_file,
                                  sys_config_file, wuid="1213",
                                  job_num="231232")
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,186
|
hysds/chimera
|
refs/heads/develop
|
/chimera/logger.py
|
import logging
from enum import Enum
# set logger and custom filter to handle being run from sciflo
# basicConfig configures the root logger once; the module-level logger
# defined below manages its own level via LogLevels.set_level.
log_format = "[%(asctime)s: %(levelname)s/%(funcName)s] %(message)s"
logging.basicConfig(format=log_format, level=logging.INFO)
class LogLevels(Enum):
    """Enumeration of the logging level names this module supports."""

    DEBUG = "DEBUG"
    INFO = "INFO"
    WARNING = "WARNING"
    ERROR = "ERROR"

    def __str__(self):
        return self.value

    @staticmethod
    def list():
        """Return all supported level names as plain strings."""
        return [member.value for member in LogLevels]

    @staticmethod
    def set_level(level):
        """Set the module logger to `level`; raise RuntimeError if invalid."""
        # Table dispatch replaces the if/elif chain; unknown names fall
        # through to the same RuntimeError as before.
        name_to_level = {
            LogLevels.DEBUG.value: logging.DEBUG,
            LogLevels.INFO.value: logging.INFO,
            LogLevels.WARNING.value: logging.WARNING,
            LogLevels.ERROR.value: logging.ERROR,
        }
        if level not in name_to_level:
            raise RuntimeError("{} is not a valid logging level. Should be one of the following: {}".format(
                level, LogLevels.list()))
        logger.setLevel(name_to_level[level])
class LogFilter(logging.Filter):
    """Guarantee every record carries an `id` attribute.

    Records emitted from sciflo may lack `id`; default it to '--' so
    formatting never fails. All records pass the filter.
    """

    def filter(self, record):
        # getattr defaults missing ids; existing ids are kept unchanged.
        record.id = getattr(record, 'id', '--')
        return True
# Module-wide logger instance; the 'nisar_chimera' name appears historical
# for this chimera package — TODO confirm whether it should be renamed.
logger = logging.getLogger('nisar_chimera')
logger.setLevel(logging.INFO)
logger.addFilter(LogFilter())
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,187
|
hysds/chimera
|
refs/heads/develop
|
/chimera/input_preprocessor.py
|
#!/usr/bin/env python
"""
Contributors:
- Sujen Shah
- Michael Cayanan
- Namrata Malarout
This is the first step of Chimera called Input Preprocessor (IPP)
The Input preprocessor runs all the preconditions and constructs the configuration required to run an algorithm (PGE)
"""
from chimera.logger import logger
from chimera.precondition_evaluator import PreConditionEvaluator
def process(sf_context, chimera_config_file, pge_config_filepath, settings_file):
    """
    Process the inputs to check if the preconditions for the provided PGE are satisfied.
    :param sf_context: Input context (sciflo context or post processor output)
    :param chimera_config_file: Chimera config file.
    :param pge_config_filepath: path to the pge config json file
    :param settings_file: Settings file.
    :return: python dict containing context for the PGE to run
    """
    logger.info("Starting input_preprocessor step.")
    evaluator = PreConditionEvaluator(
        sf_context, chimera_config_file, pge_config_filepath, settings_file)
    output_context = evaluator.evaluate()
    logger.info("Finished input_preprocessor step.")
    return output_context


if __name__ == '__main__':
    pass
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,188
|
hysds/chimera
|
refs/heads/develop
|
/chimera/post_processor.py
|
#!/usr/bin/env python
"""
Contributors:
- Namrata Malarout
- Michael Cayanan
- Sujen Shah
The Post Processor queries:
1. Mozart for job info
2. GRQ for product metadata
and creates a context.json (not the same as _context.json)
"""
from chimera.logger import logger
from chimera.postprocess_evaluator import PostProcessor
def post_process(sf_context, job_result, chimera_config_file, pge_config_file, settings_file, test_mode=False):
    """
    Create a [PGE_type]_context.json file after a PGE run.

    The file's purpose is to pass metadata of the previous smap_sciflo
    process (PGE run) — the product produced and the job status — to the
    next step's input preprocessor.

    JOB Status Codes:
        -3 -> job deduped against a failed, queued/updated job
        -2 -> job deduped against a completed job
        -1 -> failed (handled at commons.sciflo_util)
         0 -> never ran (default value in document)
         1 -> running (set in run_pge_docker.py)
         2 -> completed successfully

    :param sf_context: sciflo context of the PGE run
    :param job_result: job result (job_id) of the PGE run
    :param chimera_config_file: path to the Chimera config file
    :param pge_config_file: path of the config file of specific PGE type
    :param settings_file: path to the settings file
    :param test_mode: unused in this function — TODO confirm; kept for
        interface compatibility
    :return: output of PostProcessor.process()
    """
    logger.info("Starting post_preprocessor step.")
    processor = PostProcessor(sf_context, chimera_config_file, pge_config_file, settings_file, job_result)
    output_context = processor.process()
    logger.info("Finished post_processor step.")
    return output_context


if __name__ == '__main__':
    pass
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,189
|
hysds/chimera
|
refs/heads/develop
|
/chimera/postprocess_evaluator.py
|
import json
import os
import traceback
from importlib import import_module
from chimera.logger import logger
from chimera.commons.conf_util import YamlConf, load_config
from chimera.commons.constants import ChimeraConstants
from chimera.postprocess_functions import PostProcessFunctions
class PostProcessor(object):
    """Run the configured post-processing steps after a PGE job and write
    the resulting pseudo context file."""

    def __init__(
        self,
        sf_context,
        chimera_config_filepath,
        pge_config_filepath,
        settings_file,
        job_result
    ):
        """
        :param sf_context: sciflo context dict, or path to its JSON file
        :param chimera_config_filepath: path to the Chimera YAML config
        :param pge_config_filepath: path to the PGE config file
        :param settings_file: path to the settings YAML (may be falsy)
        :param job_result: PGE job result dict, or path to its JSON file
        """
        # load context file
        if isinstance(sf_context, dict):
            self._sf_context = sf_context
        elif isinstance(sf_context, str):
            # FIX: use a context manager — json.load(open(...)) leaked the
            # file handle.
            with open(sf_context, "r") as f:
                self._sf_context = json.load(f)
        logger.debug("Loaded context file: {}".format(json.dumps(self._sf_context)))

        # load pge config file
        self._pge_config = load_config(pge_config_filepath)
        logger.debug("Loaded PGE config file: {}".format(json.dumps(self._pge_config)))

        # load the postprocessor section of the Chimera config
        try:
            self._chimera_config = YamlConf(chimera_config_filepath).cfg
            self._module_path = self._chimera_config.get("postprocessor", {}).get(
                "module_path", None
            )
            if not self._module_path:
                # FIX: the error previously said 'preprocessor' although the
                # 'postprocessor' section is what is read here.
                raise RuntimeError(
                    "'module_path' must be defined in the 'postprocessor' section of the "
                    "Chimera Config file '{}'".format(chimera_config_filepath)
                )
            self._class_name = self._chimera_config.get("postprocessor", {}).get(
                "class_name", None
            )
            if not self._class_name:
                raise RuntimeError(
                    "'class_name' must be defined in the 'postprocessor' section of the "
                    "Chimera Config file '{}'".format(chimera_config_filepath)
                )
        except Exception as e:
            raise RuntimeError(
                "Could not read preconditions definition file : {}".format(e)
            )

        # load Settings file
        # NOTE(review): if settings_file is falsy, self._settings is never
        # set — TODO confirm callers always provide one.
        try:
            if settings_file:
                settings_file = os.path.abspath(os.path.normpath(settings_file))
                self._settings = YamlConf(settings_file).cfg
        except Exception as e:
            file_name = settings_file if settings_file else "~/verdi/etc/settings.yaml"
            raise RuntimeError(
                "Could not read settings file '{}': {}".format(file_name, e)
            )

        # load PGE job result
        if isinstance(job_result, dict):
            self._job_result = job_result
        elif isinstance(job_result, str):
            with open(job_result, "r") as f:
                self._job_result = json.load(f)
            # for a file-based result, the work dir is the context file's dir
            self._job_result["work_dir"] = os.path.dirname(sf_context)
        logger.debug("Loaded job result: {}".format(json.dumps(self._job_result)))

    def prepare_psuedo_context(self, psuedo_context):
        """
        Write the gathered job and product metadata information to the psuedo context file.
        :return: str -- name of the written context file
        """
        logger.debug(
            "Preparing psuedo_context file after {} run".format(
                self._pge_config.get("pge_name")
            )
        )
        out_name = "{}_context.json".format(self._pge_config.get("pge_name"))
        # FIX: write out job context under `with` so the handle is closed
        # even if serialization fails.
        with open(out_name, "w") as psu_context:
            psu_context.write(json.dumps(psuedo_context))
        return out_name

    def process(self):
        """Import the configured PostProcessFunctions subclass, run the
        configured steps, and write the pseudo context file.

        :return: str -- name of the new context file
        :raises RuntimeError: on any failure (original error is logged)
        """
        new_context = dict()
        try:
            module = import_module(self._module_path)
            cls = getattr(module, self._class_name)
            if not issubclass(cls, PostProcessFunctions):
                raise RuntimeError(
                    "Class must be a subclass of {}: {}".format(
                        PostProcessFunctions.__name__, cls.__name__
                    )
                )
            # run custom post processing steps and update the psuedo context content
            cls_object = cls(
                self._sf_context, self._pge_config, self._settings, self._job_result
            )
            new_context.update(
                cls_object.run(
                    self._pge_config.get(ChimeraConstants.POSTPROCESS, list())
                )
            )
            # write to output context file
            return self.prepare_psuedo_context(new_context)
        except Exception as e:
            logger.error(
                "Post processor failure: {}. {}".format(e, traceback.format_exc())
            )
            raise RuntimeError("Post processor failure: {}".format(e))
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,190
|
hysds/chimera
|
refs/heads/develop
|
/chimera/run_sciflo.py
|
"""
Run the NRT production pipeline
"""
import argparse
import os
import json
import sys
from importlib import import_module
from chimera.logger import logger
from chimera.commons.accountability import Accountability
from chimera.commons.sciflo_util import run_sciflo
# Set up logging
LOGGER = logger
BASE_PATH = os.path.dirname(__file__)
# grabs accountability class if implemented and set in the sciflo jobspecs
def get_accountability_class(context_file):
    """Instantiate the accountability class named in the job context.

    Falls back to the base Accountability implementation when no module
    path or class name is configured, or when the configured class does not
    extend Accountability.

    :param context_file: path to the HySDS _context.json file
    :return: an Accountability (sub)class instance
    """
    work_dir = None
    context = None
    if isinstance(context_file, str):
        work_dir = os.path.dirname(context_file)
        with open(context_file, "r") as f:
            context = json.load(f)
    path = context.get("module_path")
    if "accountability_module_path" in context:
        path = context.get("accountability_module_path")
    accountability_class_name = context.get("accountability_class", None)
    # FIX: import_module(path) previously ran before any None checks, so a
    # context with no module path crashed instead of falling back.
    if path is None or accountability_class_name is None:
        LOGGER.error(
            "No accountability class specified"
        )
        return Accountability(context, work_dir)
    accountability_module = import_module(path, "nisar-pcm")
    cls = getattr(accountability_module, accountability_class_name)
    if not issubclass(cls, Accountability):
        LOGGER.error(
            "accountability class does not extend Accountability"
        )
        return Accountability(context, work_dir)
    cls_object = cls(context, work_dir)
    return cls_object
def main(sfl_file, context_file, output_folder):
    """Run the sciflo workflow with accountability tracking.

    :param sfl_file: SciFlo workflow file
    :param context_file: HySDS context file
    :param output_folder: folder for sciflo output
    :return: result of run_sciflo
    """
    sfl_file = os.path.abspath(sfl_file)
    context_file = os.path.abspath(context_file)
    # FIX: the absolute output path was computed into an unused variable
    # (`output_file`) while the raw argument was passed on; use the
    # absolute path so behavior is independent of the working directory.
    output_folder = os.path.abspath(output_folder)
    LOGGER.info("sfl_file: %s" % sfl_file)
    LOGGER.info("context_file: %s" % context_file)
    accountability = get_accountability_class(context_file)
    accountability.create_job_entry()
    result = run_sciflo(sfl_file, ["sf_context=%s" % context_file], output_folder)
    return result
if __name__ == '__main__':
    # CLI entry point: parse the three positional arguments and run the
    # workflow, propagating main()'s result as the process exit status.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("sfl_file", help="SciFlo workflow")
    parser.add_argument("context_file", help="HySDS context file")
    parser.add_argument("output_folder", help="Sciflo output file")
    args = parser.parse_args()
    sys.exit(main(args.sfl_file, args.context_file, args.output_folder))
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,191
|
hysds/chimera
|
refs/heads/develop
|
/chimera/commons/constants.py
|
"""
Add field names from PGE config files, names of functions,
match patterns or key names that can be referenced throughout code base
Note: To add new keys, please follow an alphabetical order
e.g.
LOCALIZE_KEY = "localize" # name of key found in input preprocessor output
GET_PGE_NAME = "pge_name" # name of key found in PGE config file
GET_ICE_SCLK = "getIceSclk" # name of function
"""
class ChimeraConstants(object):
    """Namespace of key names shared across the Chimera code base.

    All members are plain string constants; the class is never meant to
    hold instance state. (FIX: removed the no-op __init__ — the implicit
    default constructor behaves identically.)
    """

    # PGE's name
    PGE_NAME = "pge_name"
    # To identify the preconditions to check for
    PRECONDITIONS = "preconditions"
    # To identify the post processing steps to run
    POSTPROCESS = "postprocess"
    # Key identifying the payload in the _context file
    RUNCONFIG = "runconfig"
    # To Specify which group elements to localize
    LOCALIZE_GROUPS = "localize_groups"
    # To specify which filepaths to localize in the worker. Used by Mozart
    LOCALIZE = "localize"
    CONFIGURATION = "configuration"
    PRODUCTION_DATETIME = "ProductionDateTime"
    # Key in runconfig for list of inputs
    RC_INPUT = "InputFilePath"
    # To identify file type level conditions
    CONDITIONS = "conditions"
    # Keys for identifying in the post_processor produced context.json
    PRODUCTS_ID = "product_ids"
    # primary input key in PGE config
    PRIMARY_INPUT = "primary_input"
    # identifier token to specify empty runconfig values to be filled
    EMPTY_FIELD_IDENTIFIER = "empty_field_identifier"
    # field to specify optionals runconfig fields
    OPTIONAL_FIELDS = "optionalFields"
    # Key used in post processor to identify the metadata of all products generated
    # This is a list of dictionaries
    PRODUCTS_METADATA = "product_metadata"
    # Key used to identify output products from the previous PGE run
    PRODUCT_NAMES = "product_names"
    # Key used to identify the path of the products created by the previous PGE
    PRODUCT_PATHS = "product_paths"
    RELEASE_VERSION = "release_version"
    SIMULATE_OUTPUTS = "simulate_outputs"
    PGE_SIM_MODE = "PGE_SIMULATION_MODE"
    OUTPUT_TYPES = "output_types"
    LAST_MOD_TIME = "LastModifiedTime"
    JOB_INFO = "job_info"
    JOB_PAYLOAD = "job_payload"
    PAYLOAD_TASK_ID = "payload_task_id"
    JOB_ID_FIELD = "job_id"
    JOB_TYPES = "JOB_TYPES"
    JOB_QUEUES = "JOB_QUEUES"
    WORK_DIR = "work_dir"
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,192
|
hysds/chimera
|
refs/heads/develop
|
/chimera/postprocess_functions.py
|
import traceback
import time
import json
from hysds.es_util import get_grq_es, get_mozart_es
from chimera.commons.accountability import Accountability
from chimera.commons.constants import ChimeraConstants as chimera_consts
from chimera.logger import logger
class PostProcessFunctions(object):
MOZART_ES_ENDPOINT = "MOZART"
GRQ_ES_ENDPOINT = "GRQ"
def __init__(self, context, pge_config, settings, job_result, mozart_es=None, grq_es=None):
    """Capture run context and bind ES clients (injected or module defaults)."""
    self._context = context
    self._pge_config = pge_config
    self._settings = settings
    self._job_result = job_result
    self.accountability = Accountability(
        self._context, self._job_result.get(chimera_consts.WORK_DIR))
    # Fall back to the global clients when none are injected; injection
    # exists mainly for testing.
    self._mozart_es = mozart_es if mozart_es else get_mozart_es()
    self._grq_es = grq_es if grq_es else get_grq_es()
def run(self, function_list):
    """
    Runs the set of post processing functions passed into the given list.

    Each name in `function_list` must be a method on this (sub)class that
    returns a dict; results are merged into the job result in order.

    :param function_list: A list of post process method names defined in subclasses.
    :return: the accumulated job result dict (self._job_result updated in
             place — not a fresh dict).
    """
    # FIX: removed the dead `output_context` local; results were always
    # accumulated into self._job_result.
    logger.info(
        "function_list: {}".format(function_list)
    )
    for func in function_list:
        self._job_result.update(getattr(self, func)())
    return self._job_result
def _check_job_status(self):
    """
    Check if job is completed or deduped. If any other status then raise an error.
    :return: the job status string
    :raises ValueError: when the job is neither completed nor deduped
    """
    # read the job payload id and status from the stored result
    job_id = str(self._job_result["payload_id"])
    job_status = str(self._job_result["status"])
    logger.info("Recieved JOB ID: {} with status: {}".format(job_id, job_status))
    if job_status not in ("job-completed", "job-deduped"):
        failure_msg = "Job with job_id: {} was not completed. Status: {}".format(
            job_id, job_status
        )
        logger.info(failure_msg)
        raise ValueError(failure_msg)
    return job_status
def _get_job(self):
    """
    This function gets the staged products and context of previous PGE job
    :return: tuple(products_staged, prev_context, message, job_id)
    """
    job_id = str(self._job_result["payload_id"])
    endpoint = self.MOZART_ES_ENDPOINT
    return_job_id = None

    # Check if Jobs ES has updated job status and get job information if
    # completed/deduped
    try:
        if self._check_job_status():
            try:
                response = self.query_es(endpoint=endpoint, doc_id=job_id)
                # check if job not found
                if len(response["hits"]["hits"]) == 0:
                    raise Exception(
                        "Couldn't find record with ID in MOZART: %s, at %s"
                        % (job_id, endpoint)
                    )
            except Exception as ex:
                logger.error(
                    "Error querying MOZART for doc {}. {}. {}".format(
                        job_id, str(ex), traceback.format_exc()
                    )
                )
                raise Exception(
                    "Error querying MOZART for doc {}. {}".format(job_id, str(ex))
                )
    except Exception as ex:
        logger.error(
            "Failed to find job in MOZART. {}. {}. {}".format(
                job_id, str(ex), traceback.format_exc()
            )
        )
        raise Exception(
            "Failed to find job in MOZART. {}. {}. {}".format(
                job_id, str(ex), traceback.format_exc()
            )
        )

    # Parse job's full information to get products staged, job context.
    # If job deduped then find original job's information.
    result = response["hits"]["hits"][0]
    products_staged = None
    prev_context = None
    message = None  # using this to store information regarding deduped jobs,
    status = str(result["_source"]["status"])

    # if job was deduped then find the original job status and what products (if any) were created
    if status == "job-deduped":
        logger.info("Job was deduped")
        # query ES for the original job's status
        orig_job_id = result["_source"]["dedup_job"]
        return_job_id = orig_job_id
        try:
            orig_job_info = self.query_es(endpoint=endpoint, doc_id=orig_job_id)
            # FIX: this previously checked `response` (the first query), so a
            # missing original job record was never detected. The error
            # messages also reported the wrong id (job_id).
            if len(orig_job_info["hits"]["hits"]) == 0:
                raise Exception(
                    "Couldn't find record with ID: {}, at {}".format(
                        orig_job_id, endpoint
                    )
                )
        except Exception as ex:
            logger.error(
                "Error querying ES for doc {}. {}. {}".format(
                    orig_job_id, str(ex), traceback.format_exc()
                )
            )
            raise Exception(
                "Error querying ES for doc {}. {}".format(orig_job_id, str(ex))
            )
        # check if original job failed -> this would happen when at the moment
        # of deduplication, the original job was in 'running state', but soon
        # afterwards failed. So, by the time the status is checked in this
        # function, it may be shown as failed.
        orig_job_info = orig_job_info["hits"]["hits"][0]
        orig_job_status = str(orig_job_info["_source"]["status"])
        if orig_job_status == "job-failed":
            message = (
                "Job was deduped against a failed job with id: {},"
                " please retry sciflo.".format(orig_job_id)
            )
            logger.info(
                "Job was deduped against a job which has now failed "
                "with id: {}, Please retry sciflo.".format(orig_job_id)
            )
        elif orig_job_status == "job-started" or orig_job_status == "job-queued":
            logger.info(
                "Job was deduped against a queued/started job with "
                "id: {}. Please look at already running sciflo with "
                "same params.".format(orig_job_id)
            )
            message = (
                "Job was deduped against a queued/started job with "
                "id: {}. Please look at already running sciflo with "
                "same params.".format(orig_job_id)
            )
        elif orig_job_status == "job-completed":
            products_staged = orig_job_info["_source"]["job"]["job_info"][
                "metrics"
            ]["products_staged"]
            prev_context = orig_job_info["_source"]["context"]
            logger.info("Queried ES to get Job context and staged files info")
            message = "success"
    elif status == "job-completed":
        logger.info("Job completed")
        products_staged = result["_source"]["job"]["job_info"]["metrics"][
            "products_staged"
        ]
        prev_context = result["_source"]["context"]
        logger.info("Queried ES to get Job context and staged files info")
        message = "success"
        return_job_id = job_id
    else:
        logger.info(
            "Job was not completed. Status: {}".format(result["_source"]["status"])
        )
        message = "Job was not completed. Status: {}".format(
            result["_source"]["status"]
        )
    return products_staged, prev_context, message, return_job_id
def _create_products_list(self, products):
"""
This function creates a list of the product URLs and metadata required
for the next PGE's input preprocessor.
:param products: list of products staged after PGE run
:return: tuple( product's id, list of products' URLs, list of products'
metadata)
"""
product_id = None
products_url_list = []
products_metadata_list = []
for product in products:
input_product_id = product["id"]
# get information required for next PGE's input preprocessor
product_id = input_product_id
try:
product_url, metadata = self.get_product_info(
product_id=input_product_id
)
product_info = dict()
product_info["id"] = input_product_id
product_info["url"] = product_url
product_info["metadata"] = metadata
products_metadata_list.append(product_info)
products_url_list.append(product_url)
except Exception as ex:
raise Exception(
"Failed to get product information, {}. {}".format(
str(ex), traceback.format_exc()
)
)
return product_id, products_url_list, products_metadata_list
def query_es(
self,
endpoint,
doc_id=None,
query=None,
request_timeout=30,
retried=False,
size=1,
):
"""
This function queries ES. Not using the query util because the ES
connection is set
for the GRQ ES.
:param endpoint: the value specifies which ES endpoint to send query
can be MOZART or GRQ
:param doc_id: id of product or job
:param query: query to run
:param request_timeout: how long to wait for ES request
:param retried: flag to specify if the query has already been retried
:param size: number of results to be returned
:return: result of query
"""
result = None
if query is None and doc_id is None:
raise ValueError("Both doc_id and query cannot be None")
es, es_url, es_index = None, None, None
if endpoint == self.GRQ_ES_ENDPOINT:
es_index = "grq"
es = self._grq_es
if endpoint == self.MOZART_ES_ENDPOINT:
es_index = "job_status-current"
es = self._mozart_es
if doc_id is not None:
query = {"query": {"bool": {"must": [{"term": {"_id": doc_id}}]}}}
try:
result = es.search(
index=es_index, body=query, size=size, request_timeout=request_timeout
)
# retry in case of time out
if "timed_out" in result and result.get("timed_out"):
logger.warning(
"ES responded with a timed out result, "
"retrying....: {}".format(json.dumps(result))
)
raise RuntimeWarning(
"ES responded with a timed out result, retrying...."
)
except Exception as e:
logger.warning(
"Caught exception from elasticsearch "
"retrying: {}".format(traceback.format_exc())
)
# Retry querying, this is incase ES takes too long to respond
if not retried:
self.query_es(
endpoint=endpoint,
doc_id=doc_id,
size=size,
request_timeout=int(request_timeout + 30),
retried=True,
)
else:
raise Exception(str(e))
return result
def product_in_grq(self, doc_id):
"""
Checks if the product has been indexed in ES
:param doc_id:
:return: True if product found else throw suitable exception
"""
query = {
"_source": ["id"],
"query": {"bool": {"must": [{"term": {"_id": doc_id}}]}},
}
try:
if self.wait_for_doc(endpoint=self.GRQ_ES_ENDPOINT, query=query, timeout=120):
return True
except Exception as ex:
logger.error(
"Error querying GRQ for product {}. {}. {}".format(
doc_id, str(ex), traceback.format_exc()
)
)
raise Exception(
"Error querying GRQ for product {}. {}".format(doc_id, str(ex))
)
def wait_condition(self, endpoint, result):
results_exist = len(result.get("hits").get("hits")) == 0
if endpoint == self.MOZART_ES_ENDPOINT:
return results_exist or str(result.get("hits").get("hits")[0].get(
"_source").get("status")) == "job-started"
if endpoint == self.GRQ_ES_ENDPOINT:
return results_exist
    def wait_for_doc(self, endpoint, query, timeout):
        """
        This function executes the search query for specified wait time until
        document is found.

        Polls ES with exponential backoff (2s, 4s, 8s, ...) while
        ``wait_condition`` says to keep waiting. A timed-out ES response is
        penalized by counting an extra 30s against the budget.

        :param endpoint: GRQ or MOZART
        :param query: search query
        :param timeout: time to wait in seconds
        :return: True if document found else raise suitable Exception
        """
        try:
            result = self.query_es(
                endpoint=endpoint, query=query, request_timeout=30, size=1
            )
            slept_seconds = 0
            sleep_seconds = 2
            while self.wait_condition(endpoint=endpoint, result=result):
                # NOTE(review): the default of True means a missing
                # "timed_out" key also adds 30s to the budget — confirm
                # that is intended.
                if result.get("timed_out", True):
                    slept_seconds += 30
                if slept_seconds + sleep_seconds < timeout:
                    logger.debug("Slept for {} seconds".format(slept_seconds))
                    logger.debug("Sleeping for {} seconds".format(sleep_seconds))
                else:
                    # clamp the final sleep so total wait conforms to timeout
                    sleep_seconds = timeout - slept_seconds
                    logger.debug("Slept for {} seconds".format(slept_seconds))
                    logger.debug(
                        "Sleeping for {} seconds to conform to timeout "
                        "of {} seconds".format(sleep_seconds, timeout)
                    )
                # budget exhausted: raise a specific error depending on why
                # we were still waiting
                if slept_seconds >= timeout:
                    if len(result.get("hits").get("hits")) == 0:
                        raise Exception(
                            "{} ES taking too long to index document".format(endpoint)
                        )
                    if endpoint == self.MOZART_ES_ENDPOINT:
                        if (
                            str(result["hits"]["hits"][0]["_source"]["status"])
                            == "job-started"
                        ):
                            raise Exception(
                                "{} ES taking too long to update status of "
                                "job".format(endpoint)
                            )
                time.sleep(sleep_seconds)
                result = self.query_es(
                    endpoint=endpoint, query=query, request_timeout=30, size=1
                )
                slept_seconds += sleep_seconds
                sleep_seconds *= 2  # exponential backoff
            return True
        except Exception as e:
            raise Exception("ElasticSearch Operation failed due to : {}".format(str(e)))
    def get_product_info(self, product_id):
        """
        This function gets the product's URL and associated metadata from
        ElasticSearch.

        :param product_id: id of product
        :return: tuple(product_url, metadata); product_url is the LAST
            s3:// URL listed for the product, or None if it has no s3 URL
        :raises Exception: if the product cannot be found or queried
        """
        response = None
        try:
            # product_in_grq blocks (with backoff) until the product is
            # indexed, or raises
            if self.product_in_grq(doc_id=product_id):
                try:
                    response = self.query_es(
                        endpoint=self.GRQ_ES_ENDPOINT, doc_id=product_id
                    )
                    if len(response.get("hits").get("hits")) == 0:
                        raise Exception(
                            "ES taking too long to index product with id "
                            "%s." % product_id
                        )
                except Exception as ex:
                    raise Exception(
                        "ElasticSearch Operation failed due to : {}".format(str(ex))
                    )
        except Exception as ex:
            raise Exception(
                "Failed to find product in GRQ. {}. {}".format(
                    str(ex), traceback.format_exc()
                )
            )
        try:
            result = response.get("hits").get("hits")[0]
            product_urls = result.get("_source").get("urls")
            product_url = None
            # keeps overwriting, so the last s3 URL in the list wins
            for url in product_urls:
                if url.startswith("s3://"):
                    product_url = url
            metadata = result.get("_source").get("metadata")
        except Exception as ex:
            raise Exception(
                "Failed to get product info. {}. {}".format(
                    str(ex), traceback.format_exc()
                )
            )
        return product_url, metadata
def core_post_process_steps(self):
"""
The mandatory post processing steps of Chimera are:
1. check submitted job's status
2. Get complete job run information i.e job's context, products produced, job id
(in case job submitted by sciflo was deduped, the original job ID is tracked down)
3.
:return:
"""
pseudo_context = dict()
"""
check submitted job's status
"""
job_status = self._check_job_status()
"""
get the products staged, context of job and job's ID (incase job submitted by sciflo was deduped)
"""
try:
products, prev_context, message, job_id = self._get_job()
except Exception as ex:
logger.error(
"Couldn't get job info for {}. {}. {}".format(
job_id, str(ex), traceback.format_exc()
)
)
job_status_code = -1
logger.error("Job was not found.")
raise RuntimeError(
"Couldn't get job info for {}. {}. {}".format(
job_id, str(ex), traceback.format_exc()
)
)
"""
Handle job status codes for all outcomes of a deduped job
# Case 1: if the original job is queued or has started, fail the current sciflo
so that the original workflow can take care of the PGE run
update the job status with -3
# Case 2: if the original job has completed, then get products and prev _context from original job
update job_status_code to 2
# Case 3: if the original job was deduped (NOTE: Unlikely unrealistic case)
set job_status_code to -2
"""
# case 1
if products is None and prev_context is None:
job_status_code = -3
raise RuntimeError(message)
else:
# case 2
if job_status == "job-completed":
job_status_code = 2
# case 3
elif job_status == "job-deduped":
job_status_code = -2
"""
Query to get information of all products staged.
NOTE: Sciflo gets notification of the celery task completion and moves to the post processing step when
it gets the PGE job submission results. The completion of a celery task is different than completion of
a HySDS job. A HySDS job includes the celery task execution, worker post processing and dataset ingestion.
We get to the step of querying a product's information before it has been indexed into GRQ. To handle this race
condition we have an exponential backoff logic in the query to wait for the product to appear.
Max wait time is 2 mins.
"""
try:
(
product_id,
products_url_list,
products_metadata_list,
) = self._create_products_list(products=products)
except Exception as ex:
job_status_code = -1
logger.error("Setting Job failure status code as product was not found.")
raise RuntimeError(
"Failed PGE run as products list could not be made."
" {}. {}".format(str(ex), traceback.format_exc())
)
"""
Now that we have all job and products information we can put the psuedo context contents together.
"""
logger.info("Job Status Code: {}".format(job_status_code))
product_url_key = ChimeraConstants.PRODUCT_PATHS
metadata_key = ChimeraConstants.PRODUCTS_METADATA
pseudo_context[product_url_key] = products_url_list
pseudo_context[metadata_key] = products_metadata_list
pseudo_context["job_id"] = job_id
pseudo_context["job_context"] = prev_context
return pseudo_context
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,193
|
hysds/chimera
|
refs/heads/develop
|
/chimera/precondition_evaluator.py
|
import json
import os
import copy
import traceback
from importlib import import_module
from chimera.logger import logger
from chimera.commons.conf_util import YamlConf, load_config
from chimera.commons.constants import ChimeraConstants
from chimera.precondition_functions import PreConditionFunctions
from urllib.parse import urlparse
# Used to identify fields to be filled within the runconfig context.json of PGE
EMPTY_FIELD_IDENTIFIER = "__CHIMERA_VAL__"
class PreConditionEvaluator(object):
    """
    Evaluates a PGE's preconditions and fills in its runconfig.

    Loads the sciflo context, PGE config, Chimera config and settings files,
    dynamically resolves the project-specific precondition-functions class
    from the Chimera config, runs it, and substitutes the resulting job
    params into the PGE's runconfig template.
    """

    def __init__(self, sf_context, chimera_config_filepath, pge_config_filepath, settings_file):
        """
        :param sf_context: sciflo context — either an in-memory dict or a
            path to a context JSON file
        :param chimera_config_filepath: path to the Chimera config YAML
        :param pge_config_filepath: path to the PGE config (.json or .yaml)
        :param settings_file: path to the settings YAML; when falsy, no
            settings are loaded (self._settings is then never set)
        :raises RuntimeError: on any config-loading failure
        """
        # load context file
        if isinstance(sf_context, dict):
            self._sf_context = sf_context
        elif isinstance(sf_context, str):
            self._sf_context = json.load(open(sf_context, 'r'))
        logger.debug("Loaded context file: {}".format(json.dumps(self._sf_context)))
        # load pge config file
        self._pge_config = load_config(pge_config_filepath)
        logger.debug("Loaded PGE config file: {}".format(json.dumps(self._pge_config)))
        # load IPP config file; the 'preprocessor' section must name the
        # module and class implementing the precondition functions
        try:
            self._chimera_config = YamlConf(chimera_config_filepath).cfg
            self._module_path = self._chimera_config.get("preprocessor", {}).get("module_path", None)
            if not self._module_path:
                raise RuntimeError("'module_path' must be defined in the 'preprocessor' section of the "
                                   "Chimera Config file '{}'".format(chimera_config_filepath))
            self._class_name = self._chimera_config.get("preprocessor", {}).get("class_name", None)
            if not self._class_name:
                raise RuntimeError("'class_name' must be defined in the 'preprocessor' section of the "
                                   "Chimera Config file '{}'".format(chimera_config_filepath))
        except Exception as e:
            raise RuntimeError("Could not read preconditions definition file : {}".format(e))
        # load Settings file
        try:
            if settings_file:
                settings_file = os.path.abspath(os.path.normpath(settings_file))
                self._settings = YamlConf(settings_file).cfg
        except Exception as e:
            if settings_file:
                file_name = settings_file
            else:
                file_name = '~/verdi/etc/settings.yaml'
            raise RuntimeError("Could not read settings file '{}': {}".format(file_name, e))

    def repl_val_in_dict(self, d, val, job_params, root=None, optional_fields=None):
        """
        Recursive function to replace occurences of val in a dict with values
        from the job_params.

        Mutates *d* in place. Keys are matched first by their full
        dot-notation path (e.g. "Group.SubKey"), then by the bare key name
        for backwards compatibility. Keys listed in *optional_fields* are set
        to "" when no job param is available; any other unmatched key raises.

        :param d: dict to fill in (mutated)
        :param val: sentinel marking fields to fill (e.g. "__CHIMERA_VAL__")
        :param job_params: mapping of evaluated precondition values
        :param root: key path accumulated during recursion
        :param optional_fields: keys allowed to remain unevaluated
        :return: list of keys (dot-notation or bare) that were filled
        :raises ValueError: if a sentinel field has no job param and is not
            optional
        """
        if root is None: root = []
        if optional_fields is None: optional_fields = []
        matched_keys = []
        for k, v in d.items():
            rt = copy.copy(root)
            rt.append(k)
            if isinstance(v, dict):
                matched_keys.extend(self.repl_val_in_dict(v, val, job_params, rt, optional_fields))
            if v == val:
                jp_key = '.'.join(rt)
                # use job_params with explicit dot notation
                if jp_key in job_params:
                    d[k] = job_params[jp_key]
                    matched_keys.append(jp_key)
                # maintain backwards-compatibility of using job_param values without dot notation
                elif k in job_params:
                    d[k] = job_params[k]
                    matched_keys.append(k)
                else:
                    # check if optionalField; if so, set value to empty string
                    if jp_key in optional_fields:
                        logger.info("Explicit dot notation key {} is an optional field.".format(jp_key))
                        logger.info("Setting {} value to empty string.".format(k))
                        d[k] = ""
                    elif k in optional_fields:
                        logger.info("Key {} is an optional field.".format(k))
                        logger.info("Setting {} value to empty string.".format(k))
                        d[k] = ""
                    else:
                        logger.error("job_params: {}".format(json.dumps(job_params, indent=2, sort_keys=True)))
                        raise(ValueError("{} or {} has not been evaluated by the preprocessor.".format(jp_key, k)))
        return matched_keys

    def localize_paths(self, output_context):
        """
        To set file to localize in the docker.

        Collects every URL-valued entry (scalar or list) found under the PGE
        config's localize_groups within *output_context*.

        :param output_context: filled-in runconfig dict
        :return: list of URL strings to localize
        """
        logger.debug("Preparing to localize file paths")

        # Deprecated function since not all values in localize_groups are on s3
        # for example SPS config files
        def is_url(val):
            parse_result = urlparse(val)
            schemes = ["s3", "s3s", "http", "https",
                       "ftp", "sftp", "azure", "azures", "rsync"]
            return parse_result.scheme in schemes

        localize_paths_list = []
        for group in self._pge_config.get(ChimeraConstants.LOCALIZE_GROUPS, []):
            for elem in output_context.get(group, []):
                value = output_context.get(group).get(elem)
                # If the value is a list, example some InputFilGroups could be
                # scalars or vectors
                if isinstance(value, list):
                    for v in value:
                        if is_url(v):
                            localize_paths_list.append(v)
                elif isinstance(value, str):
                    if is_url(value):
                        localize_paths_list.append(value)
                else:
                    # non-string, non-list values are never localized
                    continue
        return localize_paths_list

    def prepare_runconfig(self, job_params):
        """
        To prepare the final completed runconfig context.json which will be
        fed in to the pge.

        :param job_params: evaluated precondition values to substitute into
            the runconfig template
        :return: dict — completed runconfig, plus localize and
            simulate-outputs entries
        :raises KeyError: if the PGE config has no 'runconfig' section
        """
        logger.debug("Preparing runconfig for {}".format(self._pge_config.get('pge_name')))
        empty_field_identifier = self._pge_config.get(ChimeraConstants.EMPTY_FIELD_IDENTIFIER,
                                                      EMPTY_FIELD_IDENTIFIER)
        logger.debug("Empty field identifier: {}".format(empty_field_identifier))
        output_context = dict()
        optional_fields = self._pge_config.get(ChimeraConstants.OPTIONAL_FIELDS, [])
        if self._pge_config.get(ChimeraConstants.RUNCONFIG):
            # work on a deep copy so the loaded PGE config is not mutated
            output_context = copy.deepcopy(self._pge_config.get(ChimeraConstants.RUNCONFIG))
            matched_keys = self.repl_val_in_dict(output_context, empty_field_identifier,
                                                 job_params, optional_fields=optional_fields)
        else:
            raise KeyError("Key runconfig not found in PGE config file")
        # Add localized urls
        output_context[ChimeraConstants.LOCALIZE] = self.localize_paths(output_context)
        output_context[ChimeraConstants.SIMULATE_OUTPUTS] = self._settings[ChimeraConstants.PGE_SIM_MODE]
        return output_context

    def evaluate(self):
        """
        Run the configured precondition functions and build the PGE
        runconfig.

        :return: completed runconfig context dict
        :raises RuntimeError: wrapping any failure during precondition
            evaluation or runconfig preparation
        """
        job_params = dict()
        try:
            # dynamically load the project-specific preconditions class
            module = import_module(self._module_path)
            cls = getattr(module, self._class_name)
            if not issubclass(cls, PreConditionFunctions):
                raise RuntimeError("Class must be a subclass of {}: {}".format(PreConditionFunctions.__name__,
                                                                               cls.__name__))
            cls_object = cls(self._sf_context, self._pge_config, self._settings, job_params)
            job_params.update(cls_object.run(self._pge_config.get(ChimeraConstants.PRECONDITIONS, list())))
            output_context = self.prepare_runconfig(job_params)
            return output_context
        except Exception as e:
            logger.error("Input precondition failure: {}. {}".format(e, traceback.format_exc()))
            raise RuntimeError("Input precondition failure: {}".format(e))
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,194
|
hysds/chimera
|
refs/heads/develop
|
/tests/test_post_processor.py
|
import json
import os
from smap_sciflo import post_processor
if __name__ == '__main__':
    # Manual integration test for production PGE post processing.
    #
    # Comment out "from hysds.celery import app" in post_processor and
    # query_util, then set up two SSH tunnels:
    #   ssh -i [PEM file] -L 9200:localhost:9200 [username]@[MOZART_IP]
    #   ssh -i [PEM file] -L 9300:localhost:9200 [username]@[GRQ_IP]
    # In post_processor, overwrite the ES URLs with:
    #   JOBS_ES_URL = "http://127.0.0.1:9200"
    #   GRQ_ES_URL = "http://127.0.0.1:9300"
    # In commons/query_util.py, overwrite the GRQ URL with:
    #   ES_URL = "http://127.0.0.1:9300"
    # Update the sample files:
    #   test-files/sf_context.json — sciflo context of an actual workflow run
    #   test-files/sample_job_submission_result.json — matching job submission result
    # If not testing an L0B run, also update pge_type and pge_config_file.
    # Then run this script.

    # Testing L0B post processing
    # FIX: the directory was previously computed and discarded on its own
    # line (dead code) and again into an unused `level_up_dir`; compute it
    # once and reuse it.
    test_dir = os.path.dirname(os.path.realpath(__file__))
    # FIX: use a context manager so the sample-result file handle is closed
    with open(test_dir + "/test-files/sample_job_submission_result.json") as f:
        job_result = json.loads(f.read())
    sf_context = test_dir + "/test-files/sf_context.json"
    pge_type = "L0A_Radiometer"
    pge_config_file = os.path.abspath(os.path.join(os.path.realpath(
        __file__), "../..", "configs/examples/PGE_L0A_RADIOMETER.json"))
    post_processor.create_context(
        sf_context, job_result, pge_type, pge_config_file, test_mode=True)
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,195
|
hysds/chimera
|
refs/heads/develop
|
/__init__.py
|
from __future__ import absolute_import
from .chimera import commons
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,196
|
hysds/chimera
|
refs/heads/develop
|
/chimera/run_pge_docker.py
|
#!/usr/bin/env python
"""
Contributors:
- Namrata Malarout
- Michael Cayanan
- Sujen Shah
This is the second step of Chimera
Takes the configuration generated by IPP and creates the HySDS job parameters for job submission
"""
from importlib import import_module
from chimera.logger import logger
from chimera.commons.conf_util import YamlConf
from chimera.pge_job_submitter import PgeJobSubmitter
"""
This is a sample mozart job payload
{
"job_name": "%s-%s" % (job_type, l0b_lr_raw_id),
"job_type": "job:%s" % job_type,
"job_queue": job_queue,
"container_mappings": container_mappings,
"soft_time_limit": 86400,
"time_limit": 86700,
"payload": {
# smap_sciflo tracking info
"_sciflo_wuid": wuid,
"_sciflo_job_num": job_num,
# job spec for dependencies
"job_specification": {
"digest": "sha256:3debc246c9d86f45a317ae6af4fa82ef9faf1206faf8201ed94db511468d214b",
"id": "container-aria-hysds_aria-pdl-clone:master",
"url": "s3://s3-us-west-2.amazonaws.com/grfn-v2-ops-code-bucket/container-aria-hysds_aria-pdl-clone:master.tar.gz",
"version": "master"
"dependency_images": dependency_images,
},
# job params
"context_blob": job_payload, # one param - one JSON blob
# v2 cmd
"_command": "/home/ops/verdi/ops/SPDM-with-HySDS/run_pge.sh",
# disk usage
"_disk_usage": disk_usage,
# localize urls
"localize_urls": localize_urls,
}
}
"""
def submit_pge_job(sf_context, runconfig, pge_config_file, settings_file, chimera_config_file,
                   wuid=None, job_num=None):
    """
    This function returns the job payload that needs to be mapped by sciflo
    and run on a remote worker.

    The concrete job-submitter class is looked up from the 'job_submitter'
    section of the Chimera config (module_path + class_name) and must be a
    subclass of PgeJobSubmitter.

    :param sf_context: context of workflow job
    :param runconfig: run config created by input preprocessor
    :param pge_config_file: PGE's config file name
    :param settings_file: path to the settings YAML file
    :param chimera_config_file: path to the Chimera config YAML file
    :param wuid: wuid of sciflo (None when run outside sciflo)
    :param job_num: job_num in sciflo (None when run outside sciflo)
    :return: job payload of PGE job
    :raises RuntimeError: if module_path/class_name are missing from the
        Chimera config or the resolved class is not a PgeJobSubmitter
    """
    logger.info("Starting run_pge_docker step.")
    chimera_config = YamlConf(chimera_config_file).cfg
    module_path = chimera_config.get("job_submitter", {}).get("module_path", None)
    if not module_path:
        raise RuntimeError("'module_path' must be defined in the 'job_submitter' section of the "
                           "Chimera Config file '{}'".format(chimera_config_file))
    class_name = chimera_config.get("job_submitter", {}).get("class_name", None)
    if not class_name:
        raise RuntimeError("'class_name' must be defined in the 'job_submitter' section of the Chimera "
                           "Config file '{}'".format(chimera_config_file))
    # dynamically load the project-specific job submitter
    module = import_module(module_path)
    cls = getattr(module, class_name)
    if not issubclass(cls, PgeJobSubmitter):
        raise RuntimeError("Class must be a subclass of {}: {}".format(PgeJobSubmitter.__name__, cls.__name__))
    cls_object = cls(sf_context, runconfig, pge_config_file, settings_file, wuid, job_num)
    job_json = cls_object.submit_job()
    logger.info("Finished run_pge_docker step.")
    return job_json
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,197
|
hysds/chimera
|
refs/heads/develop
|
/chimera/commons/conf_util.py
|
from __future__ import absolute_import
from builtins import object
import re
import yaml
import os
import json
from collections import OrderedDict
# have yaml parse regular expressions: register a constructor on the safe
# loader so "tag:yaml.org,2002:python/regexp"-tagged scalars are compiled
# into regex objects when config files are loaded with yaml.safe_load
yaml.SafeLoader.add_constructor(
    u"tag:yaml.org,2002:python/regexp", lambda l, n: re.compile(l.construct_scalar(n))
)
class YamlConfEncoder(json.JSONEncoder):
    """JSON encoder for YamlConf that serializes compiled regex objects
    as their pattern string."""

    def default(self, obj):
        # compiled regexes (as produced by the YAML regexp constructor) are
        # not JSON serializable; emit the original pattern text instead
        regex_type = type(re.compile(r""))
        if not isinstance(obj, regex_type):
            # everything else follows the standard JSONEncoder behavior
            return super(YamlConfEncoder, self).default(obj)
        return obj.pattern
class YamlConfError(Exception):
    """Exception class for YamlConf class.

    Raised by YamlConf.get() when the requested key is missing.
    """

    pass
class YamlConf(object):
    """Read-only accessor for a YAML configuration file."""

    def __init__(self, file):
        """Load the YAML document at *file*."""
        self._file = file
        with open(self._file) as fh:
            self._cfg = yaml.safe_load(fh)

    @property
    def file(self):
        # path the configuration was loaded from
        return self._file

    @property
    def cfg(self):
        # the parsed configuration object
        return self._cfg

    def get(self, key):
        """Return the value for *key*, raising YamlConfError if absent."""
        try:
            value = self._cfg[key]
        except KeyError:
            raise YamlConfError
        return value

    def __repr__(self):
        return json.dumps(self._cfg, cls=YamlConfEncoder, indent=2)
class JobContext(object):
    """Accessor for a job context JSON file, with read/modify/save support."""

    def __init__(self, file):
        """Load the job context from the JSON file at *file*."""
        self._file = file
        with open(self._file) as fh:
            self._ctx = json.load(fh)

    @property
    def file(self):
        # path the context was loaded from (and saved back to)
        return self._file

    @property
    def ctx(self):
        # the full context dict
        return self._ctx

    def get(self, key):
        """Return the context value for *key*; raise if it does not exist."""
        try:
            value = self._ctx[key]
        except KeyError:
            raise (
                Exception(
                    "Context '{}' doesn't exist in {}.".format(key, self._file)
                )
            )
        return value

    def set(self, key, val):
        """Set *key* to *val* in memory (call save() to persist)."""
        self._ctx[key] = val

    def save(self):
        """Write the (possibly modified) context back to its file."""
        with open(self._file, "w") as fh:
            json.dump(self._ctx, fh, indent=2, sort_keys=True)
class DockerParams(object):
    """Read-only accessor for a docker params JSON file."""

    def __init__(self, file):
        """Load docker params from the JSON file at *file*."""
        self._file = file
        with open(self._file) as fh:
            self._params = json.load(fh)

    @property
    def file(self):
        # path the params were loaded from
        return self._file

    @property
    def params(self):
        # the full params dict
        return self._params

    def get(self, key):
        """Return the param value for *key*; raise if it does not exist."""
        try:
            value = self._params[key]
        except KeyError:
            raise (
                Exception(
                    "Docker params '{}' doesn't exist in {}.".format(key, self._file)
                )
            )
        return value
def load_config(config_filepath):
    """
    Load a configuration file into a mapping.

    ``.json`` files are parsed with key order preserved (OrderedDict);
    ``.yaml`` files are parsed via YamlConf.

    :param config_filepath: path to a .json or .yaml config file
    :return: the parsed configuration
    :raises RuntimeError: if the extension is unsupported or parsing fails
    """
    # load config file
    config_ext = os.path.splitext(config_filepath)[1]
    if config_ext == ".json":
        try:
            # FIX: use a context manager so the file handle is closed
            # (previously json.load(open(...)) leaked the handle)
            with open(config_filepath, 'r') as f:
                config = json.load(f, object_pairs_hook=OrderedDict)
        except Exception as e:
            raise RuntimeError("Could not load Config : {}".format(e))
    elif config_ext == ".yaml":
        try:
            config = YamlConf(config_filepath).cfg
        except Exception as e:
            raise RuntimeError("Could not load Config : {}".format(e))
    else:
        raise RuntimeError("Config file must end in .yaml or .json: {}".format(config_filepath))
    return config
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,198
|
hysds/chimera
|
refs/heads/develop
|
/chimera/commons/sciflo_util.py
|
#!/usr/bin/env python
import os
import json
import re
import shutil
# matches sciflo work-unit directory names (five digits, a dash, then a
# suffix — e.g. "00001-<id>"); used by copy_sciflo_work below
WORK_RE = re.compile(r"\d{5}-.+")

# Placeholder files are pre-allocated at sciflo start so error details can
# still be recorded if the disk fills up during the run (see
# __create_placeholder_alt_files / __write_error_files).
MAX_PLACEHOLDER_FILE_SIZE = 1000  # bytes reserved per placeholder file
PLACEHOLDER_ERROR_FILE = "_alt_error_hold.txt"
PLACEHOLDER_TB_FILE = "_alt_traceback_hold.txt"
PLACEHOLDER_DOCKER_STATS_FILE = "_docker_stats_hold.json"
PLACEHOLDER_FILES = [
    PLACEHOLDER_ERROR_FILE,
    PLACEHOLDER_TB_FILE,
    PLACEHOLDER_DOCKER_STATS_FILE,
]
def __create_placeholder_alt_files():
    """
    Pre-allocate placeholder files for _alt_error, _alt_traceback and
    _docker_stats in the current working directory.

    Due to possible disk space issues, these temporary files reserve space
    up front so error details can still be captured later (the placeholders
    are renamed into place by __write_error_files on OSError).
    """
    # seek to the reserve size and write one NUL byte to claim the space
    with open(PLACEHOLDER_ERROR_FILE, "wb") as f:
        f.seek(MAX_PLACEHOLDER_FILE_SIZE)
        f.write(b"\0")
    with open(PLACEHOLDER_TB_FILE, "wb") as f:
        f.seek(MAX_PLACEHOLDER_FILE_SIZE)
        f.write(b"\0")
    # the docker-stats placeholder just needs to exist as valid (empty) JSON
    with open(PLACEHOLDER_DOCKER_STATS_FILE, "w") as f:
        json.dump(dict(), f)
def __cleanup_placeholder_alt_files():
    """Delete any leftover placeholder files from the current directory.

    Placeholders are created by __create_placeholder_alt_files; if the run
    succeeded they were never renamed into place and should be removed.
    """
    for temp_file in PLACEHOLDER_FILES:
        if os.path.exists(temp_file):
            print(f"Remove existing placeholder file: {temp_file}")
            # BUG FIX: the message claimed removal but the file was never
            # actually deleted
            os.remove(temp_file)
def __write_error_files(error, traceback):
    """
    Write the sciflo error and traceback to _alt_error.txt /
    _alt_traceback.txt for mozart to pick up.

    If the disk is full (OSError on the first writes), fall back to the
    pre-allocated placeholder files: renaming a placeholder frees its
    reserved bytes, so truncated (MAX_PLACEHOLDER_FILE_SIZE) copies of the
    error and traceback can still be written.

    :param error: error summary string
    :param traceback: traceback string (note: shadows the stdlib module name
        within this function only)
    """
    alt_error_file = "_alt_error.txt"
    alt_tb_file = "_alt_traceback.txt"
    docker_stats_file = "_docker_stats.json"
    try:
        with open(alt_error_file, "w") as f:
            f.write("%s\n" % error)
        with open(alt_tb_file, "w") as f:
            f.write("%s\n" % traceback)
    except OSError as oe:
        print(
            f"OSError encountered: {str(oe)}. Will write errors to placeholder files."
        )
        print(f"Renaming {PLACEHOLDER_ERROR_FILE} to {alt_error_file}.")
        os.rename(PLACEHOLDER_ERROR_FILE, alt_error_file)
        print(f"Renaming {PLACEHOLDER_TB_FILE} to {alt_tb_file}.")
        os.rename(PLACEHOLDER_TB_FILE, alt_tb_file)
        # the renames freed the reserved space, so the truncated writes fit
        with open(alt_error_file, "w") as f:
            f.write("%s\n" % error[:MAX_PLACEHOLDER_FILE_SIZE])
        with open(alt_tb_file, "w") as f:
            f.write("%s\n" % traceback[:MAX_PLACEHOLDER_FILE_SIZE])
        print(f"Successfully wrote the errors to {alt_error_file} and {alt_tb_file}")
        # NOTE(review): the indentation of this docker-stats block is
        # ambiguous in the source dump; placed inside the except handler
        # since an empty docker stats file is only expected after a
        # disk-full failure — confirm against the original file.
        if (
            os.path.exists(docker_stats_file)
            and os.path.getsize(docker_stats_file) == 0
        ):
            print(f"Renaming {PLACEHOLDER_DOCKER_STATS_FILE} to {docker_stats_file}")
            os.rename(PLACEHOLDER_DOCKER_STATS_FILE, docker_stats_file)
            print(
                f"Successfully renamed {PLACEHOLDER_DOCKER_STATS_FILE} to {docker_stats_file}"
            )
def copy_sciflo_work(output_dir):
    """Move over smap_sciflo work dirs.

    Replaces every symlinked sciflo work-unit directory under *output_dir*
    with a real copy of its target, so the human-readable sciflo step name
    holds the actual data instead of a symlink.
    """
    for root, dirs, files in os.walk(output_dir):
        for name in dirs:
            if not WORK_RE.search(name):
                continue
            link_path = os.path.join(root, name)
            # Only act on live symlinks; a dangling link has nothing to copy.
            if os.path.islink(link_path) and os.path.exists(link_path):
                target = os.path.realpath(link_path)
                os.unlink(link_path)
                dest = os.path.join(root, os.path.basename(link_path))
                shutil.copytree(target, dest)
    return
def extract_error(sfl_json):
    """Extract SciFlo error and traceback for mozart.

    Parses the ``exceptionMessage`` field of the sciflo.json written by a
    failed run and forwards the error/traceback to __write_error_files().

    Fixes: removed the unused local ``accountability``.

    :param sfl_json: path to the sciflo.json file of the failed run
    """
    with open(sfl_json) as f:
        j = json.load(f)
    exc_message = j.get("exceptionMessage", None)
    if exc_message is not None:
        # NOTE(review): eval() on file contents — the message is produced by
        # our own sciflo run, but ast.literal_eval would be safer if the
        # serialized format allows it; confirm before switching.
        try:
            exc_list = eval(exc_message)
        except Exception:
            exc_list = []
        if len(exc_list) == 3:
            proc, exc, tb = exc_list
            # The exception element may itself be a repr; try to rebuild it.
            try:
                exc = eval(exc)
            except Exception:
                pass
            if isinstance(exc, tuple) and len(exc) == 2:
                err, job_json = exc
                if isinstance(job_json, dict) and "job_id" in job_json:
                    err_str = (
                        "SciFlo step %s with job_id %s (task %s) failed: %s"
                        % (proc, job_json["job_id"], job_json["uuid"], err)
                    )
                    __write_error_files(err_str, job_json["traceback"])
            else:
                err_str = "SciFlo step %s failed: %s" % (proc, exc)
                __write_error_files(err_str, tb)
def run_sciflo(sfl_file, sfl_args, output_dir):
    """Run sciflo.

    Executes sflExec.py with the given workflow and arguments, extracts error
    details on failure, and copies the sciflo work directories.

    Fixes: removed the unused ``sf_key, context_file`` unpack (which would
    also raise ValueError for an argument without an "=").

    :param sfl_file: path to the sciflo workflow definition
    :param sfl_args: list of "key=value" argument strings for sflExec.py
    :param output_dir: directory where sciflo writes its outputs
    :return: 0 on success, 1 on failure
    """
    # build paths to executables
    sflexec_path = os.path.join(os.environ["HOME"], "verdi", "bin", "sflExec.py")

    # reserve disk space for error files before the run can fill the disk
    __create_placeholder_alt_files()

    # execute sciflo
    cmd = [
        sflexec_path,
        "-s",
        "-f",
        "-o",
        output_dir,
        "--args",
        '"%s"' % ",".join(sfl_args),
        sfl_file,
    ]
    print("Running sflExec.py command:\n%s" % " ".join(cmd))
    status = os.system(" ".join(cmd))
    print("Exit status is: %d" % status)
    if status != 0:
        extract_error("%s/sciflo.json" % output_dir)
        # normalize the shell's encoded wait status to a plain failure code
        status = 1

    # copy smap_sciflo work and exec dir; best-effort — a copy failure must
    # not mask the sciflo exit status
    try:
        copy_sciflo_work(output_dir)
    except Exception:
        pass

    __cleanup_placeholder_alt_files()
    return status
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,199
|
hysds/chimera
|
refs/heads/develop
|
/chimera/pge_job_submitter.py
|
"""
Class that submits a PGE job to HySDS. This class can be used as-is, which will rely on HySDS Core's feature
of performing the hash calculation to determine dedup.
"""
import json
import os
from chimera.commons.constants import ChimeraConstants as chimera_const
from chimera.commons.conf_util import load_config, YamlConf
from chimera.logger import logger
from hysds_commons.job_utils import resolve_hysds_job
class PgeJobSubmitter(object):
    """
    Submits a PGE job to HySDS. This class can be used as-is, which will rely
    on HySDS Core's feature of performing the hash calculation to determine
    dedup; subclasses may override get_payload_hash() and
    perform_adaptation_tasks() to customize behavior.
    """

    def __init__(self, context, run_config, pge_config_file, settings_file, wuid=None, job_num=None):
        """
        :param context: job context — a dict, or a path to a JSON context file
        :param run_config: run config produced by the input preprocessor
        :param pge_config_file: path to the PGE configuration file
        :param settings_file: path to the settings YAML (may be falsy)
        :param wuid: sciflo work unit id; together with job_num triggers
                     actual HySDS job submission instead of inline running
        :param job_num: sciflo job number
        """
        # load context file
        if isinstance(context, dict):
            self._context = context
            # Fix: the old code unconditionally ran os.path.abspath(context),
            # which raises TypeError for a dict. For an in-memory context,
            # fall back to the current working directory as the job's
            # top-level work dir.
            self._base_work_dir = os.getcwd()
        elif isinstance(context, str):
            self._context = json.load(open(context, 'r'))
            # This is intended to represent the top level working directory of
            # the job. It's assumed to be at the same level as the given
            # context file.
            self._base_work_dir = os.path.dirname(os.path.abspath(context))
        else:
            # Previously this fell through and crashed later with an opaque
            # TypeError; fail fast with a clear message instead.
            raise TypeError("context must be a dict or a path to a JSON file")
        logger.debug("Loaded context file: {}".format(json.dumps(self._context)))

        # load pge config file
        self._pge_config = load_config(pge_config_file)
        logger.debug("Loaded PGE config file: {}".format(json.dumps(self._pge_config)))

        self._wuid = wuid
        self._job_num = job_num

        # load Settings file
        try:
            if settings_file:
                settings_file = os.path.abspath(os.path.normpath(settings_file))
                self._settings = YamlConf(settings_file).cfg
                self._chimera_config = self._settings.get("CHIMERA", None)
                if self._wuid and self._job_num is not None:
                    if not self._chimera_config:
                        raise RuntimeError("Must specify a CHIMERA area in {}".format(settings_file))
        except Exception as e:
            if settings_file:
                file_name = settings_file
            else:
                file_name = '~/verdi/etc/settings.yaml'
            raise RuntimeError("Could not read settings file '{}': {}".format(file_name, e))

        self._run_config = run_config

    def get_input_file_name(self, input_file_key=None):
        """
        Function to grab the primary input file name out of the run config.

        :param input_file_key: JSON key in runconfig containing the primary
            input value
        :return: a "-"-joined basename string (``.XFR`` entries excluded when
            the input is a list), or None when the key is absent
        """
        input_products = self._run_config.get(chimera_const.RC_INPUT).get(input_file_key, None)
        if input_products is None:
            return None
        if isinstance(input_products, list):
            input_file = [os.path.basename(path) for path in input_products
                          if not path.endswith(".XFR")]
            files = "-".join(input_file)
        else:
            files = os.path.basename(input_products)
        return files

    def get_localize_urls(self, localize):
        """
        Create the list of products to be localized within the docker for the
        PGE run.

        :param localize: list of urls to be localized
        :return: localize list that osaka understands
        """
        return [{"url": url, "path": "input/"} for url in localize]

    def construct_params(self):
        """
        Construct the params for the PGE job submission.

        :return: dict with run config, pge config, localize urls, and the
            simulate flag
        :raises ValueError: when the run config has no localize section
        """
        try:
            localize_urls = self.get_localize_urls(self._run_config.get(chimera_const.LOCALIZE))
        except Exception:
            raise ValueError(
                "Couldn't find {} in runconfig from input preprocessor".format(
                    chimera_const.LOCALIZE))
        job_params = {
            "run_config": self._run_config,
            "pge_config": self._pge_config,
            "localize_urls": localize_urls,
            "simulate_outputs": self._run_config[chimera_const.SIMULATE_OUTPUTS]
        }
        return job_params

    def get_payload_hash(self, job_type):
        """
        Can be overwritten to calculate the payload hash to determine dedup.
        By returning None, we will use HySDS Core's hash calculation to
        determine dedup.

        :param job_type: fully qualified job type string
        :return: None (base class defers to HySDS Core)
        """
        return None

    def perform_adaptation_tasks(self, job_json):
        """
        Can be used to perform additional tasks prior to job submission.

        :param job_json: the job payload about to be submitted/run
        :return: the (possibly modified) job payload
        """
        return job_json

    def construct_job_payload(self, params=None, dataset_id=None, pge_config=None, job_type=None, job_queue=None,
                              payload_hash=None):
        """
        Uses resolve_hysds_job to get the job json.

        Fix: the old code had an unreachable bare ``except`` after
        ``except Exception`` that could also convert KeyboardInterrupt /
        SystemExit into RuntimeError; the single handler below preserves the
        original cause via exception chaining.

        :param params: job params dict
        :param dataset_id: optional dataset id used in the job name/tags
        :param pge_config: PGE config dict (for the pge_name)
        :param job_type: fully qualified HySDS job type
        :param job_queue: HySDS queue name
        :param payload_hash: optional precomputed dedup hash
        :return: resolved HySDS job json
        """
        if dataset_id is not None:
            job_name = job_type + "_" + pge_config["pge_name"] + "_" + dataset_id
            tags = [pge_config["pge_name"], dataset_id]
        else:
            job_name = job_type + "_" + pge_config["pge_name"]
            tags = [pge_config["pge_name"]]
        try:
            job = resolve_hysds_job(job_type, job_queue,
                                    params=params, job_name=job_name, enable_dedup=True, tags=tags,
                                    payload_hash=payload_hash)
        except Exception as e:
            raise Exception(
                "Wasn't able to get Job JSON from resolve_hysds_job: {}".format(e)) from e
        # was print(); use the module logger for consistency with the rest of
        # the file
        logger.info(json.dumps(job, sort_keys=True, indent=4, separators=(',', ': ')))
        return job

    def submit_job(self):
        """
        Build the job payload: a full HySDS job json when running under sciflo
        (wuid/job_num set), otherwise the inline params dict.

        :return: job payload dict, after perform_adaptation_tasks()
        :raises RuntimeError: when the run config is not a dict
        """
        if not isinstance(self._run_config, dict):
            raise RuntimeError("The output from input preprocessor is not a dictionary")
        params = self.construct_params()

        # If wuid and job_num are not null, it is implied that we need to do
        # job submission. In that case, we need to construct the job payload.
        if self._wuid and self._job_num is not None:
            # get HySDS job type and queue information
            job_name = self._chimera_config.get(chimera_const.JOB_TYPES).get(
                self._pge_config.get(chimera_const.PGE_NAME))
            job_queue = self._chimera_config.get(chimera_const.JOB_QUEUES).get(
                self._pge_config.get(chimera_const.PGE_NAME))
            if chimera_const.RELEASE_VERSION in self._context:
                release_version = self._context[chimera_const.RELEASE_VERSION]
            else:
                release_version = self._context.get('container_specification').get('version')
            job_type = job_name + ":" + release_version
            localize_hash = self.get_payload_hash(job_type)

            # Nominally, the primary input is used as part of the job name. If
            # we wanted to set something else in the job name, look to see if
            # the pge_job_name field is specified in the run_config.
            dataset_id = self._run_config.get("pge_job_name", None)
            if dataset_id:
                logger.info("dataset_id is set to {}".format(dataset_id))
            job_json = self.construct_job_payload(params, dataset_id=dataset_id, pge_config=self._pge_config,
                                                  job_type=job_type, job_queue=job_queue, payload_hash=localize_hash)
            # Set the sciflo fields wuid and job num; these are internally
            # passed context information available in sciflo processes.
            job_json['payload']['_sciflo_wuid'] = self._wuid
            job_json['payload']['_sciflo_job_num'] = self._job_num
            logger.debug("Resolved Job JSON: {}".format(json.dumps(job_json)))
        else:
            # If we're running inline, we will set the params as the job_json.
            job_json = params
            # We also need to get the job_specification from _context.json as
            # that contains dependency image information, if specified.
            if "job_specification" in self._context:
                job_json["job_specification"] = self._context["job_specification"]
        job_json = self.perform_adaptation_tasks(job_json)
        return job_json
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,200
|
hysds/chimera
|
refs/heads/develop
|
/tests/test_product_counter.py
|
import simplejson
import sys
from smap_sciflo import input_preprocessor as ipp

# Ad-hoc manual test: load a sample sciflo context and a PGE config, run
# input_preprocessor.get_product_counter on them, and print the result for
# visual inspection (paths are relative to this test directory).
if __name__ == '__main__':
    context_file = "test-files/sf_context.json"
    context = simplejson.load(open(context_file, 'r'))
    pge_config = simplejson.load(open("../configs/PGE_TSURF.json", "r"))
    # context = process_for_l0b_radiometer(context, simplejson.load(open(pge_config, 'r')))
    job_params = ipp.get_product_counter(pge_config, context)
    # Print the job params returned by get_product_counter for eyeballing.
    print(job_params)
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,201
|
hysds/chimera
|
refs/heads/develop
|
/setup.py
|
"""Packaging configuration for the chimera library."""
from setuptools import setup, find_packages

# Fix: removed the unused module-level variable `adaptation_path = "folder/"`.

setup(
    name='chimera',
    version='2.2.2',
    packages=find_packages(),
    install_requires=[
        'elasticsearch>=7.0.0,<7.14.0',
        'elasticsearch-dsl>=7.0.0,<=7.4.0',
        'requests>=2.18.4',
        'simplejson>=3.11.1'
    ]
)
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,202
|
hysds/chimera
|
refs/heads/develop
|
/chimera/commons/accountability.py
|
#!/usr/bin/env python
import json
from chimera.commons.constants import ChimeraConstants as chimera_const
class Accountability(object):
    """Base accountability hooks for a Chimera job.

    When a work dir is supplied, reads its ``_job.json`` and extracts the
    task id; subclasses override the no-op hooks below.
    """

    def __init__(self, context, work_dir):
        self.context = context
        self.job_json = None
        self.job_id = None
        self.work_dir = work_dir
        if work_dir is None:
            # Nothing to read; leave job_json/job_id unset.
            return
        with open("{}/_job.json".format(work_dir), "r") as f:
            self.job_json = json.load(f)
        payload = self.job_json.get(chimera_const.JOB_INFO).get(chimera_const.JOB_PAYLOAD)
        self.job_id = payload.get(chimera_const.PAYLOAD_TASK_ID)

    def get_entries(self):
        """Hook: return accountability entries (no-op in the base class)."""
        pass

    def create_job_entry(self):
        """Hook: create a job entry (no-op in the base class)."""
        pass

    def set_products(self, job_results):
        """Hook: record produced products (no-op in the base class)."""
        pass
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,203
|
hysds/chimera
|
refs/heads/develop
|
/chimera/precondition_functions.py
|
class PreConditionFunctions(object):
    """Holds a PGE's precondition checks; each check is a method returning a
    dict that is merged into the accumulated job params."""

    def __init__(self, context, pge_config, settings, job_params):
        self._context = context
        self._pge_config = pge_config
        self._settings = settings
        self._job_params = job_params

    def run(self, function_list):
        """
        Runs the set of preconditions passed into the given list.

        :param function_list: names of precondition methods (defined in
            subclasses) to invoke, in order.
        :return: the accumulated job-params dict after all evaluations.
        """
        for name in function_list:
            result = getattr(self, name)()
            self._job_params.update(result)
        return self._job_params
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,204
|
hysds/chimera
|
refs/heads/develop
|
/tests/test_input_preprocessor.py
|
import simplejson
from nisar_chimera import input_preprocessor as ipp

# Ad-hoc manual test: run the input preprocessor in test mode against a saved
# L0A sciflo context and print the resulting payload (paths are relative to
# this test directory).
if __name__ == '__main__':
    sys_config = "../nisar_chimera/configs/sys.config.json"
    test_configs = list()
    # For testing without sfl_exec L0A
    context = simplejson.load(
        open("test-files/L0A_sfcontext.json", 'r'))
    ipp_def_filepath = "../nisar_chimera/configs/precondition_definition.yaml"
    pge_config = "../nisar_chimera/configs/pge_configs/PGE_L0A.yaml"
    settings_file = '../../nisar-pcm/conf/settings.yaml'
    test_configs.append((context, pge_config))
    # context = process_for_l0b_radiometer(context, simplejson.load(open(pge_config, 'r')))
    # Loop through all test configs
    for context, pge_config in test_configs:
        payload = ipp.process(sf_context=context, ipp_def_filepath=ipp_def_filepath, pge_config_filepath=pge_config,
                              sys_config_file=sys_config, settings_file=settings_file, testmode=True)
        print(simplejson.dumps(payload, indent=2))
|
{"/chimera/input_preprocessor.py": ["/chimera/logger.py", "/chimera/precondition_evaluator.py"], "/chimera/post_processor.py": ["/chimera/logger.py", "/chimera/postprocess_evaluator.py"], "/chimera/postprocess_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/postprocess_functions.py"], "/chimera/run_sciflo.py": ["/chimera/logger.py", "/chimera/commons/accountability.py", "/chimera/commons/sciflo_util.py"], "/chimera/postprocess_functions.py": ["/chimera/commons/accountability.py", "/chimera/commons/constants.py", "/chimera/logger.py"], "/chimera/precondition_evaluator.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/commons/constants.py", "/chimera/precondition_functions.py"], "/chimera/run_pge_docker.py": ["/chimera/logger.py", "/chimera/commons/conf_util.py", "/chimera/pge_job_submitter.py"], "/chimera/pge_job_submitter.py": ["/chimera/commons/constants.py", "/chimera/commons/conf_util.py", "/chimera/logger.py"], "/chimera/commons/accountability.py": ["/chimera/commons/constants.py"]}
|
22,236
|
norangLemon/cheesebot
|
refs/heads/master
|
/Value.py
|
import random
from setting import *
def randJoinMsg(msg):
    """Pick a random greeting for joining a channel at *msg*'s request."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["안녕하세요냥!", "내가 왔다냥!", msg.nick+"님, 불러주셔서 고맙다냥!", ">ㅅ<냐옹!"]
    return random.choice(replies)
def randPartMsg(msg):
    """Pick a random sulking line for leaving a channel."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["알았다냥..ㅠㅅㅠ", msg.nick+"님 나쁘다냥!ㅠㅠ", "냥무룩.. 잘있으라냥.."]
    return random.choice(replies)
def randQuitMsg(msg):
    """Pick a random goodbye line for quitting the server (*msg* unused)."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["히잉.. 알았다냥..", "ㅜㅅㅠ다들 잘있으라냥!"]
    return random.choice(replies)
def randNoQuitMsg(msg):
    """Pick a random refusal when a non-owner asks the bot to quit."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["난 주인님 말씀만 듣는다냥!", msg.nick+"님은 바보다냥!!"]
    return random.choice(replies)
def randHateMsg(msg):
    """Pick a random hissing line (*msg* unused)."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["캬옹!", "와아아옹!!!-ㅅ-", "냐아아!!=ㅅ=", "(˃̣̣̣̣̣̣ㅅ˂̣̣̣̣̣̣ ू)"]
    return random.choice(replies)
def randSatisfyMsg(msg):
    """Pick a random purring line (*msg* unused)."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["고릉고릉", "냐아.. 잠이 온다냥zZ", "냐아앙♡"]
    return random.choice(replies)
def randTunaMsg(msg):
    """Pick a random excited line when tuna is mentioned."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["!!", msg.nick+"님, 참치 주세요냥!!", "어디서 참치 소리를 들었다냥?!"]
    return random.choice(replies)
def randOKMsg(msg):
    """Pick a random "I'm alive" confirmation."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["나 여기 있다요냥!", "그렇다냥!", msg.nick+", 나 찾았나요냥?"]
    return random.choice(replies)
def randOPMsg(msg):
    """Pick a random thank-you line for being given ops."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = [">ㅅ<", msg.nick+"님, 고맙다냥!", "부비적부비적♡"]
    return random.choice(replies)
def randDEOPMsg(msg):
    """Pick a random sulking line for losing ops."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = [msg.nick+"님 미워! ㅠㅠ", "냥무룩...", "삐질꺼다냥ㅇㅅㅠ"]
    return random.choice(replies)
def randCuriousMsg(msg):
    """Pick a random curious line when the bot's name shows up in a MODE."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = [msg.nick + "님, 뭐 한거냐요냥?", "냐아?", "내 이름을 본거 같은데냥?!"]
    return random.choice(replies)
def howMuchLoveMsg(msg, affection):
    """Answer "how much do you love me?" based on the stored affection score.

    Branches run from most to least affectionate; thresholds are fractions of
    MAX_AFFECTION (and of MIN_AFFECTION for the negative range) from setting.
    Fix: `replies` instead of `list` so the builtin is not shadowed.
    """
    if affection > MAX_AFFECTION * 4/5:
        replies = [msg.nick+"님 엄청 좋아한다냥!", "당연히 최고 좋아한다냥!", "냐앙..(부끄)//ㅅ//"]
    elif affection > MAX_AFFECTION * 1/3:
        replies = ["헤헤.. "+msg.nick+"님이랑 나랑은 꽤 친하지요냥?", "우웅.. 참치 다음으로 좋아한다냥!>ㅅ<"]
    elif affection > MAX_AFFECTION * 1/4:
        replies = ["비밀이다냥!>ㅅ<", msg.nick+"님은 좋은 사람이다냥! 그치만 좀 더 놀아주면 좋겠다냥!"]
    elif affection > MAX_AFFECTION * 1/5:
        replies = ["좀 더 친해졌으면 좋겠다냥!", "냐아웅!ㅇㅅㅇ", "헤에?ㅇㅅㅇ?"]
    elif affection > MIN_AFFECTION * 1/5:
        replies = ["오늘 처음 본 거 같은데냥...?", msg.nick+"님이랑 나랑 아는 사이었냥..?", "좀 더 쓰다듬고 나서 물어봐달라냥!"]
    elif affection > MIN_AFFECTION * 1/4:
        replies = ["후웅..?", "히잉.. 솔직히 잘 모르겠다냥..."]
    elif affection > MIN_AFFECTION * 4/5:
        replies = ["흥!", msg.nick + "님한테는 지금 삐져있다냥!", "ㅡㅅㅡ..."]
    else:
        replies = ["캬아아옹!", "우왜야아옹.. 또 괴롭히려고 그러냥!!", "히이잉..ㅠㅅㅠ", "와아앙! "+msg.nick+"님이 또 괴롭힌다냥!ㅠㅅㅠ"]
    return random.choice(replies)
def randAnnoyedMsg(msg):
    """Pick a random annoyed line when pestered too much."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["자꾸 그러면 삐진다냥!", "귀찮게 하지 말라냐앙!", "히잉.. "+msg.nick+"님은 할 일도 없냥?!"]
    return random.choice(replies)
def randGiveOpMsg(msg):
    """Pick a random line for handing out ops (*msg* unused)."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["옵 여기있다냥~! =(>ㅅㅇ=)/*", "옵 드렸다냥!>ㅅ<", "옵 줬으니 쓰담쓰담 해달라냥♥"]
    return random.choice(replies)
def randEatingMsg(msg):
    """Pick a random happy-munching line for tuna time."""
    # `replies` instead of `list`: don't shadow the builtin.
    replies = ["0ㅅ0)!! 챱챱", ">ㅅ< "+msg.nick+"님 고맙다냥!", "와아아앙! >ㅅ< 챱챱챱"]
    return random.choice(replies)
|
{"/cheesebot.py": ["/Value.py", "/db.py"]}
|
22,237
|
norangLemon/cheesebot
|
refs/heads/master
|
/db.py
|
import redis

# Single shared redis connection; decode_responses=True so hash values come
# back as str instead of bytes.
r = redis.Redis('localhost', decode_responses=True)

# Prefix applied to every redis key this bot uses.
dbKeyPrefix = "cheese-bot-"

def getChanList():
    """Return the saved channel list as a dict of {channel: channel_key}."""
    return r.hgetall(dbKeyPrefix+"channel-list")

def addChanList(chan, key = ""):
    """Remember *chan* (with optional channel key) in the channel list."""
    r.hset(dbKeyPrefix+"channel-list", chan, key)

def removeChanList(chan):
    """Forget *chan* from the saved channel list."""
    r.hdel(dbKeyPrefix+"channel-list", chan)
|
{"/cheesebot.py": ["/Value.py", "/db.py"]}
|
22,238
|
norangLemon/cheesebot
|
refs/heads/master
|
/cheesebot.py
|
import socket, ssl, re
import Value
from setting import *
from Message import *
from Log import *
from snuMenu import *
from daumDic import *
from naverWeather import *
from db import *
import arith
def send_msg(channel, txt):
    """Send *txt* to *channel* as a PRIVMSG over the global IRC socket."""
    line = 'PRIVMSG ' + channel + ' :' + txt + '\n'
    irc.send(bytes(line, UTF8))
def pong():
    """Answer a server PING to keep the connection alive."""
    reply = "PONG :pingpong\n"
    irc.send(bytes(reply, UTF8))
def join(channel, txt, pw = None):
    """Join *channel* (with channel key *pw* if given) and greet with *txt*.

    Bug fix: the old code always interpolated *pw* into the JOIN line, so a
    keyless join sent the literal string "None" as the channel key.
    """
    if pw:
        irc.send(bytes("JOIN %s %s\r\n" % (channel, pw), UTF8))
    else:
        irc.send(bytes("JOIN %s\r\n" % channel, UTF8))
    send_msg(channel, txt)
def part(channel, txt):
    """Say goodbye with *txt*, then leave *channel*."""
    send_msg(channel, txt)
    farewell = "PART %s\r\n" % channel
    irc.send(bytes(farewell, UTF8))
def quit(txt):
    """Broadcast *txt* to every channel in CHAN, then QUIT the server."""
    # NOTE(review): shadows the builtin quit(); kept for caller compatibility.
    for channel in CHAN:
        send_msg(channel, txt)
    irc.send(bytes("QUIT\r\n", UTF8))
def react_part(msg):
    """Leave the channel at *msg*'s request, with a goodbye line."""
    # NOTE(review): not referenced from run()'s dispatch in this file —
    # confirm whether a PART trigger was intended.
    prtLog("part: " + msg.nick)
    part(msg.channel, Value.randPartMsg(msg))
def react_invite(msg):
    """Accept an INVITE when the channel is in the saved channel list."""
    prtLog(msg)
    prtLog("invite" + msg.nick)
    if msg.channel in getChanList():
        join(msg.channel, Value.randJoinMsg(msg), getChanList()[msg.channel])
def react_mode(msg):
    """React to channel MODE changes that involve the bot or a channel key."""
    text = msg.msg
    if text == "+o " + NICK:
        send_msg(msg.channel, Value.randOPMsg(msg))
    elif text == "-o " + NICK:
        send_msg(msg.channel, Value.randDEOPMsg(msg))
    elif NICK in text:
        send_msg(msg.channel, Value.randCuriousMsg(msg))
    elif text.split()[0] == "+k":
        # Remember the new channel key so future invites can rejoin.
        addChanList(msg.channel, text.split(' ', maxsplit = 1)[1])
def react_RUOK(msg):
    """Confirm the bot is alive when asked."""
    prtLog("RUOK: " + msg.nick)
    send_msg(msg.channel, Value.randOKMsg(msg))
def react_tuna(msg):
    """Get excited whenever tuna is mentioned."""
    reply = Value.randTunaMsg(msg)
    send_msg(msg.channel, reply)
def react_goAway(msg):
    """Leave the current channel when told to go away."""
    prtLog("goAway: " + msg.nick)
    part(msg.channel, Value.randPartMsg(msg))
def react_loveU(msg):
    """Purr when petted."""
    prtLog("pat: " + msg.nick)
    send_msg(msg.channel, Value.randSatisfyMsg(msg))
def react_dog(msg):
    """Hiss back at barking."""
    prtLog("dog: " + msg.nick)
    send_msg(msg.channel, Value.randHateMsg(msg))
def react_giveOp(msg):
    """Grant channel operator status to the requester, then acknowledge."""
    mode_line = 'MODE ' + msg.channel + ' +o ' + msg.nick + '\n'
    irc.send(bytes(mode_line, UTF8))
    send_msg(msg.channel, Value.randGiveOpMsg(msg))
def react_eating(msg):
    """Munch happily when offered a tuna meal."""
    reply = Value.randEatingMsg(msg)
    send_msg(msg.channel, reply)
def run():
    """Main receive loop: read IRC lines and dispatch reactions until quit.

    Fixes over the original:
    - the UnicodeDecodeError / generic handlers logged ``ircmsg_raw``, which
      is unbound on the first iteration (NameError) and stale afterwards;
      the raw bytes are now captured separately and logged instead
    - an empty read means the server closed the connection; the old code
      raised IndexError on ``ircmsg_raw[0]`` and then spun forever — we now
      return
    - the bare ``except:`` is narrowed to ``except Exception``
    """
    while 1:
        try:
            raw = irc.recv(8192)
            ircmsg_raw = raw.decode(UTF8)
        except KeyboardInterrupt:
            quit("난 자러 간다냥!")
            prtLog("ctrl+c")
            return
        except UnicodeDecodeError as err:
            prtErr("Unicode Error!")
            prtLog(repr(raw))
            prtErr(err)
            continue
        except Exception as err:
            prtErr(err)
            prtLog("?")
            continue
        if not raw:
            # recv() returning b"" means the server closed the connection.
            prtLog("connection closed")
            return
        ircmsg_raw = ircmsg_raw.strip("\n\r")
        if ircmsg_raw.find("PING :") != -1:
            pong()
            continue
        # startswith handles the line being empty after stripping.
        if not ircmsg_raw.startswith(':'):
            continue
        msg = Message(ircmsg_raw)
        # print(ircmsg_raw)
        if msg.msgType == "INVITE":
            react_invite(msg)
        elif msg.msgType == "MODE":
            react_mode(msg)
        elif msg.msgType == "PRIVMSG":
            if msg.msg == NICK + " 살아있니?":
                react_RUOK(msg)
            elif msg.msg == "돌아가!" or msg.msg == "사라져버려!":
                react_goAway(msg)
            elif msg.msg == NICK + "야 참치 먹자" or msg.msg == "참치 먹자" or msg.msg == NICK + ", 참치 먹자":
                react_eating(msg)
            elif msg.msg.find("참치") != -1:
                react_tuna(msg)
            elif msg.msg == "쓰담쓰담":
                react_loveU(msg)
            elif msg.msg == "멍멍":
                react_dog(msg)
            elif msg.msg == NICK + ", 옵줘" or msg.msg == NICK + "야 옵줘":
                react_giveOp(msg)
            elif msg.msg[0] == '!':
                # "!" commands: cafeteria menu, dictionary, calculator, weather
                commands = msg.msg.split()
                if commands[0] in ["!식단", "!메뉴"]:
                    menu = snuMenu(msg.msg[4:])
                    for line in menu.getMenu().split('\n'):
                        send_msg(msg.channel, line)
                elif commands[0][1:] in daumDic.map_dic.keys():
                    search = daumDic(msg.msg[1:])
                    send_msg(msg.channel, search.getResult())
                elif commands[0] == "!계산":
                    result = arith.calculate(msg.msg[4:])
                    send_msg(msg.channel, result)
                elif commands[0] == "!날씨":
                    weather = naverWeather(msg.msg[4:])
                    for line in weather.getWeather().split('\n'):
                        send_msg(msg.channel, line)
        else:
            prtLog(str(msg))
if __name__ == "__main__":
    # Open a TLS-wrapped IRC connection and register the bot's identity.
    irc_raw = socket.socket()
    irc_raw.connect((HOST, PORT))
    # NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12) —
    # consider SSLContext.wrap_socket.
    irc = ssl.wrap_socket(irc_raw)
    irc.send(bytes("NICK " + NICK + "\r\n", UTF8))
    irc.send(bytes("USER %s %s %s : %s\r\n" %(ID, ID, HOST, ID), UTF8))
    print("연결되었습니다.")
    # Rejoin every channel stored in redis (value is the channel key).
    for ch, pw in getChanList().items():
        join(ch, "일어났다!", pw)
    run()
|
{"/cheesebot.py": ["/Value.py", "/db.py"]}
|
22,239
|
manicmaniac/wsgir
|
refs/heads/master
|
/wsgir/__main__.py
|
from __future__ import print_function, unicode_literals
from argparse import ArgumentParser
from code import InteractiveConsole
import sys
from wsgiref.simple_server import make_server
from wsgiref.util import setup_testing_defaults
from . import app, init_db, init_environ
def main(argv=None):
    """Entry point: parse *argv* and dispatch to the chosen subcommand."""
    args = make_argument_parser().parse_args(argv)
    command = args.subcommand
    if command == 'initdb':
        initdb_command()
    elif command == 'run':
        run_command(args.host, args.port)
    elif command == 'shell':
        shell_command()
def make_argument_parser():
    """Build the CLI parser with the initdb / run / shell subcommands."""
    parser = ArgumentParser()
    subcommands = parser.add_subparsers(dest='subcommand')
    subcommands.add_parser('initdb', help='Initializes the database.')
    runner = subcommands.add_parser('run',
                                    help='Runs a development server.')
    runner.add_argument('-H', '--host', default='localhost')
    runner.add_argument('-p', '--port', default=5000, type=int)
    subcommands.add_parser('shell', help='Runs a shell in the app context.')
    return parser
def initdb_command():
    """Create the database schema and report success on stderr."""
    init_db(init_environ({}))
    print('Initialized the database.', file=sys.stderr)
def run_command(host, port):
    """Serve the WSGI app forever on http://host:port."""
    httpd = make_server(host, port, app)
    print('Starting a server on http://{}:{}.'.format(host, port))
    httpd.serve_forever()
def shell_command():
    """Drop into an interactive console with an initialized WSGI environ."""
    environ = {}
    setup_testing_defaults(environ)
    InteractiveConsole({'environ': init_environ(environ)}).interact()
if __name__ == '__main__':
    # Run the CLI only when executed directly (e.g. `python -m wsgir`).
    main()
|
{"/wsgir/__main__.py": ["/wsgir/__init__.py"], "/tests/test_wsgir.py": ["/wsgir/__init__.py"]}
|
22,240
|
manicmaniac/wsgir
|
refs/heads/master
|
/tests/test_wsgir.py
|
from __future__ import unicode_literals
from contextlib import closing
import io
import os.path
from shutil import rmtree
import sqlite3
from tempfile import mkdtemp
import unittest
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper, setup_testing_defaults
from wsgiref.validate import validator
import wsgir
class WSGIrTestCase(unittest.TestCase):
def setUp(self):
os.environ['WSGIR_DEBUG'] = '1'
self.environ = {'QUERY_STRING': b''}
setup_testing_defaults(self.environ)
self.start_response = Mock()
self.temp_dir = mkdtemp()
def tearDown(self):
rmtree(self.temp_dir)
def test_get_app(self):
app = validator(wsgir.app)
with closing(app(self.environ, self.start_response)) as response:
body = next(response)
self.assertIn(b'<!doctype html>', body)
self.assertIn(b'<title>WSGIr</title>', body)
self.assertEqual(len(self.start_response.called), 1)
(status, headers), _kwargs = self.start_response.called[0]
headers = Headers(headers)
self.assertEqual(status, b'200 OK')
self.assertEqual(headers['Content-Type'], b'text/html; charset="UTF-8"')
def test_get_show_entries_when_logged_in(self):
self.init_environ()
self.login()
app = validator(wsgir.show_entries)
with closing(app(self.environ, self.start_response)) as response:
body = next(response)
self.assertIn(b'<!doctype html>', body)
self.assertIn(b'<title>WSGIr</title>', body)
self.assertIn(b'</form>', body)
self.assertEqual(len(self.start_response.called), 1)
(status, headers), _kwargs = self.start_response.called[0]
headers = Headers(headers)
self.assertEqual(status, b'200 OK')
self.assertEqual(len(headers.get_all('Set-Cookie')), 1)
self.assertEqual(headers['Content-Type'], b'text/html; charset="UTF-8"')
def test_get_show_entries_when_not_logged_in(self):
self.init_environ()
app = validator(wsgir.show_entries)
with closing(app(self.environ, self.start_response)) as response:
body = next(response)
self.assertIn(b'<!doctype html>', body)
self.assertIn(b'<title>WSGIr</title>', body)
self.assertNotIn(b'</form>', body)
self.assertEqual(len(self.start_response.called), 1)
(status, headers), _kwargs = self.start_response.called[0]
headers = Headers(headers)
self.assertEqual(status, b'200 OK')
self.assertEqual(len(headers.get_all('Set-Cookie')), 1)
self.assertEqual(headers['Content-Type'], b'text/html; charset="UTF-8"')
def test_get_add_entry(self):
self.init_environ()
app = validator(wsgir.add_entry)
with closing(app(self.environ, self.start_response)) as response:
self.assertEqual(next(response), b'Method Not Allowed\n')
self.assertEqual(len(self.start_response.called), 1)
(status, headers), _kwargs = self.start_response.called[0]
self.assertEqual(status, b'405 Method Not Allowed')
headers = Headers(headers)
self.assertEqual(headers['Content-Type'], b'text/plain; charset="UTF-8"')
self.assertIn(b'POST', headers['Allow'])
def test_post_add_entry_when_logged_in(self):
self.init_environ({
'REQUEST_METHOD': b'POST',
'wsgi.input': io.BytesIO(b'title=title&text=text'),
})
self.login()
app = validator(wsgir.add_entry)
with closing(app(self.environ, self.start_response)) as response:
self.assertRaises(StopIteration, next, response)
self.assertEqual(len(self.start_response.called), 1)
(status, headers), _kwargs = self.start_response.called[0]
headers = Headers(headers)
self.assertEqual(status, b'303 See Other')
self.assertEqual(len(headers.get_all('Set-Cookie')), 1)
self.assertEqual(headers['Content-Type'], b'text/plain; charset="UTF-8"')
self.assertEqual(headers['Location'], b'/')
def test_post_add_entry_when_not_logged_in(self):
self.init_environ({
'REQUEST_METHOD': b'POST',
'wsgi.input': io.BytesIO(b'title=title&text=text'),
})
app = validator(wsgir.add_entry)
with closing(app(self.environ, self.start_response)) as response:
self.assertEqual(next(response), b'Unauthorized\n')
self.assertEqual(len(self.start_response.called), 1)
(status, headers), _kwargs = self.start_response.called[0]
headers = Headers(headers)
self.assertEqual(status, b'401 Unauthorized')
self.assertEqual(headers['Content-Type'], b'text/plain; charset="UTF-8"')
def test_connect_db(self):
    """connect_db returns a sqlite3 connection with Row as the row factory."""
    self.init_environ()
    conn = wsgir.connect_db(self.environ)
    self.assertIsInstance(conn, sqlite3.Connection)
    self.assertEqual(conn.row_factory, sqlite3.Row)
def test_init_db(self):
    """init_db creates the `entries` table from schema.sql."""
    self.init_environ()
    wsgir.init_db(self.environ)
    with closing(sqlite3.connect(self.environ['wsgir.database'])) as conn:
        rows = conn.execute('select * from sqlite_master').fetchall()
    self.assertIn('CREATE TABLE entries', repr(rows))
def test_render(self):
    """render substitutes keyword arguments into a template file."""
    self.init_environ()
    self.environ['wsgir.template_dir'] = self.temp_dir
    template_name = 'example.html'
    template_path = os.path.join(self.environ['wsgir.template_dir'],
                                 template_name)
    with open(template_path, 'w') as fp:
        fp.write('<!doctype html><title>{title}</title>\n')
    rendered = wsgir.render(self.environ, template_name, title='title')
    self.assertEqual('<!doctype html><title>title</title>\n', rendered)
def test_get_file_wrapper(self):
    """Fall back to wsgiref's FileWrapper unless the server supplies one."""
    self.assertEqual(wsgir.get_file_wrapper({}), FileWrapper)
    self.assertEqual(wsgir.get_file_wrapper({'wsgi.file_wrapper': None}),
                     FileWrapper)
    sentinel = object()
    self.assertEqual(
        wsgir.get_file_wrapper({'wsgi.file_wrapper': sentinel}), sentinel)
def test_modified_since(self):
    """modified_since is False only once mtime exceeds the header timestamp."""
    since = 'Thu, 01 Jan 1970 00:00:01 GMT'
    # No If-Modified-Since header at all -> always True.
    self.assertTrue(wsgir.modified_since({}, 0))
    for mtime, expected in ((0, True), (1, True), (2, False)):
        got = wsgir.modified_since({'HTTP_IF_MODIFIED_SINCE': since}, mtime)
        self.assertEqual(bool(got), expected)
def test_get_session(self):
    """With no cookie in the environ the session is a fresh empty dict."""
    self.init_environ()
    self.assertEqual(wsgir.get_session(self.environ), {})
def test_decode_session(self):
    """Malformed session strings decode to None instead of raising."""
    key = 'secret'
    # No '.' separator at all.
    self.assertIsNone(wsgir.decode_session('invalid session', key))
    # Has a separator but carries invalid base64 / signature data.
    self.assertIsNone(wsgir.decode_session('invalid.session', key))
def test_compare_digest(self):
    """compare_digest is True for equal inputs and False otherwise."""
    self.assertTrue(wsgir.compare_digest('foo', 'foo'))
    self.assertFalse(wsgir.compare_digest('foo', 'bar'))
def test_safe_join(self):
    """safe_join rejects absolute paths and parent-directory traversal."""
    self.assertIsNone(wsgir.safe_join('/var/www', '/etc/passwd'))
    self.assertIsNone(wsgir.safe_join('/var/www', '../etc/passwd'))
    self.assertEqual(wsgir.safe_join('/var/www', 'etc/passwd'),
                     '/var/www/etc/passwd')
def init_environ(self, kwargs=None):
    """Build a debug environ backed by a temp database and create the schema.

    kwargs, when given, is merged into the environ before init_db runs.
    """
    env = wsgir.init_environ(self.environ)
    env['wsgir.debug'] = True
    env['wsgir.database'] = os.path.join(self.temp_dir, 'test.db')
    if kwargs is not None:
        env.update(kwargs)
    self.environ = env
    wsgir.init_db(env)
def login(self):
    """Mark the session authenticated and plant its cookie in the environ."""
    sess = wsgir.get_session(self.environ)
    sess['logged_in'] = True
    hdrs = wsgir.make_session_headers(self.environ, sess)
    self.environ['HTTP_COOKIE'] = hdrs['Set-Cookie']
class Mock(object):
    """Minimal call recorder: remembers (args, kwargs) of every invocation.

    Accessing an unknown attribute yields a fresh Mock, so chained attribute
    lookups never fail.
    """

    def __init__(self):
        # One (args, kwargs) tuple appended per call made on this instance.
        self.called = []

    def __getattr__(self, key):
        # Only reached for names missing from the instance dict.
        return Mock()

    def __call__(self, *args, **kwargs):
        self.called.append((args, kwargs))
# Allow running this test module directly (python test_wsgir.py).
if __name__ == '__main__':
    unittest.main()
|
{"/wsgir/__main__.py": ["/wsgir/__init__.py"], "/tests/test_wsgir.py": ["/wsgir/__init__.py"]}
|
22,241
|
manicmaniac/wsgir
|
refs/heads/master
|
/setup.py
|
from distutils.core import setup

# Packaging metadata for the wsgir distribution.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools. The open() call below also leaves the README
# handle to the garbage collector — confirm whether that matters here.
setup(
    name='wsgir',
    version='0.0.1',
    description='Flaskr without Flask.',
    long_description=open('README.rst').read(),
    packages=['wsgir'],
    include_package_data=True,
)
|
{"/wsgir/__main__.py": ["/wsgir/__init__.py"], "/tests/test_wsgir.py": ["/wsgir/__init__.py"]}
|
22,242
|
manicmaniac/wsgir
|
refs/heads/master
|
/wsgir/__init__.py
|
from __future__ import unicode_literals
from base64 import b64decode, b64encode
try:
import builtins
except ImportError:
import __builtin__ as builtins
from cgi import FieldStorage
from contextlib import closing
from email.utils import formatdate, mktime_tz, parsedate_tz
import hashlib
import hmac
try:
from html import escape
except ImportError:
from cgi import escape
try:
from http.cookies import SimpleCookie
except ImportError:
from Cookie import SimpleCookie
import logging
from mimetypes import guess_type
import os
try:
import cPickle as pickle
except ImportError:
import pickle
from posixpath import normpath
import re
import sqlite3
from string import Formatter
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper, guess_scheme
def app(environ, start_response):
    """WSGI entry point; wraps routing in wsgiref's validator when debugging."""
    environ = init_environ(environ)
    if not environ['wsgir.debug']:
        return route(environ, start_response)
    # Import here so production requests never pay for the validator.
    from wsgiref.validate import validator
    return validator(route)(environ, start_response)
def route(environ, start_response):
    """Dispatch the request path to the first matching handler, else 404.

    Capture groups of the matching pattern are stashed in
    environ['wsgir.url_args'] for the handler.
    """
    handlers = (
        (r'^$', show_entries),
        (r'^add/?$', add_entry),
        (r'^login/?$', login),
        (r'^logout/?$', logout),
        (r'^static/(.*)$', static),
    )
    path = environ['PATH_INFO'].lstrip('/')
    for pattern, handler in handlers:
        match = re.search(pattern, path)
        if match is None:
            continue
        environ['wsgir.url_args'] = match.groups()
        return handler(environ, start_response)
    return not_found(environ, start_response)
def show_entries(environ, start_response):
    """Render the index page: all entries (newest first) plus queued flashes."""
    with closing(connect_db(environ)) as db:
        cursor = db.execute('select title, text from entries order by id desc')
        entries = cursor.fetchall()
    session = get_session(environ)
    # Flash messages are one-shot: popping them clears the queue, and the
    # trimmed session is re-signed into the response cookie below.
    flashes = session.pop('flashes', [])
    headers = make_session_headers(environ, session)
    headers.add_header(b'Content-Type', b'text/html', charset=b'UTF-8')
    start_response(b'200 OK', headers.items())
    body = render(environ, 'show_entries.html', entries=entries)
    return [render(environ, 'layout.html', body=body, flashes=flashes)]
def add_entry(environ, start_response):
    """Create a new entry from POSTed form data.

    Requires a logged-in session (401 otherwise); redirects 303 to / on
    success; any method other than POST gets a 405 with Allow: POST.
    """
    method = environ['REQUEST_METHOD']
    if method == 'POST':
        session = get_session(environ)
        if not session.get('logged_in'):
            return unauthorized(environ, start_response)
        form = FieldStorage(fp=environ['wsgi.input'], environ=environ)
        # Parameterized SQL keeps user input out of the statement text;
        # `with db` commits the insert as one transaction.
        with closing(connect_db(environ)) as db, db:
            db.execute('insert into entries (title, text) values (?, ?)',
                       [form.getfirst('title'), form.getfirst('text')])
        session.setdefault('flashes', [])
        session['flashes'].append('New entry was successfully posted')
        headers = make_session_headers(environ, session)
        headers.add_header(b'Content-Type', b'text/plain', charset=b'UTF-8')
        headers.add_header(b'Location', b'/')
        start_response(b'303 See Other', headers.items())
        return []
    return make_method_not_allowed('POST')(environ, start_response)
def login(environ, start_response):
    """Log the user in; GET renders the form, POST validates credentials.

    On success the session gains logged_in=True, a flash is queued, and the
    client is 302-redirected to /. A failed POST falls through and re-renders
    the form with the error message.
    """
    error = None
    method = environ['REQUEST_METHOD']
    session = get_session(environ)
    if method == 'POST':
        form = FieldStorage(fp=environ['wsgi.input'], environ=environ)
        if form.getfirst('username') != environ['wsgir.username']:
            error = 'Invalid username'
        elif form.getfirst('password') != environ['wsgir.password']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            session.setdefault('flashes', [])
            session['flashes'].append('You were logged in')
            headers = make_session_headers(environ, session)
            headers.add_header(b'Content-Type', b'text/plain',
                               charset=b'UTF-8')
            headers.add_header(b'Location', b'/')
            start_response(b'302 Found', headers.items())
            return []
    # GET, or POST that failed validation: render the form.
    flashes = session.pop('flashes', [])
    headers = make_session_headers(environ, session)
    # NOTE(review): unlike show_entries, no charset is attached to this
    # text/html response — confirm whether charset="UTF-8" was intended.
    headers.add_header(b'Content-Type', b'text/html')
    start_response(b'200 OK', headers.items())
    body = render(environ, 'login.html', error=error)
    return [render(environ, 'layout.html', body=body, flashes=flashes)]
def logout(environ, start_response):
    """Drop the logged_in flag, queue a flash, and 302-redirect to /."""
    session = get_session(environ)
    session.pop('logged_in', None)
    session.setdefault('flashes', []).append('You were logged out')
    headers = make_session_headers(environ, session)
    headers.add_header(b'Content-Type', b'text/plain', charset=b'UTF-8')
    headers.add_header(b'Location', b'/')
    start_response(b'302 Found', headers.items())
    return []
def static(environ, start_response):
    """Serve a file from the static dir with one-year caching headers.

    Responds 404 for unsafe or missing paths, 304 on a conditional GET
    (but see NOTE below), and 406 when gzip-encoded content cannot be
    accepted by the client.
    """
    filename = environ['wsgir.url_args'][0]
    # safe_join returns None for traversal attempts -> treated as not found.
    path = safe_join(environ['wsgir.static_dir'], filename)
    if path is None:
        return not_found(environ, start_response)
    headers = Headers([])
    max_age = 365 * 24 * 60 * 60  # one year, in seconds
    headers.add_header(b'Cache-Control', b'public, max-age={}'.format(max_age))
    try:
        stat = os.stat(path)
    except (IOError, OSError):
        return not_found(environ, start_response)
    mtime = int(stat.st_mtime)
    last_modified = formatdate(mtime, localtime=False, usegmt=True)
    headers.add_header(b'Last-Modified', bytes(last_modified))
    expires = formatdate(mtime + max_age, localtime=False, usegmt=True)
    headers.add_header(b'Expires', bytes(expires))
    # NOTE(review): modified_since() returns True when the client's cached
    # timestamp is >= mtime, so this sends 304 when the file is NEWER than
    # the client's copy — the condition looks inverted; confirm intent.
    if not modified_since(environ, mtime):
        start_response(b'304 Not Modified', headers.items())
        return []
    mimetype, encoding = guess_type(path)
    headers.add_header(b'Content-Type',
                       bytes(mimetype or b'application/octet-stream'))
    if encoding == 'gzip':
        # Pre-compressed file: only serve it if the client can decode it.
        accept_encoding = environ.get('HTTP_ACCEPT_ENCODING', 'identity')
        if not ((b'gzip' in accept_encoding) or
                (b'deflate' in accept_encoding)):
            return not_acceptable(environ, start_response)
        headers.add_header(b'Content-Encoding', bytes(encoding))
    try:
        f = open(path, 'rb')
    except (IOError, OSError):
        return not_found(environ, start_response)
    start_response(b'200 OK', headers.items())
    # Wrap the file for streaming; the server closes the wrapper when done.
    file_wrapper = get_file_wrapper(environ)
    return file_wrapper(f)
def unauthorized(environ, start_response):
    """Emit a plain-text 401 Unauthorized response."""
    response_headers = [(b'Content-Type', b'text/plain; charset="UTF-8"')]
    start_response(b'401 Unauthorized', response_headers)
    return [b'Unauthorized\n']
def not_found(environ, start_response):
    """Emit a plain-text 404 Not Found response."""
    response_headers = [(b'Content-Type', b'text/plain; charset="UTF-8"')]
    start_response(b'404 Not Found', response_headers)
    return [b'Not Found\n']
def make_method_not_allowed(*allow):
    """Return a 405 handler whose Allow header lists the given methods."""
    allowed_methods = b', '.join(allow)

    def method_not_allowed(environ, start_response):
        response_headers = [
            (b'Content-Type', b'text/plain; charset="UTF-8"'),
            (b'Allow', allowed_methods),
        ]
        start_response(b'405 Method Not Allowed', response_headers)
        return [b'Method Not Allowed\n']

    return method_not_allowed
def not_acceptable(environ, start_response):
    """Emit a plain-text 406 Not Acceptable response."""
    response_headers = [(b'Content-Type', b'text/plain; charset="UTF-8"')]
    start_response(b'406 Not Acceptable', response_headers)
    return [b'Not Acceptable\n']
def init_environ(environ):
    """Return a copy of *environ* extended with wsgir application settings.

    The WSGIR_DEBUG environment variable ("0"/"1") toggles debug mode.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    debug = bool(int(os.getenv('WSGIR_DEBUG', 0)))
    log_filename = os.path.join(here, 'wsgir.log')
    env = dict(environ)
    env.update({
        'wsgir.debug': debug,
        'wsgir.root_dir': here,
        'wsgir.database': os.path.join(here, 'wsgir.db'),
        'wsgir.log_filename': log_filename,
        'wsgir.logger': init_logger(debug, log_filename),
        'wsgir.secret_key': b'secret key',
        'wsgir.static_dir': os.path.join(here, 'static'),
        'wsgir.template_dir': os.path.join(here, 'templates'),
        'wsgir.username': 'admin',
        'wsgir.password': 'default',
    })
    return env
def init_logger(debug, log_filename):
    """Configure and return the module logger.

    debug=True: DEBUG level, logging to stderr. Otherwise: WARNING level,
    logging to *log_filename*.

    BUG FIX: this runs once per request (via init_environ), and the original
    attached a new handler on every call, duplicating each log line. A
    handler is now attached only if the logger does not already have one of
    the required kind.
    """
    logger = logging.getLogger(__name__)
    if debug:
        logger.setLevel(logging.DEBUG)
        if not any(isinstance(h, logging.StreamHandler)
                   for h in logger.handlers):
            logger.addHandler(logging.StreamHandler())
    else:
        logger.setLevel(logging.WARNING)
        if not any(isinstance(h, logging.FileHandler)
                   for h in logger.handlers):
            logger.addHandler(logging.FileHandler(log_filename))
    return logger
def connect_db(environ):
    """Open the configured SQLite database with dict-like row access."""
    connection = sqlite3.connect(environ['wsgir.database'])
    connection.row_factory = sqlite3.Row
    return connection
def init_db(environ):
    """(Re)create the database schema from schema.sql in the project root."""
    schema_path = os.path.join(environ['wsgir.root_dir'], 'schema.sql')
    with open(schema_path, 'r') as schema_file:
        schema_sql = schema_file.read()
    # `with db` wraps the script in a transaction; closing() frees the handle.
    with closing(connect_db(environ)) as db, db:
        db.executescript(schema_sql)
def render(environ, template_name, _formatter=None, **kwargs):
    """Read *template_name* from the template dir and format it with **kwargs.

    _formatter is used internally by TemplateFormatter._include so nested
    templates share one formatter instance.

    NOTE(review): bytes(str) only works on Python 2 (where bytes is str);
    under Python 3 this raises TypeError — confirm the target interpreter.
    """
    template_path = os.path.join(environ['wsgir.template_dir'], template_name)
    with open(template_path, 'r') as f:
        template = f.read()
    if _formatter is None:
        _formatter = TemplateFormatter(environ)
    return bytes(_formatter.format(template, **kwargs))
class TemplateFormatter(Formatter):
    """string.Formatter subclass that evaluates replacement fields as Python.

    Fields see builtins plus helpers: e() (HTML escape), environ, include()
    (render a sub-template with the same formatter), and the current session.

    NOTE(security): get_field() uses eval(), so templates are fully trusted
    code — never render templates from untrusted sources.
    """
    def __init__(self, environ):
        self._environ = environ
        # Globals available inside every {field} expression.
        self._globals = dict(
            builtins.__dict__,
            e=self._escape,
            environ=self._environ,
            include=self._include,
            session=get_session(environ),
        )
    def get_field(self, field_name, args, kwargs):
        # Evaluate the field expression; format kwargs act as local names.
        return eval(field_name, self._globals, kwargs), field_name
    def _escape(self, s):
        # HTML-escape (including quotes) after coercing to str.
        return escape(str(s), quote=True)
    def _include(self, template_name):
        # Reuse this formatter so nested templates see the same helpers.
        return render(self._environ, template_name, _formatter=self)
def get_file_wrapper(environ):
    """Prefer the server-provided wsgi.file_wrapper; fall back to wsgiref's."""
    wrapper = environ.get('wsgi.file_wrapper')
    return FileWrapper if wrapper is None else wrapper
def modified_since(environ, mtime):
    """Compare *mtime* against the If-Modified-Since request header.

    Returns True when the header is absent, otherwise whether the header's
    timestamp is >= mtime.

    NOTE(review): returning True when the client's cached timestamp is >=
    mtime looks inverted for 304 handling in static() — confirm intent
    before changing (the unit tests codify this behavior).
    """
    header = environ.get('HTTP_IF_MODIFIED_SINCE')
    if header is None:
        return True
    return mktime_tz(parsedate_tz(header)) >= mtime
def get_session(environ):
    """Recover the session dict from the wsgir_session cookie.

    Returns {} when no cookie is present; otherwise whatever decode_session
    yields for the cookie value (None when the signature check fails).
    """
    cookie = SimpleCookie(environ.get('HTTP_COOKIE', ''))
    if cookie is not None:
        morsel = cookie.get('wsgir_session')
        if morsel is not None:
            return decode_session(morsel.value, environ['wsgir.secret_key'])
    return {}
def make_session_headers(environ, session):
    """Serialize *session* into a signed wsgir_session Set-Cookie header set.

    The cookie is scoped to SERVER_NAME, marked HttpOnly, lives 30 days,
    and gains the Secure flag when the request arrived over HTTPS.
    """
    cookie = SimpleCookie()
    key = environ['wsgir.secret_key']
    cookie[b'wsgir_session'] = encode_session(session, key)
    cookie[b'wsgir_session'][b'domain'] = environ['SERVER_NAME']
    cookie[b'wsgir_session'][b'httponly'] = True
    cookie[b'wsgir_session'][b'max-age'] = 30 * 24 * 60 * 60
    if guess_scheme(environ) == b'https':
        cookie[b'wsgir_session'][b'secure'] = True
    headers = Headers([])
    # output(header='') yields bare "name=value; attrs" lines, which become
    # the values of individual Set-Cookie headers.
    for line in cookie.output(header='').splitlines():
        headers.add_header(b'Set-Cookie', bytes(line))
    return headers
def decode_session(session_string, key):
    """Verify and unpickle a session cookie value; return None if invalid.

    The value has the form "<b64 pickle>.<b64 hmac-sha256>". The HMAC
    signature is checked before unpickling, so only payloads signed with
    *key* are ever deserialized (pickle on unsigned data would be unsafe).
    """
    if session_string.count('.') != 1:
        return None
    try:
        # BUG FIX: Python 2's b64decode raises TypeError on bad input, but
        # Python 3 raises binascii.Error, a ValueError subclass — catch both
        # so a corrupt cookie degrades to "no session" instead of a 500.
        dumped_session, signature = map(b64decode, session_string.split('.'))
    except (TypeError, ValueError):
        return None
    mac = hmac.new(key, dumped_session, digestmod=hashlib.sha256)
    expected_signature = mac.digest()
    if compare_digest(signature, expected_signature):
        return pickle.loads(dumped_session)
def encode_session(session, key):
    """Serialize *session* to "<b64 pickle>.<b64 hmac-sha256>" signed with *key*.

    NOTE(review): on Python 3, formatting the bytes returned by b64encode()
    with str.format() would embed "b'...'" into the cookie value — this code
    assumes Python 2 str/bytes equivalence; confirm the target interpreter.
    """
    dumped_session = pickle.dumps(session)
    mac = hmac.new(key, dumped_session, digestmod=hashlib.sha256)
    signature = mac.digest()
    return '{}.{}'.format(b64encode(dumped_session), b64encode(signature))
def compare_digest(a, b):
    """Constant-time equality check for MAC signatures.

    Delegates to hmac.compare_digest when available (Python 2.7.7+ / 3.3+).

    BUG FIX: the original fallback compared with `a == b`, which
    short-circuits at the first differing character and therefore leaks,
    through timing, how much of a forged signature matches. The fallback now
    XOR-accumulates over every position so the comparison time depends only
    on the length.
    """
    if hasattr(hmac, 'compare_digest'):
        return hmac.compare_digest(a, b)
    if len(a) != len(b):
        return False
    result = 0
    for x, y in zip(bytearray(a), bytearray(b)):
        result |= x ^ y
    return result == 0
def safe_join(directory, filename):
    """Join *filename* under *directory*, refusing path-escape attempts.

    Returns None for absolute paths, parent-directory traversal, or paths
    containing a non-POSIX separator; otherwise the joined path.
    """
    normalized = normpath(filename)
    for sep in (os.path.sep, os.path.altsep):
        if sep not in (None, '/') and sep in normalized:
            return None
    if os.path.isabs(normalized) or normalized.startswith('../'):
        return None
    return os.path.join(directory, normalized)
|
{"/wsgir/__main__.py": ["/wsgir/__init__.py"], "/tests/test_wsgir.py": ["/wsgir/__init__.py"]}
|
22,246
|
chuazh/cs231n_project
|
refs/heads/master
|
/.ipynb_checkpoints/train_model-checkpoint.py
|
#!/usr/bin/env python
import input_util as ip
import transfer_models as mdl
import vis_util as vis
import torch
import torch.optim as optim
import torch.nn as nn
import pickle
import numpy as np
def string_prepend(lr, wt, is_deep):
    """Build the artifact filename prefix encoding lr, weight decay, depth."""
    depth_tag = 'deep' if is_deep else 'shallow'
    return 'lr{}_wt{}_{}'.format(lr, wt, depth_tag)
def train_net(num_epochs,is_deep,num_train_trials,num_val_trials,lr,wt):
    """Train a transfer-learning ResNet regressor and persist all artifacts.

    Saves the best model weights, pickled train/val loss histories, and
    sequential-validation predictions, all under the prefix produced by
    string_prepend(lr, wt, is_deep). Returns the validation loss history.
    """
    # preliminaries:
    input_size = 224 # for RESNET
    model_name = 'resnet'
    num_output = 14
    feature_extract = True
    # load our dictionary for the samples seq order
    f = open("index_dict.dat",'rb')
    index_dict = pickle.load(f)
    f.close()
    # load the dataset
    # NOTE(review): input_util.load_dataset returns FOUR loaders
    # (train, val, test, seq); this 3-way unpack would raise ValueError
    # against that version — confirm which load_dataset is in use.
    train_loader,val_loader,seq_loader = ip.load_dataset(input_size,index_dict,num_train_trials=num_train_trials,num_val_trials=num_val_trials)
    dataloaders_dict = {}
    dataloaders_dict['train']= train_loader
    dataloaders_dict['val'] = val_loader
    #intialize our model
    model_ft, input_size = mdl.initialize_model(model_name, num_output, feature_extract, use_pretrained=True,is_deep = is_deep)
    print(model_ft)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('using device:')
    print(device)
    # Send the model to GPU
    model_ft = model_ft.to(device)
    # Gather the parameters to be optimized/updated in this run. If we are
    # finetuning we will be updating all parameters. However, if we are
    # doing feature extract method, we will only update the parameters
    # that we have just initialized, i.e. the parameters with requires_grad
    # is True.
    params_to_update = model_ft.parameters()
    print("Params to learn:")
    if feature_extract:
        params_to_update = []
        for name,param in model_ft.named_parameters():
            if param.requires_grad == True:
                params_to_update.append(param)
                print("\t",name)
    else:
        for name,param in model_ft.named_parameters():
            if param.requires_grad == True:
                print("\t",name)
    # Observe that all parameters are being optimized
    #optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
    optimizer_ft = optim.Adam(params_to_update, lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=wt, amsgrad=False)
    # Setup the loss fxn
    criterion = nn.MSELoss(reduction='mean')
    # Train and evaluate
    model_ft, hist, train_hist = mdl.train_model(model_ft, dataloaders_dict, device, criterion, optimizer_ft, num_epochs=num_epochs,print_every = 5 , is_inception=(model_name=="inception"))
    prefix = string_prepend(lr,wt,is_deep)
    # Persist weights and both loss histories under the run-specific prefix.
    f = open(prefix+'_model_weights.model','wb')
    torch.save(model_ft.state_dict(), f)
    f.close()
    f = open(prefix+'_train_loss.dat','wb')
    pickle.dump(train_hist,f)
    f.close()
    f = open(prefix+'_val_loss.dat','wb')
    pickle.dump(hist,f)
    f.close()
    vis.check_accuracy_vis(prefix,seq_loader, model_ft, device, plot = False) # don't plot our data
    return hist
'''--------------------------------------------------------------------------------------------------'''
# Hyperparameter sweep driver: stage 1 picks the better head depth at fixed
# lr/weight decay; stage 2 grid-searches lr x weight decay with that depth.
num_epochs = 1
num_train_trials = 1
num_val_trials = 1
#lr_list = [1e-3,1e-2,1e-4]
#wt_list = [0,0.01,0.001]
lr_list = [1e-3,1e-2]
wt_list = [0,0.01]
is_deep_list = [False,True]
val_hist_list = []
# Stage 1: shallow vs deep regression head at lr=0.001, wt=0.
for is_deep in is_deep_list:
    lr = 0.001
    wt = 0
    val_hist_temp = train_net(num_epochs,is_deep,num_train_trials,num_val_trials,lr,wt)
    val_hist_list.append(val_hist_temp)
# Compare the best (minimum) validation losses of the two candidates;
# index 0 is the shallow run, index 1 the deep run.
dp = np.amin(np.array(val_hist_list[1]))
shal = np.amin(np.array(val_hist_list[0]))
if dp > shal:
    print('shallow is better')
    is_deep = False
else:
    print('deep is better')
    is_deep = True
# Stage 2: lr / weight-decay grid search using the selected depth.
val_hist_list_final = []
for lr in lr_list:
    for wt in wt_list:
        val_hist_temp = train_net(num_epochs,is_deep,num_train_trials,num_val_trials,lr,wt)
        val_hist_list_final.append(val_hist_temp)
|
{"/.ipynb_checkpoints/train_model-checkpoint.py": ["/input_util.py"]}
|
22,247
|
chuazh/cs231n_project
|
refs/heads/master
|
/.ipynb_checkpoints/vis_util-checkpoint.py
|
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as T
import torchvision.models as models
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import time
import os
import copy
import numpy as np
def check_accuracy_vis(prefix,loader, model, device, plot=True):
    """Run the model over a sequential loader, saving predictions and truth.

    Writes <prefix>_vis_scores.dat and <prefix>_vis_gt.dat as CSV; when
    plot=True, plots the first three output channels (predicted in blue,
    ground truth in red) against the frame index.
    """
    print('Checking accuracy on sequential validation set')
    model.eval() # set model to evaluation mode
    count = 0
    score_array = np.empty((0,14))
    gt_array = np.empty((0,14))
    # NOTE(review): a figure is created even when plot=False.
    plt.figure()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=torch.float) # move to device, e.g. CPU
            y = y.to(device=device, dtype=torch.float)
            scores = model(x)
            loss_fn = torch.nn.MSELoss(reduction='mean')
            loss = loss_fn(scores,y)
            scores = scores.to(device="cpu",dtype=torch.float)
            y = y.to(device = "cpu", dtype = torch.float)
            if plot:
                # Channels 0:3 — presumably the left-arm position; confirm
                # against the label layout in input_util.collect_labels.
                plt.plot(range(count, len(scores) + count), scores.numpy()[:,0:3], 'b')
                plt.plot(range(count, len(scores) + count), y.numpy()[:,0:3], 'r')
            # append our results
            score_array = np.vstack((score_array,scores.numpy()))
            gt_array = np.vstack((gt_array,y.numpy()))
            count = count + len(scores)
    #save our results
    print('saving our results...')
    np.savetxt(prefix+'_vis_scores.dat', score_array, delimiter=',') # X is an array
    np.savetxt(prefix+'_vis_gt.dat', gt_array, delimiter=',') # X is an array
    # NOTE(review): `loss` is only the LAST batch's MSE here, and is a
    # NameError if the loader is empty — confirm whether a running mean
    # over all batches was intended.
    print('MSE loss is: %f ' % loss)
    plt.show()
|
{"/.ipynb_checkpoints/train_model-checkpoint.py": ["/input_util.py"]}
|
22,248
|
chuazh/cs231n_project
|
refs/heads/master
|
/.ipynb_checkpoints/transfer_models-checkpoint.py
|
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as T
import torchvision.models as models
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import time
import os
import copy
import numpy as np
def train_model(model, dataloaders, device, criterion, optimizer, num_epochs=25, print_every = 100 ,is_inception=False):
    """Standard train/val loop; returns the best-on-validation model.

    dataloaders: dict with 'train' and 'val' DataLoaders.
    Returns (model_with_best_val_weights, val_loss_history, train_loss_history);
    the histories record running-average loss snapshots every 100 iterations.
    """
    since = time.time()
    val_loss_history = []
    train_loss_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_loss = 10000000000.0 # some large number
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            num_iter = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device,dtype=torch.float)
                labels = labels.to(device,dtype= torch.float)
                num_iter +=1
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss
                    # Special case for inception because in training it has an auxiliary output. In train
                    # mode we calculate the loss by summing the final output and the auxiliary output
                    # but in testing we only consider the final output.
                    if is_inception and phase == 'train':
                        # From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4*loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    #_, preds = torch.max(outputs, 1) dont need this because we are not doing classification
                    if num_iter%print_every == 0:
                        print('loss for iteration %d: %.6f' % (num_iter,loss),flush=True)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                # Snapshot the running-average loss every 100 iterations.
                if num_iter%100==0:
                    if phase == 'val':
                        val_loss_history.append(running_loss/num_iter)
                    if phase == 'train':
                        train_loss_history.append(running_loss/num_iter)
            #running_corrects += torch.sum(preds == labels.data)
            #epoch_loss = running_loss / len(dataloaders[phase].dataset)
            # NOTE(review): divides by dataset_size/64, i.e. assumes batch
            # size 64 — confirm against the DataLoader configuration.
            epoch_loss = running_loss / (len(dataloaders[phase].dataset)/64)
            #epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print('{} Loss: {:.4f}'.format(phase, epoch_loss))
            # deep copy the model
            if phase == 'val' and epoch_loss < best_loss:
                best_loss = epoch_loss
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_loss))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_loss_history, train_loss_history
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every model parameter when feature extracting (no fine-tuning)."""
    if not feature_extracting:
        return
    for param in model.parameters():
        param.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True, is_deep=True):
    """Build a torchvision backbone with its head replaced for num_classes.

    model_name: one of 'resnet', 'alexnet', 'vgg', 'squeezenet', 'densenet',
        'inception'. feature_extract=True freezes the backbone so only the
        new head trains. For 'resnet', is_deep=True stacks an extra MLP
        (Linear/BatchNorm/ReLU x3) on top of the backbone output.
    Returns (model, input_size) where input_size is the expected square
    input edge (224, or 299 for inception). Exits the process on an
    unknown model_name.
    """
    # Initialize these variables which will be set in this if statement. Each of these
    # variables is model specific.
    model_ft = None
    input_size = 0
    if model_name == "resnet":
        """ Resnet50
        """
        model_res = models.resnet50(pretrained=use_pretrained)
        set_parameter_requires_grad(model_res, feature_extract)
        num_ftrs = model_res.fc.in_features
        if is_deep:
            out_features = 2048*4
            hidden_sizes = (out_features*2,out_features)
            # add additional FC layers to better utilize the extracted features.
            out1,out2 = hidden_sizes
            model_res.fc = nn.Linear(num_ftrs, out_features)
            model_ft = nn.Sequential(
                model_res,
                nn.BatchNorm1d(out_features),
                nn.ReLU(),
                nn.Linear(out_features,out1),
                nn.BatchNorm1d(out1),
                nn.ReLU(),
                nn.Linear(out1,out2),
                nn.BatchNorm1d(out2),
                nn.ReLU(),
                nn.Linear(out2,num_classes)
            )
        else:
            # Shallow head: a single linear layer straight to the outputs.
            out_features = num_classes
            model_res.fc = nn.Linear(num_ftrs, out_features)
            model_ft = model_res
        input_size = 224
    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224
    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224
    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxilary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 299
    else:
        print("Invalid model name, exiting...")
        exit()
    return model_ft, input_size
|
{"/.ipynb_checkpoints/train_model-checkpoint.py": ["/input_util.py"]}
|
22,249
|
chuazh/cs231n_project
|
refs/heads/master
|
/input_util.py
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, sampler, Dataset
import os
import torchvision.datasets as dset
import torchvision.transforms as T
import torchvision.models
import glob
from PIL import Image
import cv2
import numpy as np
import pickle
from natsort import humansorted
import pdb
import random
def collect_video_framecount(action,subject,trial_num):
    '''
    Count the frames in capture 1 of a trial's video by reading it through.

    Inputs:
        action: task code, one of 'KT', 'S', 'NP'.
        subject: subject letter id (e.g. 'B').
        trial_num: 1-based trial number.
    Outputs:
        Number of successfully decoded frames (reads minus one, because the
        final read fails and still increments the counter).
    '''
    action_dict = {'KT':'Knot_Tying','S':'Suturing','NP': 'Needle_Passing'}
    act = action_dict[action]
    filename1 = act+'/video/'+act+'_'+subject+'00'+str(trial_num)+'_capture1.avi'
    filename2 = act+'/video/'+act+'_'+subject+'00'+str(trial_num)+'_capture2.avi'
    print('reading '+filename1)
    vidcap1 = cv2.VideoCapture(filename1)
    vidcap2 = cv2.VideoCapture(filename2)  # NOTE(review): capture 2 is opened but never read
    count = 0
    success = True
    while success:
        success,image = vidcap1.read()
        '''
        success,image = vidcap2.read()
        '''
        count += 1
    print('total frame count : %d' % count)
    return count-1
def collect_video_sample(action, subject, trial_num):
    """Dump per-frame PNGs for one trial's capture-1 video into data/.

    Writes at most min(video frame count, kinematic label count) frames so
    the images stay aligned with the label rows. Returns the number of
    frames written.
    """
    num_frames = collect_video_framecount(action, subject, trial_num)
    action_dict = {'KT': 'Knot_Tying', 'S': 'Suturing', 'NP': 'Needle_Passing'}
    act = action_dict[action]
    filename1 = act+'/video/'+act+'_'+subject+'00'+str(trial_num)+'_capture1.avi'
    filename2 = act+'/video/'+act+'_'+subject+'00'+str(trial_num)+'_capture2.avi'
    print('reading '+filename1)
    vidcap1 = cv2.VideoCapture(filename1)
    vidcap2 = cv2.VideoCapture(filename2)  # NOTE: capture 2 is currently unused
    # Cap the frame count by the number of kinematic labels available.
    filepath = act + '/kinematics/AllGestures/'
    filename = filepath + act + '_' + subject + '00' + str(trial_num) + '.txt'
    data = np.loadtxt(filename)
    num_labels = data.shape[0]
    print('total labels loaded: %d' % num_labels)
    if num_labels < num_frames:
        num_frames = num_labels
    count = 0
    success = True
    while success and count < num_frames:
        success, image = vidcap1.read()
        if not success:
            # BUG FIX: the original wrote the frame before checking success,
            # passing image=None to cv2.imwrite on the final failed read.
            break
        write_name = 'data/' + subject+'_'+str(trial_num)+'_1'+'_%d_' + action + '.png'
        cv2.imwrite(write_name % count, image)  # save frame as png file
        count += 1
        if count % 100 == 0:
            print('capturing frame %d' % count)
    print('total frame count : %d' % count)
    # BUG FIX: the original ended with `return out`, but `out` was never
    # defined in this function (NameError). Return the frame count instead.
    return count
def collect_labels(act,task,subj,trial):
    """Assemble per-frame 14-dim labels (L/R position + quaternion) per trial.

    act: task directory name; task: task code passed to
    collect_video_framecount; subj: list of subject ids; trial: per-subject
    lists of 0-based trial indices.
    Returns (y, index_dict): y stacks every trial's rows, and index_dict maps
    '<subj>00<trial>' to the row range of that trial within y.
    """
    y = np.empty((0,14)) # 6 for position, 24 for direction cosines, 14 for quaternion
    index_dict = {}
    for i in range(len(subj)):
        for j in trial[i]:
            # collect kinematic data
            count = collect_video_framecount(task,subj[i],j+1)
            filepath = act + '/kinematics/AllGestures/'
            filename = filepath + act + '_' + subj[i] + '00' + str(j+1) + '.txt'
            data = np.loadtxt(filename)
            r_test = data[0, 3:12]
            num_labels = data.shape[0]
            print('total labels loaded: %d' % num_labels)
            # NOTE(review): the rotation slices 41:50 / 60:69 overlap the
            # position slices 39:42 / 57:60 by one column — confirm the
            # kinematics file column layout before trusting these indices.
            dataL_pos = data[:count,39:42]
            dataR_pos = data[:count,57:60]
            dataL_rot = data[:count, 41:50]
            dataR_rot = data[:count, 60:69]
            # now we change the representation of the rotation to quaternion
            N = dataL_rot.shape[0]
            dataL_quat = np.zeros((N, 4))
            dataR_quat = np.zeros((N, 4))
            for k in range(N):
                L_rot = np.asarray(dataL_rot[k, :]).reshape((3,3))
                R_rot = np.asarray(dataR_rot[k, :]).reshape((3,3))
                dataL_quat[k,:] = quaternion_from_matrix(L_rot)
                dataR_quat[k,:] = quaternion_from_matrix(R_rot)
            # Row layout: [Lx,Ly,Lz, Lqw,Lqx,Lqy,Lqz, Rx,Ry,Rz, Rqw,Rqx,Rqy,Rqz]
            dataL = np.hstack((dataL_pos, dataL_quat))
            dataR = np.hstack((dataR_pos, dataR_quat))
            out = np.hstack((dataL,dataR))
            print(out.shape)
            index_dict[subj[i] + '00' + str(j+1)] = list(range(y.shape[0], y.shape[0] + out.shape[0]))
            y = np.vstack((y,out))
    return y, index_dict
class JIGSAWDataset(Dataset):
    """Frame-level JIGSAWS dataset pairing an image file with a label row."""
    def __init__(self, y, sortedFilelist , transform=None):
        """
        Args:
            y (ndarray): per-frame label matrix, aligned row-for-row with
                sortedFilelist.
            sortedFilelist (string): sorted list of filenames.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.labels = y
        self.sortedlist = sortedFilelist
        self.transform = transform
    def __len__(self):
        # One sample per label row (assumed equal to len(self.sortedlist)).
        return self.labels.shape[0]
    def __getitem__(self, idx):
        img_name = self.sortedlist[idx]
        # NOTE(review): trial_info is built from fixed character offsets of
        # the path ('data/<subj>_<trial>_<cam>_<frame>_<task>.png') — this is
        # fragile if the naming scheme changes; confirm against the writer.
        trial_info = img_name[13:] + '_' + img_name[5] + '00' + img_name[7]
        #print('opening image ' + img_name)
        image = Image.open(img_name,'r')
        label = self.labels[idx,:]
        if self.transform:
            image = self.transform(image)
        sample = (image,label,trial_info)
        return sample
def load_dataset(input_size,index_dict,num_train_trials=3,num_val_trials=3):
    """Build train/val/test/sequential DataLoaders over the JIGSAWS frames.

    input_size: square edge the images are cropped/resized to.
    index_dict: maps '<task>_<subj>00<trial>' to that trial's global frame
        indices (rows of the pickled label matrix in "kinematics").
    num_train_trials / num_val_trials are retained for interface
    compatibility; the split below is the fixed per-task assignment.

    Returns (train_loader, val_loader, test_loader, seq_loader); seq_loader
    iterates the validation frames in order for visualization.
    """
    tasks = ['KT', 'NP', 'S']
    # BUG FIX: the split dicts were assigned by key without ever being
    # created (NameError on the first line below), and the NP train split
    # was assigned to a misspelled `trian_list`.
    train_list = {}
    test_list = {}
    val_list = {}
    train_list['KT'] = [['B','C','D','E','F','G','I'],[[1,2,3],[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4],[0,1,2,3,4],[1,2,4]]]
    test_list['KT'] = [['C','E','H'],[[0],[0],[2,3,4]]]
    val_list['KT'] = [['B','D','F','G'],[[0],[0],[0],[0]]]
    train_list['NP'] = [['B','C','D','E','F','I'],[[0,1,2,3],[1,2,3,4],[0,1,2,3,4],[2,3,4],[2,3],[2,3,4]]]
    test_list['NP'] = [['F','H'],[[0],[1,3,4]]]
    val_list['NP'] = [['C','E','I'],[[0],[0],[1]]]
    train_list['S'] = [['B','C','D','E','F','G','I'],[[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4],[0,1,2,3,4],[1,2,3,4],[0,1,2,3,4]]]
    test_list['S'] = [['G','H'],[[0],[0,2,3,4]]]
    val_list['S'] = [['B','C','D','E'],[[0],[0],[0],[0]]]
    data_path = 'data'
    picklefile = open("kinematics", "rb" )
    num_files = len(next(os.walk('data'))[2]) #dir is your directory path as string
    print('num image files: %d' % num_files)
    # ImageNet normalization to match the pretrained backbones.
    trans = T.Compose([
        T.CenterCrop(240),
        T.Resize((input_size), interpolation=2),
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    transy = T.Compose([T.ToTensor()])
    y = pickle.load(picklefile)
    print('shape of label data: %dx%d' % (y.shape[0],y.shape[1]))
    picklefile.close()
    file_list = glob.glob('data/*.png')
    sortedlist = humansorted(file_list)
    dataset = JIGSAWDataset(y,sortedlist,transform = trans)
    # Expand the per-trial splits into flat global frame-index lists.
    range_train = []
    range_test = []
    range_val = []
    for task in tasks:
        # train
        subj = train_list[task][0]
        trial = train_list[task][1]
        for i in range(len(subj)):
            for j in trial[i]:
                range_train.extend(index_dict[task + '_' + subj[i] + '00' + str(j+1)])
        # test
        subj = test_list[task][0]
        trial = test_list[task][1]
        for i in range(len(subj)):
            for j in trial[i]:
                range_test.extend(index_dict[task + '_' + subj[i] + '00' + str(j+1)])
        # val
        subj = val_list[task][0]
        trial = val_list[task][1]
        for i in range(len(subj)):
            for j in trial[i]:
                range_val.extend(index_dict[task + '_' + subj[i] + '00' + str(j+1)])
    # Sequential copy of the validation frames, in order, for visualization.
    sortedlist_seq = [sortedlist[i] for i in range_val]
    seq_dataset = JIGSAWDataset(y[range_val,:],sortedlist_seq,transform = trans)
    train_loader = DataLoader(
        dataset,
        batch_size=64,
        num_workers=0,
        shuffle=False,
        sampler=sampler.SubsetRandomSampler(range_train)
    )
    val_loader = DataLoader(
        dataset,
        batch_size=64,
        num_workers=0,
        shuffle=False,
        sampler=sampler.SubsetRandomSampler(range_val)
    )
    test_loader = DataLoader(
        dataset,
        batch_size=64,
        num_workers=0,
        shuffle=False,
        sampler=sampler.SubsetRandomSampler(range_test)
    )
    seq_loader = DataLoader(
        seq_dataset,
        batch_size=64,
        num_workers=0,
        shuffle=False,
        sampler=sampler.SequentialSampler(seq_dataset)
    )
    # NOTE(review): some callers unpack only three values from this
    # function — confirm they have been updated for the four-loader return.
    return train_loader,val_loader,test_loader, seq_loader
# Adapted from Christoph Gohlke's transformations.py:
# https://www.lfd.uci.edu/~gohlke/code/transformations.py.html
def quaternion_from_matrix(matrix, isprecise=False):
    """Return the quaternion (w, x, y, z) of a rotation matrix.

    matrix: 3x3 rotation matrix, or 4x4 homogeneous transform.
    isprecise: when True, use the faster trace-based method, which requires
        a precise 4x4 homogeneous rotation matrix; otherwise use the robust
        eigenvector method (Shepperd/Bar-Itzhack), which tolerates slightly
        non-orthonormal input.

    The returned quaternion has a non-negative scalar part.
    """
    M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        q = np.empty((4, ))
        t = np.trace(M)
        if t > M[3, 3]:
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            i, j, k = 0, 1, 2
            if M[1, 1] > M[0, 0]:
                i, j, k = 1, 2, 0
            if M[2, 2] > M[i, i]:
                i, j, k = 2, 0, 1
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
            q = q[[3, 0, 1, 2]]
        # BUG FIX: the original called math.sqrt, but this module never
        # imports math (NameError on this path); numpy is already imported.
        q *= 0.5 / np.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array([[m00-m11-m22, 0.0, 0.0, 0.0],
                      [m01+m10, m11-m00-m22, 0.0, 0.0],
                      [m02+m20, m12+m21, m22-m00-m11, 0.0],
                      [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = np.linalg.eigh(K)
        q = V[[3, 0, 1, 2], np.argmax(w)]
    if q[0] < 0.0:
        np.negative(q, q)
    return q
|
{"/.ipynb_checkpoints/train_model-checkpoint.py": ["/input_util.py"]}
|
22,253
|
wojtii/smti
|
refs/heads/master
|
/solver_test.py
|
from solver import solve
from checker import check
def test_3x3():
    """Smallest complete instance: every person ranks all three candidates.

    Fix: solve() is called exactly once and its result is reused for both
    the stability check and the equality assertion (previously the solver
    was invoked a second time inside the check assert, doing the same
    expensive SMT solve twice).
    """
    albert, bradley, charles, diane, fergie, emily = 0, 1, 2, 0, 1, 2
    p = {
        'men': {
            albert: [diane, emily, fergie],
            bradley: [emily, diane, fergie],
            charles: [diane, emily, fergie],
        },
        'women': {
            diane: [bradley, albert, charles],
            fergie: [albert, bradley, charles],
            emily: [albert, bradley, charles],
        },
        'men_str': ['albert', 'bradley', 'charles'],
        'women_str': ['diane', 'fergie', 'emily'],
    }
    guy = {
        'albert': ['diane', 'emily', 'fergie'],
        'bradley': ['emily', 'diane', 'fergie'],
        'charles': ['diane', 'emily', 'fergie'],
    }
    girls = {
        'diane': ['bradley', 'albert', 'charles'],
        'fergie': ['albert', 'bradley', 'charles'],
        'emily': ['albert', 'bradley', 'charles'],
    }
    res = solve(p)
    # Validate the exact matching that is asserted below.
    assert check(guy, girls, res)
    assert res == {'diane': 'albert', 'emily': 'bradley', 'fergie': 'charles'}
def test_3x3_v2():
    """A second complete 3x3 instance with a different stable outcome."""
    arie, bert, carl, ann, betty, cindy = 0, 1, 2, 0, 1, 2
    p = {
        'men': {
            arie: [betty, ann, cindy],
            bert: [ann, cindy, betty],
            carl: [ann, cindy, betty],
        },
        'women': {
            ann: [bert, arie, carl],
            betty: [arie, carl, bert],
            cindy: [bert, arie, carl]
        },
        'men_str': ['arie', 'bert', 'carl'],
        'women_str': ['ann', 'betty', 'cindy'],
    }
    men_prefs = {
        'arie': ['betty', 'ann', 'cindy'],
        'bert': ['ann', 'cindy', 'betty'],
        'carl': ['ann', 'cindy', 'betty'],
    }
    women_prefs = {
        'ann': ['bert', 'arie', 'carl'],
        'betty': ['arie', 'carl', 'bert'],
        'cindy': ['bert', 'arie', 'carl'],
    }
    matching = solve(p)
    expected = {'betty': 'arie', 'ann': 'bert', 'cindy': 'carl'}
    assert check(men_prefs, women_prefs, matching)
    assert matching == expected
def test_10x10():
    """Classic 10x10 stable-marriage instance (Rosetta Code data set);
    asserts the solver returns the known stable matching."""
    # from rosetta
    abe, bob, col, dan, ed, fred, gav, hal, ian, jon = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
    abi, bea, cath, dee, eve, fay, gay, hope, ivy, jan = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
    # Index-based problem for the solver.
    p = {
        'men': {
            abe: [abi, eve, cath, ivy, jan, dee, fay, bea, hope, gay],
            bob: [cath, hope, abi, dee, eve, fay, bea, jan, ivy, gay],
            col: [hope, eve, abi, dee, bea, fay, ivy, gay, cath, jan],
            dan: [ivy, fay, dee, gay, hope, eve, jan, bea, cath, abi],
            ed: [jan, dee, bea, cath, fay, eve, abi, ivy, hope, gay],
            fred: [bea, abi, dee, gay, eve, ivy, cath, jan, hope, fay],
            gav: [gay, eve, ivy, bea, cath, abi, dee, hope, jan, fay],
            hal: [abi, eve, hope, fay, ivy, cath, jan, bea, gay, dee],
            ian: [hope, cath, dee, gay, bea, abi, fay, ivy, jan, eve],
            jon: [abi, fay, jan, gay, eve, bea, dee, cath, ivy, hope],
        },
        'women': {
            abi: [bob, fred, jon, gav, ian, abe, dan, ed, col, hal],
            bea: [bob, abe, col, fred, gav, dan, ian, ed, jon, hal],
            cath: [fred, bob, ed, gav, hal, col, ian, abe, dan, jon],
            dee: [fred, jon, col, abe, ian, hal, gav, dan, bob, ed],
            eve: [jon, hal, fred, dan, abe, gav, col, ed, ian, bob],
            fay: [bob, abe, ed, ian, jon, dan, fred, gav, col, hal],
            gay: [jon, gav, hal, fred, bob, abe, col, ed, dan, ian],
            hope: [gav, jon, bob, abe, ian, dan, hal, ed, col, fred],
            ivy: [ian, col, hal, gav, fred, bob, abe, ed, jon, dan],
            jan: [ed, hal, gav, abe, bob, jon, col, ian, fred, dan],
        },
        'men_str': ["abe", "bob", "col", "dan", "ed", "fred", "gav", "hal", "ian", "jon"],
        'women_str': ["abi", "bea", "cath", "dee", "eve", "fay", "gay", "hope", "ivy", "jan"],
    }
    # Name-based copies of the same preferences for the checker.
    guyprefers = {
        'abe': ['abi', 'eve', 'cath', 'ivy', 'jan', 'dee', 'fay', 'bea', 'hope', 'gay'],
        'bob': ['cath', 'hope', 'abi', 'dee', 'eve', 'fay', 'bea', 'jan', 'ivy', 'gay'],
        'col': ['hope', 'eve', 'abi', 'dee', 'bea', 'fay', 'ivy', 'gay', 'cath', 'jan'],
        'dan': ['ivy', 'fay', 'dee', 'gay', 'hope', 'eve', 'jan', 'bea', 'cath', 'abi'],
        'ed': ['jan', 'dee', 'bea', 'cath', 'fay', 'eve', 'abi', 'ivy', 'hope', 'gay'],
        'fred': ['bea', 'abi', 'dee', 'gay', 'eve', 'ivy', 'cath', 'jan', 'hope', 'fay'],
        'gav': ['gay', 'eve', 'ivy', 'bea', 'cath', 'abi', 'dee', 'hope', 'jan', 'fay'],
        'hal': ['abi', 'eve', 'hope', 'fay', 'ivy', 'cath', 'jan', 'bea', 'gay', 'dee'],
        'ian': ['hope', 'cath', 'dee', 'gay', 'bea', 'abi', 'fay', 'ivy', 'jan', 'eve'],
        'jon': ['abi', 'fay', 'jan', 'gay', 'eve', 'bea', 'dee', 'cath', 'ivy', 'hope']}
    galprefers = {
        'abi': ['bob', 'fred', 'jon', 'gav', 'ian', 'abe', 'dan', 'ed', 'col', 'hal'],
        'bea': ['bob', 'abe', 'col', 'fred', 'gav', 'dan', 'ian', 'ed', 'jon', 'hal'],
        'cath': ['fred', 'bob', 'ed', 'gav', 'hal', 'col', 'ian', 'abe', 'dan', 'jon'],
        'dee': ['fred', 'jon', 'col', 'abe', 'ian', 'hal', 'gav', 'dan', 'bob', 'ed'],
        'eve': ['jon', 'hal', 'fred', 'dan', 'abe', 'gav', 'col', 'ed', 'ian', 'bob'],
        'fay': ['bob', 'abe', 'ed', 'ian', 'jon', 'dan', 'fred', 'gav', 'col', 'hal'],
        'gay': ['jon', 'gav', 'hal', 'fred', 'bob', 'abe', 'col', 'ed', 'dan', 'ian'],
        'hope': ['gav', 'jon', 'bob', 'abe', 'ian', 'dan', 'hal', 'ed', 'col', 'fred'],
        'ivy': ['ian', 'col', 'hal', 'gav', 'fred', 'bob', 'abe', 'ed', 'jon', 'dan'],
        'jan': ['ed', 'hal', 'gav', 'abe', 'bob', 'jon', 'col', 'ian', 'fred', 'dan']}
    res = solve(p)
    assert check(guyprefers, galprefers, res)
    assert res == {'ivy': 'abe', 'cath': 'bob', 'dee': 'col', 'fay': 'dan', 'jan': 'ed',
                   'bea': 'fred', 'gay': 'gav', 'eve': 'hal', 'hope': 'ian', 'abi': 'jon'}
def test_incomp_5x5():
    """5x5 instance with incomplete preference lists: not every person
    ranks every member of the opposite group."""
    abe, bob, col, dan, ed = 0, 1, 2, 3, 4
    abi, bea, cath, dee, eve = 0, 1, 2, 3, 4
    p = {
        'men': {
            abe: [bea, dee, eve],
            bob: [abi, cath],
            col: [dee, bea, cath, eve, abi],
            dan: [eve, cath, abi, dee],
            ed: [dee, eve],
        },
        'women': {
            abi: [bob, col, dan],
            bea: [col, abe],
            cath: [col, dan, bob],
            dee: [ed, abe, dan, col],
            eve: [abe, ed, col, dan],
        },
        'men_str': ["abe", "bob", "col", "dan", "ed"],
        'women_str': ["abi", "bea", "cath", "dee", "eve"],
    }
    # Name-based copies for the checker.
    guyprefers = {
        'abe': ['bea', 'dee', 'eve'],
        'bob': ['abi', 'cath'],
        'col': [ 'dee', 'bea', 'cath', 'eve', 'abi'],
        'dan': ['eve', 'cath', 'abi', 'dee'],
        'ed': ['dee', 'eve'],
    }
    galprefers = {
        'abi': ['bob', 'col', 'dan'],
        'bea': ['col', 'abe'],
        'cath': ['col', 'dan', 'bob'],
        'dee': ['ed', 'abe', 'dan', 'col'],
        'eve': ['abe', 'ed', 'col', 'dan']
    }
    res = solve(p)
    assert check(guyprefers, galprefers, res)
    assert res == {'cath': 'dan', 'abi': 'bob', 'bea': 'col', 'eve': 'abe', 'dee': 'ed'}
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,254
|
wojtii/smti
|
refs/heads/master
|
/Frames/WomenAndMen.py
|
from data import men, women
from tkinter import Label, Listbox, Entry, messagebox, Button, END, E, W, N, HORIZONTAL
from tkinter.ttk import Separator
class People:
    """'Women & Men' tab: two listboxes showing the current participants
    plus entry/button forms to append a new woman or man to the shared
    lists in the data module."""

    def __init__(self, tab):
        # List of women
        women_label = Label(tab, text="Women")
        women_label.grid(column=0, row=0, padx=25, pady=4, sticky=W)
        self.women_list = Listbox(tab, width=35, height=10)
        self.women_list.grid(column=0, row=1, padx=25)
        # Add Woman form; the lambda re-reads the entry at click time.
        add_woman_label = Label(tab, text="Add Woman")
        add_woman_label.grid(column=1, row=0, padx=25, pady=4, sticky=W)
        self.add_woman_entry = Entry(tab, width=24)
        self.add_woman_entry.grid(column=1, row=1, padx=27, sticky=N+W)
        add_woman_btn = Button(tab, text="Add Woman", width=20, command=lambda: self.add("woman", self.add_woman_entry.get()))
        add_woman_btn.grid(column=1, row=1, pady=25, padx=27, sticky=N+W)
        tab.columnconfigure(3, weight=1)
        Separator(tab, orient=HORIZONTAL).grid(row=2, pady=20, columnspan=4, sticky=E+W)
        # List of men
        men_label = Label(tab, text="Men")
        men_label.grid(column=0, row=3, padx=25, sticky=W)
        self.men_list = Listbox(tab, width=35, height=10)
        self.men_list.grid(column=0, row=4, padx=25)
        # Add Man form
        add_man_label = Label(tab, text="Add Man")
        add_man_label.grid(column=1, row=3, padx=25, sticky=W)
        self.add_man_entry = Entry(tab, width=24)
        self.add_man_entry.grid(column=1, row=4, padx=27, sticky=N + W)
        add_man_btn = Button(tab, text="Add Man", width=20, command=lambda: self.add("man", self.add_man_entry.get()))
        add_man_btn.grid(column=1, row=4, pady=25, padx=27, sticky=N + W)

    def add(self, sex, name):
        """Validate *name* (min. 3 characters) and append it to the shared
        module-level list (data.women / data.men) and the matching listbox;
        *sex* is "woman" or "man"."""
        if len(name) < 3:
            messagebox.showinfo("Name is too short", "Please provide real name")
            return
        if sex == "woman":
            women.append(name)
            self.women_list.insert(END, name)
            self.add_woman_entry.delete(0, END)
        else:
            men.append(name)
            self.men_list.insert(END, name)
            self.add_man_entry.delete(0, END)
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,255
|
wojtii/smti
|
refs/heads/master
|
/data.py
|
men = []
women = []

# Global problem instance shared between the GUI frames and the solver.
# 'men'/'women' map a person's index to their ranked preference list
# (indices into the opposite group); 'men_str'/'women_str' alias the name
# lists above, so appending a name updates the problem too.
p = {
    'men': {},
    'women': {},
    'men_str': men,
    'women_str': women
}


def update_global_pref(pref, sex):
    """Merge *pref* ({person_index: [preference indices]}) into the global
    problem *p* for the group named by *sex*.

    *sex* is a singular label such as 'Man' or 'Woman' (any case); it is
    normalised to the matching key of *p* ('men' / 'women').

    Fix: removed the leftover debug print of the whole problem dict.
    """
    # 'Man' -> 'Men' -> 'men'; 'Woman' -> 'Women' -> 'women'.
    group = sex.replace('a', 'e').lower()
    for key in pref:
        p[group][key] = pref[key]
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,256
|
wojtii/smti
|
refs/heads/master
|
/solver.py
|
from z3 import Solver, If, Not, Bool, And, Int, Distinct, sat
def _if_x(x, ind, i):
    """Build a nested z3 If-expression that evaluates to the first
    position j >= i with x[j] == ind, or -1 when no position matches."""
    expr = -1
    # Fold from the tail so the innermost If covers the last element.
    for j in range(len(x) - 1, i - 1, -1):
        expr = If(x[j] == ind, j, expr)
    return expr
def _if_xy(x, y, i):
    """Build a nested z3 If-expression that evaluates to the first
    position j >= i with y[j] == x, or -1 when no element equals x."""
    expr = -1
    # Fold from the tail so the innermost If covers the last element.
    for j in range(len(y) - 1, i - 1, -1):
        expr = If(x == y[j], j, expr)
    return expr
def solve(data):
    """Solve a stable-marriage instance with the Z3 SMT solver.

    data: dict with index-based preference lists under 'men'/'women'
        (lists may be incomplete) and display names under 'men_str'/
        'women_str'.
    Returns a dict mapping each woman's name to her matched man's name.
    Raises Exception when the constraints are unsatisfiable.
    Side effects: dumps the solver input and model to z3_input.txt and
    z3_model.txt in the working directory.
    """
    men_str = data['men_str']
    women_str = data['women_str']
    men_prefer = data['men']
    women_prefer = data['women']
    s = Solver()
    size = len(men_prefer)
    size_range = range(size)
    # men_choice[m] = index of the woman matched to man m; women_choice is
    # the inverse permutation.
    men_choice = [Int(f'men_choice_{i}') for i in size_range]
    women_choice = [Int(f'women_choice_{i}') for i in size_range]
    for i in size_range:
        s.add(And(men_choice[i] >= 0, men_choice[i] <= size-1))
        s.add(And(women_choice[i] >= 0, women_choice[i] <= size-1))
    s.add(Distinct(men_choice))
    # Tie the two directions together: woman i's partner is whichever man
    # chose index i.
    for i in size_range:
        s.add(women_choice[i] == _if_x(men_choice, i, 0))
    # men_own_choice[m] = rank of m's assigned partner within his own
    # preference list (-1 when the partner is not listed).
    men_own_choice = [Int(f'men_own_choice_{i}') for i in size_range]
    women_own_choice = [Int(f'women_own_choice_{i}') for i in size_range]
    for m in size_range:
        s.add(men_own_choice[m] == _if_xy(men_choice[m], men_prefer[m], 0))
    for w in size_range:
        s.add(women_own_choice[w] == _if_xy(women_choice[w], women_prefer[w], 0))
    # men_want[m][w] is true when m ranks w strictly above his current
    # partner (and symmetrically for women_want).
    men_want = [[Bool(f'men_want_{m}_{w}') for w in size_range] for m in size_range]
    women_want = [[Bool(f'women_want_{w}_{m}') for m in size_range] for w in size_range]
    for m in size_range:
        for w in men_prefer[m]:
            s.add(men_want[m][w] == (men_prefer[m].index(w) < men_own_choice[m]))
    for w in size_range:
        for m in women_prefer[w]:
            s.add(women_want[w][m] == (women_prefer[w].index(m) < women_own_choice[w]))
    # Stability: no man and woman may mutually prefer each other over
    # their assigned partners (no blocking pair).
    for m in size_range:
        for w in size_range:
            s.add(Not(And(men_want[m][w], women_want[w][m])))
    if s.check() != sat:
        raise Exception('not a valid input')
    with open('z3_input.txt', 'w') as f:
        f.write(s.sexpr())
    mdl = s.model()
    with open('z3_model.txt', 'w') as f:
        f.write(str(mdl))
    return {women_str[mdl[men_choice[m]].as_long()]: men_str[m] for m in size_range}
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,257
|
wojtii/smti
|
refs/heads/master
|
/checker.py
|
def check(guyprefers, galprefers, engaged):
    """Verify that *engaged* (woman -> man) is a stable matching.

    Returns True when no man and woman mutually prefer each other over
    their assigned partners; otherwise prints the first blocking pair
    found and returns False.
    """
    wife_of = {man: woman for woman, man in engaged.items()}
    for woman, man in engaged.items():
        her_prefs = galprefers[woman]
        his_prefs = guyprefers[man]
        men_she_prefers = her_prefs[:her_prefs.index(man)]
        women_he_prefers = his_prefs[:his_prefs.index(woman)]
        # Would any man she prefers also rather be with her?
        for rival in men_she_prefers:
            rivals_wife = wife_of[rival]
            rival_prefs = guyprefers[rival]
            if rival_prefs.index(rivals_wife) > rival_prefs.index(woman):
                print("%s and %s like each other better than "
                      "their present partners: %s and %s, respectively"
                      % (woman, rival, man, rivals_wife))
                return False
        # Would any woman he prefers also rather be with him?
        for rival in women_he_prefers:
            rivals_husband = engaged[rival]
            rival_prefs = galprefers[rival]
            if rival_prefs.index(rivals_husband) > rival_prefs.index(man):
                print("%s and %s like each other better than "
                      "their present partners: %s and %s, respectively"
                      % (man, rival, woman, rivals_husband))
                return False
    return True
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,258
|
wojtii/smti
|
refs/heads/master
|
/main.py
|
from tkinter import Tk, ttk, Label, BOTTOM, SUNKEN, X, W
from Frames.WomenAndMen import People
from Frames.Preferences import Preferences
def handle_tab_changed(event):
selection = event.widget.select()
tab = event.widget.tab(selection, "text")
if tab == "Preferences list":
preferences.update_screen()
# Main Window
window = Tk()
window.title("SMTI")
window.geometry('640x540')
# Menu Controls
tab_control = ttk.Notebook(window)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab_control.add(tab1, text='Women & Men')
tab_control.add(tab2, text='Preferences list')
people = People(tab1)
preferences = Preferences(tab2)
# Status Bar
status = Label(text="", bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
tab_control.bind("<<NotebookTabChanged>>", handle_tab_changed)
# Pack menu and loop window
tab_control.pack(expand=1, fill='both')
window.mainloop()
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,259
|
wojtii/smti
|
refs/heads/master
|
/Frames/Preferences.py
|
import tkinter as tk
from data import p, men, women, update_global_pref
from tkinter import Label, W, N, E, Button, Listbox, END, messagebox
from tkinter.ttk import Combobox
from solver import solve
class Preferences:
def __init__(self, tab):
# Scroll Bar
yscroll = tk.Scrollbar(tab, orient=tk.VERTICAL)
xscroll = tk.Scrollbar(tab, orient=tk.HORIZONTAL)
yscroll.pack(side=tk.RIGHT, fill=tk.Y)
xscroll.pack(side=tk.BOTTOM, fill=tk.X)
self.canvas = tk.Canvas(tab)
self.canvas.pack(fill=tk.BOTH, expand=True)
self.canvas['yscrollcommand'] = yscroll.set
self.canvas['xscrollcommand'] = xscroll.set
yscroll['command'] = self.canvas.yview
xscroll['command'] = self.canvas.xview
self.frame = tk.Frame(self.canvas)
self.canvas.create_window(4, 4, window=self.frame, anchor='nw')
self.frame.bind("<Configure>", self._on_frame_configure)
# Set preferences for ...
preferences_label = Label(self.frame, text="Preferences for: ")
preferences_label.grid(column=0, row=0, padx=25, pady=4, sticky=W)
self.people_list = Combobox(self.frame)
self.people_list.grid(column=1, row=0, padx=5, pady=4, sticky=W)
self.autoCreatedWidgets = []
add_pref = Button(self.frame, text="Add Preference", width=20, command=lambda: self.add_pref())
add_pref.grid(column=1, row=1000, pady=25, padx=27, sticky=N + W)
# Preferences window
self.frame.columnconfigure(3, weight=1)
self.pref_box = Listbox(self.frame, width=80, height=10)
self.pref_box.grid(column=0, columnspan=3, row=1001, pady=25, padx=27, sticky=E+W)
self.currentSex = 'Man'
self.currentPref = ''
# Solve them all
solve_btn = Button(self.frame, text="Solve", width=20, command=lambda: self.solve())
solve_btn.grid(column=1, row=1002, pady=25, padx=27, sticky=N + W)
def _on_frame_configure(self, event=None):
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def update_screen(self):
# Combo box update
self.people_list.destroy()
self.people_list = Combobox(self.frame, values=women+men)
self.people_list.grid(column=1, row=0, padx=5, pady=4, sticky=W)
self.people_list.bind("<<ComboboxSelected>>", self.selected)
# Handle select event on combo box
def selected(self, event):
# Destroy previous widgets
for widget in self.autoCreatedWidgets:
widget.destroy()
self.autoCreatedWidgets.clear()
# Get Selection and generate widget to set preference
self.currentPref = event.widget.get()
if self.currentPref in men:
prefl = women
self.currentSex = 'Man'
ordinal = [self.int2ordinal(i) for i in range(1, 1 + len(women))]
else:
prefl = men
self.currentSex = 'Woman'
ordinal = [self.int2ordinal(i) for i in range(1, 1 + len(men))]
ordinal.append("--")
for index, human in enumerate(prefl):
l = Label(self.frame, text="I choose " + human + " as my ")
l.grid(row=index+1, sticky=W)
c = Combobox(self.frame, values=ordinal)
c.grid(row=index+1, column=1)
self.autoCreatedWidgets.append(l)
self.autoCreatedWidgets.append(c)
def add_pref(self):
pref_str = ['_' for _ in range(len(men))]
pref_int = [-1 for _ in range(len(men))]
index = 0
for widget in self.autoCreatedWidgets:
if 'label' not in str(widget).split(".")[-1]:
choose = self.ordinal2int(widget.get())
if self.currentSex == 'Man':
index_current_pref = men.index(self.currentPref)
else:
index_current_pref = women.index(self.currentPref)
if self.currentSex == 'Man' and choose > -1:
pref_str[choose] = women[index]
pref_int[choose] = index
elif self.currentSex == 'Woman' and choose > -1:
pref_str[choose] = men[index]
pref_int[choose] = index
index += 1
update_global_pref({index_current_pref: list(filter(lambda x: x >= 0, pref_int))}, self.currentSex)
self.update_pref_box()
def update_pref_box(self):
self.pref_box.delete(0, END)
for key in p['women']:
prefs = [men[x] for x in p['women'][key]]
line = women[key] + ': ' + str(prefs)
self.pref_box.insert(END, line)
for key in p['men']:
prefs = [women[x] for x in p['men'][key]]
line = men[key] + ': ' + str(prefs)
self.pref_box.insert(END, line)
def solve(self):
try:
res = solve(p)
messagebox.showinfo("Results of matching", str(res))
except:
messagebox.showinfo("Error", "Invalid input")
@staticmethod
def int2ordinal(num):
num = str(num)
if len(num) > 2:
end_digits = int(num) % 100
else:
end_digits = int(num) % 10
if end_digits == 1:
return (num + "st")
if end_digits == 2:
return (num + "nd")
if end_digits == 3:
return (num + "rd")
else:
return (num + "th")
@staticmethod
def ordinal2int(ordinal):
if ordinal and ordinal != "--":
ordinal = ordinal.replace("st", "").replace("nd", "").replace("rd", "").replace("th", "")
else:
return -1
return int(ordinal) - 1
|
{"/solver_test.py": ["/solver.py", "/checker.py"], "/Frames/WomenAndMen.py": ["/data.py"], "/main.py": ["/Frames/WomenAndMen.py", "/Frames/Preferences.py"], "/Frames/Preferences.py": ["/data.py", "/solver.py"]}
|
22,260
|
ganeshth/room-book
|
refs/heads/master
|
/users/models.py
|
from django.db import models
# Create your models here.
class User(models.Model):
    """A room member; referenced by expensetracker.ExpenseRecord via a
    foreign key ('users.User')."""
    first_name = models.CharField("first name",max_length=30, blank=False)
    last_name = models.CharField("last name",max_length=30, blank=False)
    # NOTE(review): no default / auto_now_add, so callers must supply this
    # explicitly — confirm that is intended.
    joined_date = models.DateField()

    def __str__(self):
        # Admin and ExpenseRecord.__str__ display users by first name only.
        return self.first_name
|
{"/expensetracker/admin.py": ["/expensetracker/models.py"]}
|
22,261
|
ganeshth/room-book
|
refs/heads/master
|
/expensetracker/admin.py
|
from django.contrib import admin
from expensetracker.models import ExpenseRecord, Category
# Register your models here.
# Expose both expense models in the Django admin with the default
# ModelAdmin options.
admin.site.register(ExpenseRecord)
admin.site.register(Category)
|
{"/expensetracker/admin.py": ["/expensetracker/models.py"]}
|
22,262
|
ganeshth/room-book
|
refs/heads/master
|
/expensetracker/views.py
|
from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the landing page with a static welcome message."""
    context = {'message': 'Welcome to our website'}
    return render(request, 'home.html', context)
|
{"/expensetracker/admin.py": ["/expensetracker/models.py"]}
|
22,263
|
ganeshth/room-book
|
refs/heads/master
|
/expensetracker/models.py
|
from django.db import models
# Create your models here.
class Category(models.Model):
    """A label for classifying expense records."""
    # The category name doubles as the primary key, so names are unique
    # and records reference categories by name.
    category = models.CharField(max_length=40, primary_key=True)
    createdby = models.CharField(max_length=40)

    def __str__(self):
        return self.category

    class Meta:
        verbose_name= "Category of the expense record"
class ExpenseRecord(models.Model):
    """A single monetary transaction made by a user, with a running balance."""
    # Fix: on_delete is mandatory from Django 2.0 onwards; CASCADE matches
    # the implicit default of earlier Django versions, so behaviour is
    # unchanged while the model becomes forward-compatible.
    user = models.ForeignKey('users.User', on_delete=models.CASCADE)
    transaction_date = models.DateTimeField()
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    # True when the expense is shared by the room rather than personal.
    is_room_expense = models.BooleanField()
    amount = models.FloatField()
    # Balance after this transaction; maintained by callers, not computed here.
    balance = models.FloatField()

    def __str__(self):
        return self.user.__str__() + " on " + str(self.transaction_date) + ": " + str(self.amount)

    class Meta:
        verbose_name = "Transaction record for the user"
|
{"/expensetracker/admin.py": ["/expensetracker/models.py"]}
|
22,264
|
ganeshth/room-book
|
refs/heads/master
|
/expensetracker/urls.py
|
from django.conf.urls import url
from expensetracker import views

# Route the app root to the home view. The pattern is a regex, so use a
# raw string literal as regex best practice (behaviour is unchanged).
urlpatterns = [
    url(r'^$', views.home, name='home'),
]
|
{"/expensetracker/admin.py": ["/expensetracker/models.py"]}
|
22,267
|
MrMahus/DraftPygame
|
refs/heads/master
|
/engine/entities/player.py
|
import pygame
class Player:
    """The user-controlled entity: position, stats and its sprite surface.

    The sprite is drawn at a fixed screen point while the world scrolls,
    so getPlayerPos() returns the negated position as a camera offset.
    """

    def __init__(self):
        self.life_points = 10
        self.inventory = []
        self.player_position_x = 0
        self.player_position_y = 0
        self.velocity = 3
        self.player_width = 32
        self.play_height = 32
        self.playerSurface = pygame.Surface((self.player_width, self.play_height))
        self.playerSurface.fill((255, 0, 0))
        self.key_states = None
        self.world_position_x = 0
        self.world_position_y = 0
        self.a = False

    def move_player(self):
        """Apply one movement step for every direction currently pressed."""
        full = self.velocity
        half = self.velocity / 2
        # (dx, dy) displacement contributed by each pressed key state;
        # diagonal states move at half speed on each axis.
        displacements = {
            "up": (0, -full),
            "down": (0, full),
            "left": (-full, 0),
            "right": (full, 0),
            "up_left": (-half, -half),
            "up_right": (half, -half),
            "down_left": (-half, half),
            "down_right": (half, half),
        }
        for state, (dx, dy) in displacements.items():
            if self.key_states[state]:
                self.player_position_x += dx
                self.player_position_y += dy

    # Draw
    def drawPlayer(self, _game_display, _display_size):
        """Blit the sprite at the fixed screen point (320, 320)."""
        _game_display.blit(self.playerSurface, (320, 320))

    # Update
    def updatePlayer(self, _key_states):
        """Refresh the input snapshot, then move accordingly."""
        self.setkeyStates(_key_states)
        self.move_player()

    # Setters
    def setkeyStates(self, _key_state):
        self.key_states = _key_state

    # Getters
    def getPlayerPos(self):
        """Return the negated position, i.e. the camera/world offset."""
        return (-self.player_position_x, -self.player_position_y)
|
{"/engine/core.py": ["/engine/window.py", "/engine/events.py", "/engine/loader.py", "/engine/scenes.py", "/engine/entities/player.py"]}
|
22,268
|
MrMahus/DraftPygame
|
refs/heads/master
|
/engine/scenes.py
|
import pygame
class Map:
    """Holds the player's current map plus its eight neighbours, each with
    grid coordinates, parsed tile layers, and a pre-rendered surface."""

    # Grid offset (dx, dy) of each neighbouring map, keyed by the stem of
    # the corresponding attribute names. Iteration order is also the order
    # in which neighbours are blitted.
    NEIGHBOUR_OFFSETS = {
        'north': (0, -1),
        'south': (0, 1),
        'west': (-1, 0),
        'east': (1, 0),
        'northwest': (-1, -1),
        'northeast': (1, -1),
        'southwest': (-1, 1),
        'southeast': (1, 1),
    }

    def __init__(self, _display_settings):
        # One map covers exactly one screen, so map size == display size.
        self.map_size = _display_settings
        self.current_map = [0, 0]
        self.current_map_layers = []
        self.current_map_surface = pygame.Surface(_display_settings)
        for stem in self.NEIGHBOUR_OFFSETS:
            setattr(self, stem + '_map', [])
            setattr(self, stem + '_map_layers', [])
            setattr(self, stem + '_map_surface', pygame.Surface(_display_settings))
        self.updateMapsArroundPlayer()

    def updateMapsArroundPlayer(self):
        """Recompute every neighbour's grid coordinates from current_map."""
        cx, cy = self.current_map
        for stem, (dx, dy) in self.NEIGHBOUR_OFFSETS.items():
            setattr(self, stem + '_map', [cx + dx, cy + dy])

    def loadMaps(self, _loader):
        """Load tile layers for the current map and all eight neighbours."""
        self.current_map_layers = _loader.loadMapFile(self.current_map)
        for stem in self.NEIGHBOUR_OFFSETS:
            layers = _loader.loadMapFile(getattr(self, stem + '_map'))
            setattr(self, stem + '_map_layers', layers)

    def drawMapsSurface(self, _loader):
        """Rasterise every map's layers onto its cached surface."""
        self.current_map_surface = _loader.drawTileMap(
            self.current_map_layers, self.current_map_surface)
        for stem in self.NEIGHBOUR_OFFSETS:
            rendered = _loader.drawTileMap(
                getattr(self, stem + '_map_layers'),
                getattr(self, stem + '_map_surface'))
            setattr(self, stem + '_map_surface', rendered)

    def assembleMapsTogether(self, _display, _player_pos):
        """Blit the 3x3 grid of map surfaces, shifted by the camera offset
        in _player_pos; missing (None) surfaces are skipped."""
        px, py = _player_pos
        width, height = self.map_size
        if self.current_map_surface != None:
            _display.blit(self.current_map_surface, _player_pos)
        for stem, (dx, dy) in self.NEIGHBOUR_OFFSETS.items():
            surface = getattr(self, stem + '_map_surface')
            if surface != None:
                _display.blit(surface, (px + dx * width, py + dy * height))

    def update(self, _loader):
        """Refresh neighbour coordinates and reload their layers."""
        self.updateMapsArroundPlayer()
        self.loadMaps(_loader)

    def draw(self, _loader, _display, _player_pos):
        """Re-render all map surfaces and compose them onto the display."""
        self.drawMapsSurface(_loader)
        self.assembleMapsTogether(_display, _player_pos)
|
{"/engine/core.py": ["/engine/window.py", "/engine/events.py", "/engine/loader.py", "/engine/scenes.py", "/engine/entities/player.py"]}
|
22,269
|
MrMahus/DraftPygame
|
refs/heads/master
|
/engine/events.py
|
import pygame
class Events:
    """Translates pygame input events into a simple key-state dictionary
    that the rest of the game polls via getKeyStates()."""

    def __init__(self):
        # One boolean per supported movement direction.
        self.key_states = {
            "up": False,
            "down": False,
            "left": False,
            "right": False,
            "up_left": False,
            "up_right": False,
            "down_left": False,
            "down_right": False,
        }

    def processEvent(self):
        """Drain the pygame event queue and refresh key_states."""
        arrow_to_state = {
            pygame.K_UP: "up",
            pygame.K_DOWN: "down",
            pygame.K_LEFT: "left",
            pygame.K_RIGHT: "right",
        }
        for current_event in pygame.event.get():
            if current_event.type == pygame.QUIT:
                self.triggerEventAction("quit")
            # Key pressed
            if current_event.type == pygame.KEYDOWN:
                if current_event.key == pygame.K_ESCAPE:
                    pass  # reserved for a future pause/menu action
                if current_event.key in arrow_to_state:
                    self.key_states[arrow_to_state[current_event.key]] = True
            # Key released
            if current_event.type == pygame.KEYUP:
                if current_event.key in arrow_to_state:
                    self.key_states[arrow_to_state[current_event.key]] = False
        # Diagonal states are derived from the instantaneous keyboard
        # snapshot rather than from individual events.
        pressed = pygame.key.get_pressed()
        diagonal_pairs = {
            "down_right": (pygame.K_RIGHT, pygame.K_DOWN),
            "down_left": (pygame.K_LEFT, pygame.K_DOWN),
            "up_right": (pygame.K_RIGHT, pygame.K_UP),
            "up_left": (pygame.K_LEFT, pygame.K_UP),
        }
        for state_name, (first, second) in diagonal_pairs.items():
            self.key_states[state_name] = bool(pressed[first] and pressed[second])

    def triggerEventAction(self, _action):
        """Perform the global action named by _action (only 'quit' so far)."""
        if _action == 'quit':
            pygame.quit()
            quit(print("Exit successfull"))

    # Getters
    def getKeyStates(self):
        return self.key_states
|
{"/engine/core.py": ["/engine/window.py", "/engine/events.py", "/engine/loader.py", "/engine/scenes.py", "/engine/entities/player.py"]}
|
22,270
|
MrMahus/DraftPygame
|
refs/heads/master
|
/engine/window.py
|
import pygame
class Window:
    """Owns the pygame display surface and the basic window settings."""

    def __init__(self):
        self.window_width = 640
        self.window_height = 640
        self.window_caption = 'Title'
        self.window_framerate = 60
        self.game_display = None
        self.createWindow()
        # Colour constants (RGB tuples) shared with the rest of the game.
        self.red = (255, 0, 0)
        self.green = (0, 255, 0)
        self.blue = (0, 0, 255)
        self.black = (0, 0, 0)

    def createWindow(self):
        """Open the pygame window and apply the caption."""
        dimensions = (self.window_width, self.window_height)
        self.game_display = pygame.display.set_mode(dimensions)
        pygame.display.set_caption(self.window_caption)

    def getWindowSize(self):
        """Return the window size as a (width, height) tuple."""
        return (self.window_width, self.window_height)
|
{"/engine/core.py": ["/engine/window.py", "/engine/events.py", "/engine/loader.py", "/engine/scenes.py", "/engine/entities/player.py"]}
|
22,271
|
MrMahus/DraftPygame
|
refs/heads/master
|
/engine/core.py
|
import pygame
import time
from engine.window import Window
from engine.events import Events
from engine.loader import Loader
from engine.scenes import Map
from engine.entities.player import Player
# ----- Initialisation ------
# Pygame must be initialised before any display or event calls.
pygame.init()
# Window: owns the display surface everything is drawn onto.
game_window = Window()
game_display = game_window.game_display
game_window_settings = game_window.getWindowSize()
# Events
loop = True
event_manager = Events()
# Loader: reads tile images and map files from disk.
loader = Loader()
# Entities
user_player = Player()
# Scenes: the 3x3 grid of maps centred on the player.
map_manager = Map(game_window_settings)
map_manager.loadMaps(loader)
# Time
clock = pygame.time.Clock()
# ------ Main game loop -----
while(loop):
    # Frame start time, used below to compute the displayed FPS.
    start_time = time.time()
    # Get events
    event_manager.processEvent()
    # Clear screen
    game_display.fill(game_window.black)
    # Update datas
    user_player.updatePlayer(event_manager.getKeyStates())
    map_manager.update(loader)
    # Draw things: maps first, player sprite on top.
    map_manager.draw(loader, game_display, user_player.getPlayerPos())
    user_player.drawPlayer(game_display, game_window_settings)
    # Flip screen
    pygame.display.flip()
    # Cap the frame rate at 30 FPS (window_framerate = 60 is unused here).
    clock.tick(30)
    framerate = 1 / (time.time() - start_time)
    FPS = "FPS : "+ str(int(framerate))
    pygame.display.set_caption(FPS)
# NOTE(review): loop is never set to False, so this is reached only if the
# loop exits abnormally; normal quit happens via Events.triggerEventAction.
pygame.quit()
quit()
|
{"/engine/core.py": ["/engine/window.py", "/engine/events.py", "/engine/loader.py", "/engine/scenes.py", "/engine/entities/player.py"]}
|
22,272
|
MrMahus/DraftPygame
|
refs/heads/master
|
/engine/loader.py
|
import pygame
import os
class Loader:
    """Loads tile images from disk and parses text map files into layers."""

    def __init__(self):
        # Tile id -> pygame Surface, or None when the image file is absent.
        self.tile_collection = {
            0 : None, # Tile Transparent
            1 : None, # Tile Grass
            2 : None, # Tile 2
            3 : None, # Tile 3
            4 : None, # Tile 4
            5 : None, # Tile 5
            6 : None, # Tile 6
            7 : None, # Tile 7
            8 : None, # Tile 8
        }
        # Tile geometry: 32x32 pixel tiles on a grid indexed 0..19.
        self.tile_width = 32
        self.tile_height = 32
        self.tile_rows = 20 - 1  # NOTE(review): unused below -- verify
        self.tile_cols = 20 - 1
        self.loadTilesFiles()

    def loadTilesFiles(self):
        """Load res/tiles/tile_<id>.png for every known tile id."""
        for id in self.tile_collection:
            if os.path.exists("res/tiles/tile_" + str(id) + ".png") == True:
                # convert_alpha() requires the display to exist already.
                self.tile_collection[id] =\
                    pygame.image.load("res/tiles/tile_" + str(id) + ".png").convert_alpha()
            else:
                self.tile_collection[id] = None

    def loadMapFile(self,_map_index):
        """Parse res/maps/map_<x>_<y>.txt into (layer_1, layer_2, layer_3).

        Each layer becomes a flat list of single-character tile ids.
        Returns None when the map file does not exist.
        """
        if os.path.exists("res/maps/map_" + str(_map_index[0])
                          + "_" + str(_map_index[1]) + ".txt"):
            file_map = open("res/maps/map_" + str(_map_index[0])
                            + "_" + str(_map_index[1]) + ".txt", "r")
            buffer = []
            for lines in file_map:
                buffer.append(lines.rstrip('\n'))
            file_map.close()
            # Section-header lines in the file switch which layer is filled.
            current_layer = 1
            layer_1 = ''
            layer_2 = ''
            layer_3 = ''
            for element in buffer:
                if element == 'first_layer,':
                    current_layer = 1
                if element == 'second_layer,':
                    current_layer = 2
                if element == 'third_layer,':
                    current_layer = 3
                if current_layer == 1 and element != 'first_layer,':
                    layer_1 += element
                if current_layer == 2 and element != 'second_layer,':
                    layer_2 += element
                if current_layer == 3 and element != 'third_layer,':
                    layer_3 += element
            # Drop comma separators, then explode into one char per tile.
            layer_1 = layer_1.replace(',', "")
            layer_1 = list(layer_1)
            layer_2 = layer_2.replace(',', "")
            layer_2 = list(layer_2)
            layer_3 = layer_3.replace(',', "")
            layer_3 = list(layer_3)
            map_infos = (layer_1, layer_2, layer_3)
            return map_infos
        else:
            #print("map file "+str(_map_index)+ " could not be read")
            return None

    def drawTileMap(self, _map_layers, _surface):
        """Blit every layer of _map_layers onto _surface; return _surface.

        NOTE(review): blitting tile_collection[int(cell)] raises when the
        tile image failed to load (value None) -- confirm maps reference
        only existing tiles.
        """
        if _map_layers != None:
            for layer in _map_layers:
                tile_xPos = 0
                tile_yPos = 0
                for cell in layer:
                    _surface.blit(self.tile_collection[int(cell)], (tile_xPos,tile_yPos))
                    # Advance left-to-right, wrapping to the next tile row.
                    if tile_xPos < self.tile_width*self.tile_cols:
                        tile_xPos += 32
                    else:
                        tile_xPos = 0
                        tile_yPos += 32
        return _surface
|
{"/engine/core.py": ["/engine/window.py", "/engine/events.py", "/engine/loader.py", "/engine/scenes.py", "/engine/entities/player.py"]}
|
22,276
|
Corwind/systran-demo
|
refs/heads/master
|
/systran/forms.py
|
from django import forms
from captcha.fields import ReCaptchaField
class OpinionForm(forms.Form):
    """Single-field form: the sentence whose sentiment will be classified."""
    # Rendered as a Bootstrap-styled textarea.
    sentence = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control'}))
|
{"/systran/views.py": ["/systran/forms.py"]}
|
22,277
|
Corwind/systran-demo
|
refs/heads/master
|
/systran/views.py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
import re
from .forms import OpinionForm
from .format import read_dic, clean_string
from subprocess import call
from nltk.stem.snowball import PorterStemmer as Stemmer
from nltk. corpus import stopwords
from pprint import pprint as print
def index(request):
    """Classify a submitted sentence with the external SVM model.

    GET renders the form.  POST extracts keywords, counts them against the
    model vocabulary, writes one sample in sparse libsvm format to 'test',
    shells out to ./svm_classify, reads the prediction from 'tmp', and
    renders the results page.  Any failure redirects to /systran/error.
    """
    template = loader.get_template('systran/index.html')
    if request.method == 'POST':
        form = OpinionForm(request.POST)
        if form.is_valid():
            try:
                sentence = form.cleaned_data['sentence']
                # Vocabulary dict: word/bigram -> occurrence count.
                dic = read_dic('dic_98')
                s = extract_keywords(sentence)
                for word in s:
                    print(word)
                    try:
                        dic[word] += 1
                    except:
                        # Keyword not in the vocabulary: skipped.
                        # NOTE(review): bare except also hides real errors.
                        pass
                l = []  # NOTE(review): unused -- verify it can go
                # Sparse libsvm line: "<label> index:value ..." with a dummy
                # label of 0 (svm_classify ignores it for prediction).
                with open('test', 'w') as f:
                    f.write("0")
                    i = 1
                    for key, value in dic.items():
                        if value != 0:
                            print('{} ({}) : {}'.format(key, i, value))
                            f.write(" {}:{}".format(i, value))
                        i += 1
                    f.write('\n')
                # External classifier writes its prediction to 'tmp'.
                call(["./svm_classify", "test", "model", "tmp"])
                with open('tmp', 'r') as r:
                    pred = r.readline()
                template = loader.get_template('systran/results.html')
                context = RequestContext(request, {'sentence': sentence,
                                                   'prediction': pred, 'keywords' : s})
                return HttpResponse(template.render(context))
            except Exception as e:
                print(e)
                return HttpResponseRedirect('/systran/error')
        else:
            return HttpResponseRedirect('/systran/error')
    else:
        form = OpinionForm()
    context = RequestContext(request, {'form': form})
    return HttpResponse(template.render(context))
def extract_keywords(sentence):
    """Lower-case, strip stop words, stem, and return unigrams + bigrams."""
    sentence = sentence.lower()
    # Negation/contrast words deliberately kept despite NLTK listing them.
    kept_words = ["no", "nor", "not", "over", "under", "again", "further",
                  "but", "against", "too", "very"]
    stop_list = stopwords.words('english')
    for kept in kept_words:
        stop_list.remove(kept)
    print(stop_list)
    stop_pattern = re.compile(r'\b(' + r'|'.join(stop_list) + r')\b\s*')
    # Flatten newlines and expand "n't" contractions before cleaning.
    sentence = sentence.replace('\n', '').replace("n't", " not")
    sentence = stop_pattern.sub('', clean_string(sentence))
    stemmer = Stemmer()
    stems = [stemmer.stem(word) for word in sentence.split()]
    # Adjacent-stem bigrams joined with a single space.
    bigrams = [first + " " + second for first, second in zip(stems, stems[1:])]
    return stems + bigrams
def error(request):
    """Render the static error page."""
    ctx = RequestContext(request, {})
    page = loader.get_template('systran/error.html')
    return HttpResponse(page.render(ctx))
|
{"/systran/views.py": ["/systran/forms.py"]}
|
22,278
|
HarunaHaju/DQN_flappy_bird
|
refs/heads/master
|
/flappybird.py
|
import sys
sys.path.append("game/")
import numpy as np
import cv2
import game.wrapped_flappy_bird as game
from dueling_DQN import DeepQNetworks
def preprocess(observation):
    """Downscale a frame to 80x80 grayscale, binarize, add a channel axis."""
    gray = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 1, 1, cv2.THRESH_BINARY)
    return np.reshape(binary, (80, 80, 1))
def playFlappyBird():
    """Run the game loop forever, letting the DQN agent pick actions."""
    action = 2  # number of discrete actions available to the agent
    brain = DeepQNetworks(action)
    flappyBird = game.GameState()
    # Bootstrap: take the first action once to obtain an initial frame.
    action0 = np.array([1,0])
    observation0, reward0, terminal = flappyBird.frame_step(action0)
    # Same processing as preprocess(), but kept 2-D on purpose:
    # setInitState stacks four copies into the (80, 80, 4) state itself.
    observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
    ret, observation0 = cv2.threshold(observation0, 1, 1, cv2.THRESH_BINARY)
    brain.setInitState(observation0)
    while True:
        action = brain.getAction()
        score = flappyBird.score
        next_observation, reward, terminal = flappyBird.frame_step(action)
        next_observation = preprocess(next_observation)
        brain.setPerception(next_observation, action, reward, terminal)
        if terminal:
            # Episode ended: record the final score to TensorBoard.
            brain.log_score(score)

if __name__ == "__main__":
    playFlappyBird()
|
{"/flappybird.py": ["/dueling_DQN.py"]}
|
22,279
|
HarunaHaju/DQN_flappy_bird
|
refs/heads/master
|
/dueling_DQN.py
|
import random
import numpy as np
import tensorflow as tf
import replay_buffer
# replay buffer, target network, dueling
SIGN = 'dueling_DQN'
class DeepQNetworks:
    """Dueling DQN with a replay buffer and a periodically-synced target net."""

    def __init__(self, n_actions,
                 starter_learning_rate=0.000025,
                 gamma=0.99,
                 memory_size=50000,
                 batch_size=32,
                 n_explore=10000,
                 frame_per_action=4,
                 replace_target_iter=500):
        # Hyper-parameters.
        self.n_actions = n_actions
        self.gamma = gamma
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.n_explore = n_explore
        self.frame_per_action = frame_per_action
        self.replace_target_iter = replace_target_iter
        self.time_step = 0
        self.replay_memory = replay_buffer.ReplayBuffer(memory_size)
        # Learning rate decays with the global training step.
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.lr = tf.train.exponential_decay(starter_learning_rate, self.global_step, 10000, 0.96)
        self.createNetwork()
        # Op list that copies online (Q) weights into the target network.
        q_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Q_network')
        t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_network')
        self.replace_target_op = [tf.assign(t, q) for t, q in zip(t_params, q_params)]
        self.merged = tf.summary.merge_all()
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.sess.graph.finalize()
        # Resume from the latest checkpoint under SIGN/ when one exists.
        ckpt = tf.train.get_checkpoint_state(SIGN)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
            print("Successfully loaded:", ckpt.model_checkpoint_path)
        else:
            print("Could not find old network weights")
        self.writer = tf.summary.FileWriter("logs/" + SIGN, self.sess.graph)

    def createNetwork(self):
        """Build the online and target dueling networks plus loss/train ops."""
        self.state_input = tf.placeholder(tf.float32, [None,80,80,4], name='state_input')
        self.target_state_input = tf.placeholder(tf.float32, [None,80,80,4], name='target_state_input')
        self.y_input = tf.placeholder(tf.float32, [None], name='y_input')
        self.action_input = tf.placeholder(tf.float32, [None, self.n_actions], name='action_input')
        def conv_layer(input, filter_size, channels_in, channels_out, strides, name='conv'):
            # Conv2D + bias + ReLU with variance-scaling initialisation.
            with tf.variable_scope(name):
                w = tf.get_variable('W', [filter_size, filter_size, channels_in, channels_out], initializer=tf.variance_scaling_initializer())
                b = tf.get_variable('B', [channels_out], initializer=tf.constant_initializer())
                conv = tf.nn.conv2d(input, w, strides=[1,strides,strides,1], padding='SAME')
                return tf.nn.relu(conv + b)
        def fc_layer(input, channels_in, channels_out, activation=None, name='fc'):
            # Fully-connected layer with optional activation.
            with tf.variable_scope(name):
                w = tf.get_variable('W', [channels_in, channels_out], initializer=tf.variance_scaling_initializer())
                # NOTE(review): the b_fc0 alias is never read -- verify.
                b = b_fc0 = tf.get_variable('B', [channels_out], initializer=tf.constant_initializer())
                fc = tf.matmul(input, w) + b
                if activation == None:
                    return fc
                else:
                    return activation(fc)
        with tf.variable_scope("Q_network"):
            conv1 = conv_layer(self.state_input, 8, 4, 32, 4, name='conv1')
            pool1 = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME', name='pool1')
            conv2 = conv_layer(pool1, 4, 32, 64, 2, name='conv2')
            conv3 = conv_layer(conv2, 3, 64, 64, 1, name='conv3')
            flattened = tf.reshape(conv3, [-1, 5 * 5 * 64])
            fc1 = fc_layer(flattened, 5 * 5 * 64, 512, activation=tf.nn.relu, name='fc1')
            # Dueling heads: scalar state value V, per-action advantage A.
            with tf.variable_scope('Value'):
                V = fc_layer(fc1, 512, 1, name='V')
            with tf.variable_scope('Advantage'):
                A = fc_layer(fc1, 512, self.n_actions, name='A')
            # NOTE(review): the usual dueling combination is
            # V + A - mean(A); this code uses plain V + A -- confirm intended.
            self.Q_value = V + A
            tf.summary.scalar('mean_V_value', tf.reduce_mean(V))
            tf.summary.scalar('mean_A_value', tf.reduce_mean(A))
            tf.summary.scalar('mean_Q_value', tf.reduce_mean(self.Q_value))
            with tf.variable_scope('loss'):
                # Q-value of the taken action only (action_input is one-hot).
                Q_action = tf.reduce_sum(tf.multiply(self.Q_value, self.action_input), reduction_indices = 1)
                self.loss = tf.reduce_mean(tf.square(self.y_input - Q_action))
                tf.summary.scalar('mean_loss', self.loss)
            with tf.variable_scope('train'):
                self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=self.global_step)
        with tf.variable_scope("target_network"):
            # Mirror of the Q network; weights are copied in, never trained.
            conv1 = conv_layer(self.target_state_input, 8, 4, 32, 4, name='conv1')
            pool1 = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME', name='pool1')
            conv2 = conv_layer(pool1, 4, 32, 64, 2, name='conv2')
            conv3 = conv_layer(conv2, 3, 64, 64, 1, name='conv3')
            flattened = tf.reshape(conv3, [-1, 5 * 5 * 64])
            fc1 = fc_layer(flattened, 5 * 5 * 64, 512, activation=tf.nn.relu, name='fc1')
            with tf.variable_scope('Value'):
                V = fc_layer(fc1, 512, 1, name='V')
            with tf.variable_scope('Advantage'):
                A = fc_layer(fc1, 512, self.n_actions, name='A')
            self.Q_target = V + A

    def setInitState(self, observation):
        """Seed the 4-frame state stack by repeating the first frame."""
        self.current_state = np.stack((observation, observation, observation, observation), axis = 2)

    def setPerception(self, next_observation, action, reward, terminal):
        """Store one transition and (once warmed up) run a training step."""
        # Slide the frame window: drop oldest frame, append the new one.
        new_state = np.append(self.current_state[:,:,1:], next_observation, axis = 2)
        self.replay_memory.add(self.current_state, action, reward, new_state, terminal)
        if self.time_step > self.batch_size:
            # Train the network
            self.trainQNetwork()
        self.current_state = new_state
        self.time_step += 1

    def trainQNetwork(self):
        """Sample a minibatch and take one gradient step on the Bellman loss."""
        # Step 1: obtain random minibatch from replay memory
        state_batch, action_batch, reward_batch, next_state_batch, terminal_batch = self.replay_memory.sample(self.batch_size)
        # Step 2: calculate y -- bootstrap target from the frozen network;
        # terminal transitions use the bare reward.
        Q_target_batch = self.sess.run(self.Q_target, feed_dict={self.target_state_input: next_state_batch})
        y_batch = np.where(terminal_batch, reward_batch, reward_batch + self.gamma * np.max(Q_target_batch, axis=1))
        summary, _ = self.sess.run([self.merged, self.train_op], feed_dict={
            self.state_input: state_batch,
            self.y_input: y_batch,
            self.action_input: action_batch})
        self.writer.add_summary(summary, self.sess.run(self.global_step))
        # Periodically sync the target network with the online one.
        if self.time_step % self.replace_target_iter == 0:
            self.sess.run(self.replace_target_op)
        if self.sess.run(self.global_step) % 10000 == 0:
            self.saver.save(self.sess, SIGN + '/Qnetwork', global_step=self.global_step)

    def getAction(self):
        """Return a one-hot action: 50% random while exploring, else greedy."""
        action = np.zeros(self.n_actions)
        if self.sess.run(self.global_step) < self.n_explore:
            # During exploration, only act every frame_per_action frames.
            if self.sess.run(self.global_step) % self.frame_per_action == 0:
                Q_value = self.sess.run(self.Q_value, feed_dict={self.state_input: [self.current_state]})[0]
                action_index = random.randrange(self.n_actions) if random.random() <= 0.5 else np.argmax(Q_value)
                action[action_index] = 1
            else:
                action[0] = 1 # do nothing
        else:
            Q_value = self.sess.run(self.Q_value, feed_dict={self.state_input: [self.current_state]})[0]
            action[np.argmax(Q_value)] = 1
        return action

    def log_score(self, score):
        """Write the episode score as a TensorBoard scalar summary."""
        summary = tf.Summary(value=[tf.Summary.Value(tag='score', simple_value=score)])
        self.writer.add_summary(summary, self.sess.run(self.global_step))
|
{"/flappybird.py": ["/dueling_DQN.py"]}
|
22,283
|
lalluz/basic_user_API
|
refs/heads/master
|
/tests/test_helper_functions.py
|
import pytest
from codechallenge.app import is_date_valid
from codechallenge.app import is_email_valid
# Test is_date_valid(date)
@pytest.mark.parametrize('date', [
    '26-08-1977',
    '30-09-2010',
    '30-11-1983',
    '01-01-1934',
    '01-12-1900'
])
def test_correct_date(date):
    """Valid dd-mm-yyyy dates must be accepted."""
    # Plain truth assertion instead of `== True` (flake8 E712).
    assert is_date_valid(date)
@pytest.mark.parametrize('date', [
    '26-08-1899',
    '30-13-2010',
    '31-11-1983',
    '30-02-1934',
    '02/06/1979',
    '01 02 2011',
    '11122000',
    'abcdefghil',
    '',
])
def test_incorrect_date(date):
    """Malformed or out-of-range dates must be rejected."""
    # `not ...` instead of `== False` (flake8 E712).
    assert not is_date_valid(date)
# Test is_email_valid(email)
@pytest.mark.parametrize('email', [
    'larazilio@gmail.com',
    'la@kalsdkjkalsdj.de',
    'asdsdad@ssadsd.sdad.com',
    'lara.zilio@gmail.com',
])
def test_correct_email(email):
    """Well-formed addresses must be accepted."""
    # Plain truth assertion instead of `== True` (flake8 E712).
    assert is_email_valid(email)
@pytest.mark.parametrize('email', [
    'larazilio#gmail.com',
    'larazilio@gmail,com',
    'laraziliogmailcom',
    'lara$ilio@gmail.com',
    '12234@23232.232',
    '@sdsd.com',
    '',
])
def test_incorrect_email(email):
    """Malformed addresses must be rejected."""
    # `not ...` instead of `== False` (flake8 E712).
    assert not is_email_valid(email)
|
{"/app.py": ["/database_setup.py"], "/db_populator.py": ["/app.py"]}
|
22,284
|
lalluz/basic_user_API
|
refs/heads/master
|
/app.py
|
from database_setup import Base, User, Address
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
from flask import Flask
from flask import flash
from flask import jsonify
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
import re
app = Flask(__name__)
engine = create_engine('postgresql+psycopg2://codechallenge:secret_password@localhost:5432/codechallenge') # nopep8
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/users/getusers/', methods=['GET'])
def user_list():
    """Return every user, serialized as JSON, with status 200."""
    all_users = session.query(User).all()
    payload = jsonify(Users=[u.serialize for u in all_users])
    return make_response(payload, 200)
@app.route('/users/createUsers/', methods=['POST'])
def user_create():
    """Create a user from query-string arguments.

    Expects name, email, birthdate (dd-mm-yyyy) and address_id.  Returns
    the created user as JSON with 201, or 405 with a message on invalid
    input.
    """
    ra = request.args
    # .get() yields None for a missing key -- same semantics as the old
    # `None if 'k' not in ra else ra['k']` conditionals, in one lookup.
    name = ra.get('name')
    email = ra.get('email')
    birthdate = ra.get('birthdate')
    address_id = ra.get('address_id')
    if not (name and email and birthdate and address_id):
        return make_response(jsonify({"message": "invalid input"}), 405)
    if not is_email_valid(email):
        return make_response(jsonify({"message": "invalid email input"}), 405)
    if not is_date_valid(birthdate):
        return make_response(jsonify({"message": "invalid date input"}), 405)
    user = User(name=name,
                email=email.lower(),
                birthdate=birthdate,
                address_id=address_id)
    session.add(user)
    session.commit()
    serialized_user = jsonify(UserCreated=user.serialize)
    return make_response(serialized_user, 201)
@app.route('/users/getusersById/<user_id>/', methods=['GET'])
def user_detail(user_id):
    """Fetch one user by numeric id; 400 on bad id, 404 when absent."""
    try:
        uid = int(user_id)
    except ValueError:
        return make_response(jsonify({"message": "invalid user id"}), 400)
    try:
        record = session.query(User).filter_by(id=uid).one()
    except NoResultFound:
        return make_response(jsonify({"message": "user not found"}), 404)
    return make_response(jsonify(User=record.serialize), 200)
@app.route('/users/updateUsersById/<user_id>/', methods=['PUT'])
def user_update(user_id):
    """Update an existing user in place from query-string arguments.

    BUG FIX: the previous version constructed a brand-new User and added it
    to the session, which inserted a duplicate row instead of updating the
    fetched one.  The fetched row is now mutated and committed.
    """
    try:
        user_id = int(user_id)
    except ValueError:
        return make_response(jsonify({"message": "invalid user id"}), 400)
    try:
        user = session.query(User).filter_by(id=user_id).one()
    except NoResultFound:
        return make_response(jsonify({"message": "user not found"}), 404)
    ra = request.args
    # Truthiness check replaces the fragile `ra == {}` comparison.
    if not ra:
        return make_response(jsonify({"message": "invalid input"}), 405)
    # Fall back to the stored values for any argument not supplied.
    email = ra.get('email', user.email)
    birthdate = ra.get('birthdate', user.birthdate)
    if not is_email_valid(email):
        return make_response(jsonify({"message": "invalid email input"}), 405)
    if not is_date_valid(birthdate):
        return make_response(jsonify({"message": "invalid date input"}), 405)
    user.name = ra.get('name', user.name)
    user.email = email.lower()
    user.birthdate = birthdate
    user.address_id = ra.get('address_id', user.address_id)
    session.commit()
    serialized_user = jsonify(UserUpdated=user.serialize)
    return make_response(serialized_user, 200)
@app.route('/users/deleteUsersById/<user_id>/', methods=['DELETE'])
def user_delete(user_id):
    """Delete one user by numeric id; 400 on bad id, 404 when absent."""
    try:
        uid = int(user_id)
    except ValueError:
        return make_response(jsonify({"message": "invalid user id"}), 400)
    try:
        record = session.query(User).filter_by(id=uid).one()
    except NoResultFound:
        return make_response(jsonify({"message": "user not found"}), 404)
    session.delete(record)
    session.commit()
    return make_response(jsonify({"message": "user deleted"}), 200)
def is_email_valid(email):
    '''Return True when *email* matches a basic address pattern.'''
    # https://www.scottbrady91.com/Email-Verification/Python-Email-Verification-Script
    pattern = r'^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$'
    # bool() collapses the Match-or-None result to the same True/False
    # the previous if/return pair produced.
    return bool(re.match(pattern, email))
def is_date_valid(birthdate):
    '''Validate a 'dd-mm-yyyy' date string.

    Checks the separators, numeric fields, year range 1900-2018 and the
    per-month day limit.  Deliberately simplified: February 29 is accepted
    in every year (no leap-year check), as before.

    BUG FIX: month 6 (June) was listed in BOTH the 30-day and 31-day month
    lists, so '31-06-yyyy' was wrongly accepted; June is now only a 30-day
    month.
    '''
    if len(birthdate) != 10:
        return False
    if birthdate[2] != '-' or birthdate[5] != '-':
        return False
    try:
        day = int(birthdate[0:2])
        month = int(birthdate[3:5])
        year = int(birthdate[6:10])
    except ValueError:
        return False
    if not (1 <= day <= 31 and 1 <= month <= 12 and 1900 <= year <= 2018):
        return False
    months_with_30_days = [4, 6, 9, 11]
    months_with_31_days = [1, 3, 5, 7, 8, 10, 12]  # June removed (bug fix)
    if month in months_with_30_days:
        return day <= 30
    if month in months_with_31_days:
        return True  # day is already bounded to <= 31 above
    # February: up to 29, leap years deliberately not checked.
    return day <= 29
if __name__ == '__main__':
    # Development entry point: debug mode and a hard-coded secret key are
    # for local use only -- do not deploy as-is.
    app.secret_key = "secret_key"
    app.debug = True
    app.run(host='0.0.0.0', port=5000)
|
{"/app.py": ["/database_setup.py"], "/db_populator.py": ["/app.py"]}
|
22,285
|
lalluz/basic_user_API
|
refs/heads/master
|
/database_setup.py
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Address(Base):
    """Postal address row; referenced by User through address_id."""
    __tablename__ = "address"
    id = Column(Integer, primary_key=True)
    street = Column(String(100))
    state = Column(String(50))
    city = Column(String(50))
    country = Column(String(50))
    zip = Column(String(20))
    # JSON serialization
    @property
    def serialize(self):
        """Return a plain dict view of the row for jsonify()."""
        # NOTE(review): `zip` is not included here -- confirm intended.
        return {
            "id": self.id,
            "street": self.street,
            "state": self.state,
            "city": self.city,
            "country": self.country
        }
class User(Base):
    """Application user; each row points at one Address."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    email = Column(String(100), nullable=False)
    # Stored as the raw 'dd-mm-yyyy' string, not a Date column.
    birthdate = Column(String(10), nullable=False)
    # ForeignKey
    address_id = Column(Integer, ForeignKey("address.id"))
    address = relationship(Address)
    # JSON serialization
    @property
    def serialize(self):
        """Return a plain dict view of the row, embedding the address."""
        # NOTE(review): raises AttributeError when self.address is None --
        # confirm address_id is always populated before serializing.
        return {
            "id": self.id,
            "name": self.name,
            "email": self.email,
            "birthdate": self.birthdate,
            "address": {
                "id": self.address.id,
                "street": self.address.street,
                "state": self.address.state,
                "city": self.address.city,
                "country": self.address.country
            }
        }
# Importing this module creates the tables as a side effect.
engine = create_engine('postgresql+psycopg2://codechallenge:secret_password@localhost:5432/codechallenge') # nopep8
Base.metadata.create_all(engine)
|
{"/app.py": ["/database_setup.py"], "/db_populator.py": ["/app.py"]}
|
22,286
|
lalluz/basic_user_API
|
refs/heads/master
|
/db_populator.py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app import Base, User, Address
engine = create_engine('postgresql+psycopg2://codechallenge:secret_password@localhost:5432/codechallenge') # nopep8
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def add_users():
    """Insert ten demo users, one per pre-created address (ids 1-10)."""
    for i in range(1, 11):
        user = User(name=f"user_{i}",
                    email=f"user_{i}@example.com",
                    # Plain string: the old f-string had no placeholders (F541).
                    birthdate="12-12-2000",
                    address_id=i)
        session.add(user)
        session.commit()
def add_addresses():
    """Insert ten demo addresses with sequential fake field values."""
    for n in range(1, 11):
        record = Address(street=f"street_{n}",
                         state=f"state_{n}",
                         city=f"city_{n}",
                         country=f"country_{n}",
                         zip=n + 15320)
        session.add(record)
        session.commit()
    return

# Addresses first, so the users' address_id foreign keys resolve.
add_addresses()
add_users()
|
{"/app.py": ["/database_setup.py"], "/db_populator.py": ["/app.py"]}
|
22,294
|
zyiyy/repr-ortho
|
refs/heads/master
|
/util.py
|
import time
import tensorflow as tf
def get_scope_variable(scope, var, shape=None):
    """Get (or create) variable *var* inside *scope*, reusing when it exists."""
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        v = tf.get_variable(var, shape)
    return v
def avg(l):
    """Arithmetic mean of a non-empty sequence."""
    total = sum(l)
    return total / len(l)
def chunker(seq, size):
    """Yield successive slices of *seq* of length *size* (last may be shorter).

    Uses range() instead of the Python-2-only xrange() so the helper also
    works under Python 3; under Python 2 the iteration is identical.
    """
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def measure_time(f):
    """Decorator: print the wall-clock duration of every call to *f*.

    The Python-2 print statement is replaced by a single parenthesized
    print call, which behaves identically under both Python 2 and 3, and
    functools.wraps preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(f)
    def timed(*args, **kw):
        ts = time.time()
        result = f(*args, **kw)
        te = time.time()
        print('%r (%r, %r) %2.2f sec' % (f.__name__, args, kw, te - ts))
        return result
    return timed
def replace_none_with_zero(l):
    """Return a copy of *l* with every None replaced by 0.

    Uses an identity check (`is None`) rather than `== None`: idiomatic
    (PEP 8) and safe against objects overriding __eq__.
    """
    return [0 if i is None else i for i in l]
|
{"/train.py": ["/util.py", "/ranking.py", "/ortho.py", "/model.py"]}
|
22,295
|
zyiyy/repr-ortho
|
refs/heads/master
|
/model.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange
import tensorflow as tf
from functools import reduce
slim = tf.contrib.slim
def convnet(image, nb_classes):
    ''' vanilla convnet

    Builds a 3-layer conv stack where every layer's output is multiplied
    by a per-filter placeholder mask (collected under 'yeta') so individual
    filters can be switched off for RePr training.  Returns the dense
    logits and the list of masked conv activations.
    '''
    output = image
    ini = tf.truncated_normal_initializer(stddev=0.04)
    conv_activations = []
    for layer_num, filter_size in enumerate([32]*3):
        scope = 'conv'+str(layer_num)
        output = slim.conv2d(output, filter_size, [5, 5], scope=scope, weights_initializer=ini)
        # add filter level mask for the network (one scalar per filter)
        betas = tf.placeholder(dtype=tf.float32, shape=(filter_size), name=scope+'/yeta')
        tf.add_to_collection('yeta', betas)
        print(output.shape, betas.shape)
        output = output * betas
        conv_activations.append(output)
        output = slim.max_pool2d(output, [3, 3], stride=2, padding='SAME', data_format='NHWC', scope='pool'+str(layer_num))
        if layer_num >= 7:
            # ideally BN should not be added to see RePr effects but it is harder to train deeper network without BN
            # NOTE(review): unreachable with only 3 layers (layer_num <= 2).
            output = slim.batch_norm(output, scope='batchnorm'+str(layer_num))
    # Flatten whatever spatial shape remains and project to the classes.
    final_shape = output.get_shape().as_list()[1:]
    number_of_dense = reduce(lambda a, b: a * b, final_shape)
    output_conv = tf.reshape(output, [-1, number_of_dense])
    output_dense = tf.layers.dense( output_conv, nb_classes, activation=None, name='dense1', kernel_initializer=ini)
    return output_dense, conv_activations
|
{"/train.py": ["/util.py", "/ranking.py", "/ortho.py", "/model.py"]}
|
22,296
|
zyiyy/repr-ortho
|
refs/heads/master
|
/ortho.py
|
import tensorflow as tf
import numpy as np
def get_ortho_weights(var, gain):
    ''' compute the orthogonal initialization, this is only an approximate '''
    # Collapse all leading dims into rows; keep the last (filter) dim.
    num_rows = 1
    for dim in var.shape[:-1]:
        num_rows *= dim
    num_cols = var.shape[-1]
    flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows, num_cols)
    a = tf.reshape(tf.nn.l2_normalize(var), flat_shape)
    # using svd would be better approximation but tf.qr seems faster
    q, r = tf.qr(a, full_matrices=False)
    # Fix QR's sign ambiguity so the decomposition is deterministic.
    d = tf.diag_part(r)
    q *= tf.sign(d)
    if num_rows < num_cols:
        q = tf.matrix_transpose(q)
    # gain is used to scale the new weights, needed for deeper networks
    return tf.reshape(gain*q, var.shape)
|
{"/train.py": ["/util.py", "/ranking.py", "/ortho.py", "/model.py"]}
|
22,297
|
zyiyy/repr-ortho
|
refs/heads/master
|
/train.py
|
from __future__ import print_function, division
import os, sys, argparse
import tensorflow as tf
import numpy as np
import random
random.seed(1337)
tf.set_random_seed(1337)
np.random.seed(1337)
from util import *
from ranking import *
from ortho import *
from model import *
# Command-line configuration for RePr vs. standard training.
parser = argparse.ArgumentParser()
parser.add_argument('--standard', dest='standard', action='store_true', help='Train standard instead of RePr')
parser.add_argument('-learning_rate', type=float, default=0.05, help='Initial learning Rate for SGD')
parser.add_argument('-epochs', type=int, default=200, help='Number of epochs to train')
parser.add_argument('-tuner', type=int, default=20, help='Number of epochs to train the sub-network')
parser.add_argument('-freq', type=int, default=20, help='Number of epochs to train the full-network')
parser.add_argument('-batch_size', type=int, default=64, help='Batch size')
parser.add_argument('-rank', type=int, default=50, help='Percentage of filters to drop')
parser.add_argument('-gain', type=float, default=1.00, help='Multiplier for the ortho initializer')
args = parser.parse_args()
print(args)
# Pre-saved numpy arrays: 32x32x3 images with 10 classes.
path = 'data/'
nb_classes = 10
X, X_test = np.load(path+'x.npy'), np.load(path+'x_test.npy')
Y, Y_test = np.load(path+'y.npy'), np.load(path+'y_test.npy')
X_train, X_val = X[:45000], X[45000:]
Y_train, Y_val = Y[:45000], Y[45000:]
# X_train, X_val = X, X
# Y_train, Y_val = Y, Y
# validation split was used to run the oracle ranking
train_indices = list(xrange(0,len(X_train)))
test_indices = list(xrange(0,len(X_test)))
val_indices = list(xrange(0,len(X_val)))
# Graph inputs and the shared convnet (logits + maskable activations).
x = tf.placeholder("float", [None, 32, 32, 3])
y = tf.placeholder("float", [None, nb_classes])
logits, conv_activations = convnet(x, nb_classes)
def test(sorted_vars):
    ''' single epoch over the test data '''
    test_epoch_acc = []
    # Binary filter masks; all ones when sorted_vars is empty (no pruning).
    beta_dict = get_feed_dict(sorted_vars)
    for ind in chunker(test_indices, args.batch_size):
        feed_dict = {x: X_test[ind], y: Y_test[ind]}
        # NOTE(review): dict(a.items() + b.items()) is Python-2-only syntax.
        test_epoch_acc.append(accuracy.eval(dict(feed_dict.items() + beta_dict.items())))
    return avg(test_epoch_acc)
def train(epoch, sorted_vars):
    ''' single epoch for the training

    Returns (mean cost, mean accuracy, last learning rate).
    '''
    random.shuffle(train_indices)
    train_epoch_cost = []
    train_epoch_acc = []
    var_list = tf.trainable_variables()  # NOTE(review): unused -- verify
    # beta is the binary mask which is used to turn on/off the filters
    beta_dict = get_feed_dict(sorted_vars)
    for ind in chunker(train_indices, args.batch_size):
        # Drop the final partial batch.
        if len(ind) != args.batch_size: continue
        feed_dict={x: X_train[ind], y: Y_train[ind]}
        # NOTE(review): dict(a.items() + b.items()) is Python-2-only syntax.
        _, lgt, lr, c,acc = sess.run([optimizer, logits, learning_rate, cost, accuracy],
            feed_dict=dict(feed_dict.items() + beta_dict.items()))
        train_epoch_cost.append(c)
        train_epoch_acc.append(acc)
    return avg(train_epoch_cost), avg(train_epoch_acc), lr
def one_epoch(epoch, sorted_vars):
    ''' wrapper for a single epoch of training: train, then log test accuracy '''
    train_cost, train_acc, lr = train(epoch, sorted_vars)
    print("Epoch:{:3d} lr={:.3f} cost={:.4f} Training accuracy={:.3f} Test accuracy={:.3f}"
        .format(epoch,lr, train_cost, train_acc, test(sorted_vars)))
def prune_with_train():
    ''' RePr training which trains the sub-network '''
    # rank() picks the worst args.rank percent of filters network-wide.
    sorted_vars = rank(args.rank)
    # rank function returns a binary mask
    for iepoch in xrange(args.tuner):
        one_epoch(iepoch, sorted_vars)
    # after doing the sub-network training, re-initialize the filters
    # to be orthogonal (approximately)
    def reinitialize_pruned():
        ops = []
        for weights, filters in sorted_vars:
            # sorted_vars maintains the filters that needs to be dropped
            c = get_ortho_weights(weights, args.gain)[:,:,:,filters]
            ops.append(weights[:,:,:,filters].assign(c))
        sess.run(ops)
    reinitialize_pruned()
with tf.Session() as sess:
    # Loss/accuracy nodes are built inside the session block so `sess`
    # serves as the default session for the .eval() calls in test().
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
    prediction= tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
    global_step = tf.Variable(0, trainable=False)
    # Halve the learning rate every 25k steps.
    learning_rate = tf.train.exponential_decay( args.learning_rate, global_step, 25000, 0.50, staircase=True)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost, global_step=global_step)
    sess.run(tf.global_variables_initializer())
    for epoch in range(1,args.epochs):
        # train the full-network
        one_epoch(epoch, sorted_vars=[])
        if epoch % args.freq == 0 and epoch >= 1:
            if args.rank and not args.standard:
                # train the sub-network with pruning
                prune_with_train()
            else:
                # if running standard mode, do normal training instead
                for iepoch in xrange(args.tuner):
                    one_epoch(iepoch, sorted_vars=[])
|
{"/train.py": ["/util.py", "/ranking.py", "/ortho.py", "/model.py"]}
|
22,298
|
zyiyy/repr-ortho
|
refs/heads/master
|
/ranking.py
|
import numpy as np
import tensorflow as tf
def conv_to_yeta(name):
    """Map a conv weight tensor name ('.../weights:0') to its mask name ('.../yeta:0')."""
    parts = name.split('/')
    return '/'.join(parts[:-1] + ['yeta:0'])
def get_feed_dict(sorted_vars):
    ''' helper function to make the binary mask for the given convnet,
    uses the name of the variables to find conv filters

    Every mask entry starts at 1 (filter enabled); the (weights, filter)
    pairs in sorted_vars are zeroed so those filters are switched off.
    '''
    beta_dict = {}
    for w in tf.get_collection('yeta'):
        # by default all of the filters are enabled
        n_filters = w.get_shape().as_list()[-1]
        beta_dict[w.name] = [1] * n_filters
    for weights, filt in sorted_vars:
        # this helper function converts weight name to binary masks
        yeta_name = conv_to_yeta(weights.name)
        try:
            beta_dict[yeta_name][filt] = 0
        except (KeyError, IndexError):
            # Narrowed from a bare except: only a missing mask name or an
            # out-of-range filter index triggers the fallback below.
            try:
                # this messy nested block is because of how tf does naming
                # of the variables: retry with a doubled scope prefix.
                y = yeta_name.split('/')
                y = '/'.join([y[0], y[0]] + y[1:])
                beta_dict[y][filt] = 0
            except (KeyError, IndexError):
                print('Not pruning {}'.format(yeta_name))
    return beta_dict
def inter_ortho(vars_to_prune, acc_fn):
    ''' inter-filter orthogonality ranking '''
    scores = {}
    for weights in vars_to_prune:
        # Treat each filter as one column of the flattened kernel matrix.
        weights_flat = tf.reshape(weights, (-1,weights.shape[-1]))
        norm_weight = tf.nn.l2_normalize(weights_flat)
        # normalizing weight is necessary for comparing different filters
        projection = tf.matmul(norm_weight, norm_weight, transpose_a=True)
        # remove the diagonal elements as they are self-projection
        identity = tf.diag(tf.diag_part(projection))
        ortho_proj = tf.reduce_mean(projection-identity, axis=0)
        for filter in range(ortho_proj.get_shape().as_list()[-1]):
            # ortho aggregation is done per layer
            v = tf.abs(ortho_proj[filter])
            scores[(weights,filter)] = float(acc_fn(v.eval()))
    # Highest overlap (least orthogonal) first.
    # NOTE(review): iteritems() and tuple-unpacking lambdas are Python-2-only.
    return sorted(scores.iteritems(), key=lambda (k,v): v, reverse=True)
def rank(p):
    '''Return the (weights, filter) pairs for the worst p percent of filters.'''
    def fn(x, axis=None):
        # aggregative function composition was inspired from Molchanov paper
        f = np.mean
        g = np.abs
        if axis is not None:
            return f(g(x),axis)
        return g(f(x))
    var_name = 'weights'
    vars_to_prune=[]
    for weights in tf.trainable_variables():
        # hardcoded names are used to find conv filters to rank the filters
        if 'conv' not in weights.name.lower() or var_name not in weights.name.lower() or 'logits' in weights.name.lower():
            continue
        vars_to_prune.append(weights)
    sorted_vars = inter_ortho(vars_to_prune, fn)
    cutoff = int(len(sorted_vars)*p/100)
    # cut-off is global across the full network
    return [s[0] for s in sorted_vars[:cutoff]]
|
{"/train.py": ["/util.py", "/ranking.py", "/ortho.py", "/model.py"]}
|
22,299
|
KornbergFresnel/CommNet
|
refs/heads/master
|
/plot_lot.py
|
import sys
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
# Plot training curves from a log file whose lines alternate between
# actor entries and critic entries.  Usage:
#   python plot_lot.py <logfile> plotloss|plotvalue
filename = sys.argv[1]
# data[0]: actor rows, data[1]: critic rows (lines alternate in the file)
data = [[], []]
with open(filename, 'r') as f:
    i = 0
    for line in f.readlines():
        items = line.split("\t")
        row = []
        # each tab-separated field after the first looks like "name:value";
        # the value part is parsed with eval.
        # NOTE(review): eval on file content executes arbitrary code --
        # only feed this script trusted log files.
        for item in items[1:]:
            t = eval(item.split(':')[1])
            row.append(t)
        if len(row) > 0:
            # i only advances on data-bearing lines, preserving the
            # actor/critic alternation even when blank lines appear
            data[i % 2].append(row)
            i += 1
# actor: loss, reward
data[0] = np.array(data[0])
# critic: loss, baseline
data[1] = np.array(data[1])
matplotlib.style.use("ggplot")
plt.figure(figsize=(15, 10))
fig1 = plt.subplot(211)
fig2 = plt.subplot(212)
if sys.argv[2] == "plotloss":
    # column 0 of each row is the loss
    fig1.plot(np.arange(len(data[0])), data[0][:, 0])
    fig1.set_ylabel("Loss value")
    fig1.set_title("Loss per 10 episodes")
    fig2.plot(np.arange(len(data[1])), data[1][:, 0])
    fig2.set_xlabel("Episodes per 10")
    fig2.set_ylabel("Loss value")
elif sys.argv[2] == "plotvalue":
    # column 1 of each row is the reward (actor) / baseline (critic)
    fig1.plot(np.arange(len(data[0])), data[0][:, 1])
    fig1.set_ylabel("Rate value")
    fig1.set_title("Reward per 10 episodes")
    fig2.plot(data[1][:, 1])
    fig2.set_xlabel("Episodes per 10")
    fig2.set_ylabel("Baseline value")
    fig2.set_title("Baseline per 10 episodes")
plt.show()
|
{"/leaver_game.py": ["/base.py"], "/leaver_train.py": ["/leaver_game.py"]}
|
22,300
|
KornbergFresnel/CommNet
|
refs/heads/master
|
/leaver_game.py
|
import numpy as np
import tensorflow as tf
from base import BaseModel
class CommNet(BaseModel):
    """Actor network for the leaver game.

    Builds a two-communication-step CommNet over `num_leaver` agents,
    samples one action per agent from the softmax policy, and trains with
    a REINFORCE-style loss using an externally supplied baseline.
    """
    def __init__(self, num_leaver=5, num_agents=500, vector_len=128, num_units=10, learning_rate=0.0005, batch_size=64,
                 episodes=500):
        super().__init__(num_leaver, num_agents, vector_len, num_units, learning_rate, batch_size, episodes)
        # Fed at train time: critic's value estimate and the sampled reward.
        self.base_line = tf.placeholder(tf.float32, shape=(None, 1))
        self.base_reward = tf.placeholder(tf.float32, shape=(None, 1))
        # Small constant keeping log() away from zero probabilities.
        self.bias = 1e-4
        # ==== create network =====
        with tf.variable_scope("CommNet"):
            self.eval_name = tf.get_variable_scope().name
            # agent-id embedding table: (num_agents, vector_len)
            self.look_up = tf.get_variable("look_up_table", shape=(self.num_agents, self.vector_len),
                                           initializer=tf.random_normal_initializer)
            # output projection: (vector_len, n_actions)
            self.dense_weight = tf.get_variable("dense_w", shape=(self.vector_len, self.n_actions),
                                                initializer=tf.random_uniform_initializer)
            self.policy = self._create_network()
            self.reward = self._get_reward()
            self.loss = self._get_loss()
            self.train_op = tf.train.RMSPropOptimizer(self.alpha).minimize(self.loss)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def _create_network(self):
        """Build the two-step CommNet and return the per-agent softmax policy."""
        # look-up table: one-hot agent ids -> embeddings h0
        input_one_hot = tf.one_hot(self.input, self.num_agents)
        # CommNet: two cells with a mean-communication step in between
        h0 = tf.einsum("ijk,kl->ijl", input_one_hot, self.look_up)
        h1 = self._create_cell("step_first", self.c_meta, h0, h0)
        c1 = self._mean(h1)
        h2 = self._create_cell("step_second", c1, h1, h0)
        out = tf.einsum("ijk,kl->ijl", h2, self.dense_weight)
        # soft-max over actions, per agent
        soft = tf.nn.softmax(out)
        return soft
    def _sample_action(self):
        """Sample one action per agent and keep its one-hot encoding."""
        reshape_policy = tf.reshape(self.policy, shape=(-1, self.n_actions))
        # sample actions from the (log-)policy
        self.actions = tf.multinomial(tf.log(reshape_policy + self.bias), num_samples=1)
        one_hot = tf.one_hot(self.actions, depth=self.n_actions)
        self.one_hot = tf.reshape(one_hot, shape=(-1, self.num_leaver, self.n_actions))
    def _get_reward(self):
        """Reward = fraction of distinct actions chosen by the group."""
        self._sample_action()
        distinct_num = tf.reduce_sum(tf.cast(tf.reduce_sum(self.one_hot, axis=1) > 0, tf.float32), axis=1,
                                     keep_dims=True)
        return distinct_num / self.num_leaver
    def _get_loss(self):
        """Policy-gradient loss weighted by the advantage (reward - baseline).

        NOTE(review): the tile multiples [1, 5, 5] hard-code num_leaver=5 and
        n_actions=5 -- confirm before changing those constructor defaults.
        """
        # advantage: n, 1, 1
        meta = tf.reshape(self.base_reward - self.base_line, shape=(-1, 1, 1))
        labels = tf.reshape(tf.cast(self.one_hot, dtype=tf.float32) * tf.tile(meta, [1, 5, 5]),
                            shape=(-1, self.n_actions))
        prob = tf.reshape((self.policy + self.bias), shape=(-1, self.n_actions))
        loss = tf.reduce_mean(tf.reduce_sum(-1.0 * labels * tf.log(prob), axis=1))
        return loss
    def get_reward(self, ids):
        """Run the sampled-reward op for a batch of agent-id groups."""
        # produce
        reward, gun, policy = self.sess.run([self.reward, self.dense_weight, self.policy], feed_dict={
            self.input: ids,
            self.mask: self.mask_data,
            self.c_meta: np.zeros((self.batch_size, self.num_leaver, self.vector_len))
        })
        return reward
    def train(self, ids, base_line=None, base_reward=None, **kwargs):
        """One RMSProp update; logs mean loss/reward every 20 iterations.

        kwargs must contain 'log' (a logger) and 'itr' (iteration index).
        """
        _, loss, reward, policy = self.sess.run([self.train_op, self.loss, self.reward, self.policy], feed_dict={
            self.input: ids,
            self.mask: self.mask_data,
            self.c_meta: np.zeros((self.batch_size, self.num_leaver, self.vector_len)),
            self.base_line: base_line,
            self.base_reward: base_reward
        })
        log = kwargs["log"]
        itr = kwargs["itr"]
        sum_loss = np.sum(loss) / self.batch_size
        sum_base = np.sum(reward) / self.batch_size
        if (itr + 1) % 20 == 0:
            log.info("iteration:{0}\tloss:{1}\treward:{2}".format(itr, sum_loss, sum_base))
class BaseLine(BaseModel):
    """Critic network: estimates a scalar baseline value for a batch of
    agent-id groups, trained with a scaled squared error against the
    actor's sampled reward."""
    def __init__(self, num_leaver=5, num_agents=500, vector_len=128, num_units=10, learning_rate=0.0005, batch_size=64,
                 episodes=500):
        super().__init__(num_leaver, num_agents, vector_len, num_units, learning_rate, batch_size, episodes)
        # critic outputs a single scalar, overriding the base n_actions
        self.n_actions = 1
        # loss scale factor
        self.eta = 0.003
        # target fed at train time (the actor's sampled reward)
        self.reward = tf.placeholder(tf.float32, shape=(None, 1))
        # ==== create network =====
        with tf.variable_scope("Arctic"):
            self.dense_weight = tf.get_variable("dense_weight", shape=(self.vector_len, 1),
                                                initializer=tf.random_normal_initializer)
            self.baseline = self._create_network()  # n * 5 * n_actions
            # cross entropy: n * 5 * 1
            self.loss = self._get_loss()
            self.train_op = tf.train.RMSPropOptimizer(self.alpha).minimize(self.loss)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
    def _create_network(self):
        """Build a CommNet identical in shape to the actor's, then reduce
        the per-agent sigmoid outputs to one mean value per batch row."""
        # look-up table (critic keeps its own embedding, under scope "Arctic")
        look_up = tf.get_variable("look_up_table", shape=(self.num_agents, self.vector_len),
                                  initializer=tf.random_normal_initializer)
        input_one_hot = tf.one_hot(self.input, self.num_agents)
        # CommNet-Baseline
        h0 = tf.einsum("ijk,kl->ijl", input_one_hot, look_up)
        h1 = self._create_cell("step_first", self.c_meta, h0, h0)
        c1 = self._mean(h1)
        h2 = self._create_cell("step_second", c1, h1, h0)
        dense = tf.einsum("ijk,kl->ijl", h2, self.dense_weight)
        # out = tf.einsum("ijk,kl->ijl", dense)
        self.t = tf.sigmoid(dense)
        out = tf.reduce_mean(self.t, axis=1)
        return out
    def _get_loss(self):
        """Scaled sum of squared errors between target reward and baseline."""
        loss = tf.reduce_sum(tf.square(self.reward - self.baseline)) * self.eta
        return loss
    def get_reward(self, ids):
        """Return the baseline estimates for a batch of agent-id groups."""
        return self.sess.run(self.baseline, feed_dict={
            self.input: ids,
            self.mask: self.mask_data,
            self.c_meta: np.zeros((self.batch_size, self.num_leaver, self.vector_len))
        })
    def train(self, ids, base_line=None, base_reward=None, **kwargs):
        """One RMSProp update toward base_reward; logs every 20 iterations.

        base_line is accepted for interface symmetry with CommNet.train but
        is unused here.  kwargs must contain 'log' and 'itr'.
        """
        _, loss, base = self.sess.run([self.train_op, self.loss, self.baseline], feed_dict={
            self.input: ids,
            self.mask: self.mask_data,
            self.c_meta: np.zeros((self.batch_size, self.num_leaver, self.vector_len)),
            self.reward: base_reward
        })
        log = kwargs["log"]
        itr = kwargs["itr"]
        sum_loss = np.sum(loss) / self.batch_size
        sum_base = np.sum(base) / self.batch_size
        if (itr + 1) % 20 == 0:
            log.info("iteration:{0}\tloss:{1}\tbase:{2}".format(itr, sum_loss, sum_base))
        # print("iteration:{0}\tloss:{1}\tbase:{2}".format(itr, sum_loss, sum_base))
|
{"/leaver_game.py": ["/base.py"], "/leaver_train.py": ["/leaver_game.py"]}
|
22,301
|
KornbergFresnel/CommNet
|
refs/heads/master
|
/base.py
|
import tensorflow as tf
import numpy as np
class BaseModel:
    """Shared scaffolding for the CommNet actor and BaseLine critic:
    hyper-parameters, input placeholders, the communication cell, and the
    masked-mean communication step.  Subclasses fill in the pass-through
    methods at the bottom."""
    def __init__(self, num_leaver=5, num_agents=500, vector_len=128, num_units=10, learning_rate=0.003,
                 batch_size=64, episodes=500):
        # ===== parameters =====
        self.num_leaver = num_leaver
        self.num_agents = num_agents
        self.alpha = learning_rate
        self.vector_len = vector_len
        self.num_units = num_units
        self.batch_size = batch_size
        self.n_actions = num_leaver
        self.episodes = episodes
        # ===== pre-define data: look-up table, id =====
        self.ids = None
        # mask with zero diagonal: each agent averages the others, not itself
        self.mask_data = np.ones(shape=(self.num_leaver, self.num_leaver), dtype=np.float32)
        self.mask_data[np.arange(self.num_leaver), np.arange(self.num_leaver)] = 0.0
        # ===== network define =====
        # input: (batch, num_leaver) agent ids; c_meta: initial communication
        self.input = tf.placeholder(tf.int32, shape=(None, self.num_leaver))
        self.c_meta = tf.placeholder(tf.float32, shape=(None, self.num_leaver, self.vector_len))
    def _create_cell(self, name, c, h, h_meta):
        """One CommNet cell: relu(h*H + c*C + h_meta*H_META) under scope `name`."""
        with tf.variable_scope(name):
            self.H = tf.get_variable("w_h", shape=(self.vector_len, self.vector_len),
                                     initializer=tf.random_normal_initializer())
            self.C = tf.get_variable("w_c", shape=(self.vector_len, self.vector_len),
                                     initializer=tf.random_normal_initializer())
            self.H_META = tf.get_variable("w_h_meta", shape=(self.vector_len, self.vector_len),
                                          initializer=tf.random_normal_initializer())
            dense_h = tf.einsum("ijk,kl->ijl", h, self.H)
            dense_c = tf.einsum("ijk,kl->ijl", c, self.C)
            dense_h_meta = tf.einsum("ijk,kl->ijl", h_meta, self.H_META)
            dense = dense_h + dense_c + dense_h_meta
            return tf.nn.relu(dense)
    def _mean(self, h):
        """Average each agent's hidden state over the other num_leaver-1 agents.

        NOTE(review): this creates (and overwrites) the self.mask placeholder
        every call -- safe only because subclasses call it exactly once.
        """
        amount = self.num_leaver - 1
        self.mask = tf.placeholder(tf.float32, shape=(self.num_leaver, self.num_leaver))
        c = tf.einsum("ij,kjl->kil", self.mask, h) / amount
        return c
    # ----- subclass hooks (no-ops here) -----
    def _create_network(self):
        pass
    def _sample_action(self):
        pass
    def get_reward(self, ids_one_hot):
        pass
    def _get_loss(self):
        pass
    def train(self, ids, base_line=None, base_reward=None, **kwargs):
        pass
|
{"/leaver_game.py": ["/base.py"], "/leaver_train.py": ["/leaver_game.py"]}
|
22,302
|
KornbergFresnel/CommNet
|
refs/heads/master
|
/leaver_train.py
|
import numpy as np
import logging as log
from leaver_game import BaseLine, CommNet
# Hyper-parameters shared by actor and critic.
N_AGENTS = 50
VECTOR_LEN = 128
BATCH_SIZE = 64
LEVER = 5
# set logger: write to leaver_train.log and echo to the console at INFO
log.basicConfig(level=log.INFO, filename="leaver_train.log")
console = log.StreamHandler()
console.setLevel(log.INFO)
log.getLogger("").addHandler(console)
def train(episode):
    """Alternately train the CommNet actor and the BaseLine critic.

    Each iteration samples BATCH_SIZE groups of LEVER distinct agent ids,
    queries both networks, then performs one update on each (the `log`
    module itself is passed as the logger the networks expect).
    """
    actor = CommNet(num_agents=N_AGENTS, vector_len=VECTOR_LEN, batch_size=BATCH_SIZE)
    critic = BaseLine(num_agents=N_AGENTS, vector_len=VECTOR_LEN, batch_size=BATCH_SIZE)
    for i in range(episode):
        # ids: (BATCH_SIZE, LEVER) array, distinct agent indices per row
        ids = np.array([np.random.choice(N_AGENTS, LEVER, replace=False)
                        for _ in range(BATCH_SIZE)])
        reward = actor.get_reward(ids)
        baseline = critic.get_reward(ids)
        actor.train(ids, base_line=baseline, base_reward=reward, itr=i, log=log)
        critic.train(ids, base_reward=reward, itr=i, log=log)
if __name__ == "__main__":
    train(100000)
|
{"/leaver_game.py": ["/base.py"], "/leaver_train.py": ["/leaver_game.py"]}
|
22,303
|
aconstantinou123/sorting_algorithms
|
refs/heads/master
|
/quick_sort.py
|
def quick_sort(a, p, r):
    """In-place quicksort of a[p..r] (both indices inclusive).

    Returns a for caller convenience; an empty range (p >= r) is a no-op.
    """
    if p < r:
        pivot_index = partition(a, p, r)
        quick_sort(a, p, pivot_index - 1)
        quick_sort(a, pivot_index + 1, r)
    return a
def partition(a, p, r):
    """Lomuto partition of a[p..r] around the pivot a[r].

    Elements <= pivot end up left of the returned index, the pivot lands
    at that index, and everything greater sits to its right.
    """
    pivot = a[r]
    boundary = p - 1
    for scan in range(p, r):
        if a[scan] <= pivot:
            boundary += 1
            a[boundary], a[scan] = a[scan], a[boundary]
    a[boundary + 1], a[r] = a[r], a[boundary + 1]
    return boundary + 1
|
{"/main.py": ["/selection_sort.py", "/insertion_sort.py", "/merge_sort.py", "/quick_sort.py"]}
|
22,304
|
aconstantinou123/sorting_algorithms
|
refs/heads/master
|
/main.py
|
from selection_sort import selection_sort
from insertion_sort import insertion_sort
from merge_sort import merge_sort
from quick_sort import quick_sort
# Each algorithm gets its own copy of the input because every sort in this
# project mutates its argument in place.
selection_unsorted = [99, 77, 44, 16, 1000, 7, 4, 8, 22, 3, 16, 99, 205, 33, 1, 100, 19, 12, 55]
insertion_unsorted = [99, 77, 44, 16, 1000, 7, 4, 8, 22, 3, 16, 99, 205, 33, 1, 100, 19, 12, 55]
merge_unsorted = [99, 77, 44, 16, 1000, 7, 4, 8, 22, 3, 16, 99, 205, 33, 1, 100, 19, 12, 55]
quick_unsorted = [99, 77, 44, 16, 1000, 7, 4, 8, 22, 3, 16, 99, 205, 33, 1, 100, 19, 12, 55]
# NOTE(review): `test` is never used below -- leftover scratch data.
test = [10, 7, 1, 3, 5, 8, 9, 6]
print('selection sort')
selection_sort_result = selection_sort(selection_unsorted)
print(selection_sort_result, '\n')
print('insertion sort')
insertion_sort_result = insertion_sort(insertion_unsorted)
print(insertion_sort_result, '\n')
print('merge sort')
# merge_sort and quick_sort take inclusive (low, high) index bounds
merge_sort_result = merge_sort(merge_unsorted, 0, len(merge_unsorted) - 1)
print(merge_sort_result, '\n')
print('quick sort')
quick_sort_result = quick_sort(quick_unsorted, 0, len(quick_unsorted) - 1)
print(quick_sort_result, '\n')
|
{"/main.py": ["/selection_sort.py", "/insertion_sort.py", "/merge_sort.py", "/quick_sort.py"]}
|
22,305
|
aconstantinou123/sorting_algorithms
|
refs/heads/master
|
/insertion_sort.py
|
def insertion_sort(a):
    """Sort list a in place using insertion sort and return it.

    Stable; O(n^2) worst case, O(n) on nearly-sorted input.  Empty and
    single-element lists are handled naturally (the loop body never runs).
    """
    # idiomatic range-for replaces the manual while/counter loop
    for i in range(1, len(a)):
        element = a[i]
        j = i - 1
        # shift larger elements one slot right to make room for `element`
        while j >= 0 and a[j] > element:
            a[j + 1] = a[j]
            j -= 1
        a[j + 1] = element
    return a
|
{"/main.py": ["/selection_sort.py", "/insertion_sort.py", "/merge_sort.py", "/quick_sort.py"]}
|
22,306
|
aconstantinou123/sorting_algorithms
|
refs/heads/master
|
/selection_sort.py
|
def selection_sort(a):
    """Sort list a in place using selection sort and return it.

    O(n^2) comparisons, O(n) swaps.  Empty and single-element lists are
    handled naturally (the loops simply don't run).
    """
    n = len(a)
    for i in range(n):
        # find the index of the smallest element in the unsorted tail a[i:]
        # (renamed from `min`, which shadowed the builtin)
        smallest = i
        for j in range(i + 1, n):
            if a[j] < a[smallest]:
                smallest = j
        # tuple swap replaces the three-statement temp-variable dance
        a[i], a[smallest] = a[smallest], a[i]
    return a
|
{"/main.py": ["/selection_sort.py", "/insertion_sort.py", "/merge_sort.py", "/quick_sort.py"]}
|
22,307
|
aconstantinou123/sorting_algorithms
|
refs/heads/master
|
/merge_sort.py
|
from math import floor, inf
def merge_sort(a, l, h):
    """Recursively merge-sort a[l..h] (inclusive) in place; returns a."""
    if l < h:
        mid = (l + h) // 2
        merge_sort(a, l, mid)
        merge_sort(a, mid + 1, h)
        merge(a, l, mid, h)
    return a
def merge(a, l, m, h):
    """Merge the sorted runs a[l..m] and a[m+1..h] back into a.

    Uses math.inf sentinels so neither run needs an exhaustion check.
    """
    left = a[l:m + 1] + [inf]
    right = a[m + 1:h + 1] + [inf]
    i = j = 0
    for k in range(l, h + 1):
        if left[i] < right[j]:
            a[k] = left[i]
            i += 1
        else:
            a[k] = right[j]
            j += 1
{"/main.py": ["/selection_sort.py", "/insertion_sort.py", "/merge_sort.py", "/quick_sort.py"]}
|
22,309
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/socialaccount/providers/stackexchange/tests.py
|
from django.test import TestCase
from datetime import datetime
from allauth.socialaccount.providers.stackexchange.models import StackExchangeSite
from django.utils.timezone import utc
#from allauth.socialaccount.models import SocialApp
#from stackpy.api import API
class StackExchangeSiteModelTest(TestCase):
    """Round-trip test: a fully-populated StackExchangeSite must survive a
    save/reload cycle with every field intact."""

    # Attribute names verified field-by-field after reloading, in the same
    # order the assertions originally ran.
    _CHECKED_FIELDS = (
        'api_site_parameter', 'audience', 'favicon_url', 'icon_url',
        'launch_date', 'logo_url', 'name', 'site_state', 'site_type',
        'site_url', 'styling_link_color', 'styling_tag_foreground_color',
        'styling_tag_background_color',
    )

    def test_creating_new_stackexchangesite_and_saving_it_to_database(self):
        sesite = StackExchangeSite()
        # Mandatory fields
        sesite.api_site_parameter = 'stackoverflow'
        sesite.audience = 'professional and enthusiast programmers'
        sesite.favicon_url = 'http://cdn.sstatic.net/stackoverflow/img/favicon.ico'
        sesite.icon_url = 'http://cdn.sstatic.net/stackoverflow/img/apple-touch-icon.png'
        sesite.launch_date = datetime.utcfromtimestamp(1221436800).replace(tzinfo=utc)
        sesite.logo_url = 'http://cdn.sstatic.net/stackoverflow/img/logo.png'
        sesite.name = 'Stack Overflow'
        sesite.site_state = 'normal'
        sesite.site_type = 'main_site'
        sesite.site_url = 'http://stackoverflow.com'
        sesite.styling_link_color = '#0077CC'
        sesite.styling_tag_foreground_color = '#3E6D8E'
        sesite.styling_tag_background_color = '#E0EAF1'
        sesite.save()
        # Exactly one row should now exist, and it should compare equal.
        rows = StackExchangeSite.objects.all()
        self.assertEquals(len(rows), 1)
        reloaded = rows[0]
        self.assertEquals(reloaded, sesite)
        # Every persisted attribute must match what was assigned.
        for attr in self._CHECKED_FIELDS:
            self.assertEquals(getattr(sesite, attr), getattr(reloaded, attr))
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,310
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/admin.py
|
from django.contrib import admin
from models import BannedUsername
# Expose BannedUsername in the Django admin with the default ModelAdmin.
admin.site.register(BannedUsername)
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,311
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/socialaccount/providers/stackexchange/models.py
|
from django.db import models
class StackExchangeSite(models.Model):
    """One Stack Exchange network site (e.g. Stack Overflow), mirroring the
    per-site metadata the SE API exposes: identifier, branding assets,
    state/type, and styling colors."""
    SITE_STATES = (
        ('normal', 'Normal'),
        ('closed_beta', 'Closed Beta'),
        ('open_beta', 'Open Beta'),
        ('linked_meta', 'Linked Meta'),
    )
    SITE_TYPES = (
        ('main_site', 'Main Site'),
        ('meta_site', 'Meta Site'),
    )
    # short API identifier, e.g. 'stackoverflow'
    api_site_parameter = models.CharField(max_length=30)
    audience = models.CharField(max_length=200)
    favicon_url = models.URLField()
    icon_url = models.URLField()
    launch_date = models.DateTimeField()
    logo_url = models.URLField()
    name = models.CharField(max_length=30)
    site_state = models.CharField(max_length=20, choices=SITE_STATES)
    site_type = models.CharField(max_length=20, choices=SITE_TYPES)
    site_url = models.URLField()
    # '#RRGGBB' color strings, hence max_length=7
    styling_link_color = models.CharField(max_length=7)
    styling_tag_foreground_color = models.CharField(max_length=7)
    styling_tag_background_color = models.CharField(max_length=7)
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,312
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/socialaccount/providers/twitter/urls.py
|
from allauth.socialaccount.providers.oauth.urls import default_urlpatterns
from provider import TwitterProvider
from django.conf.urls import patterns, url
from views import oauth_authorize
# Standard OAuth login/callback routes for the Twitter provider, plus an
# extra '<provider>/authorize/' endpoint served by oauth_authorize.
urlpatterns = default_urlpatterns(TwitterProvider)
urlpatterns += patterns('',
                        url('^' + TwitterProvider.id + '/authorize/$', oauth_authorize,
                            name=TwitterProvider.id + '_authorize'),
                        )
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,313
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/models.py
|
from django.db import models
from django.core.exceptions import ValidationError
import re
def is_valid_regex(value):
    """Django field validator: value must compile as a regular expression.

    Returns None silently on success; raises ValidationError otherwise.
    """
    message = '"Expression" must be a valid regular expression.'
    try:
        re.compile(value)
    except re.error:
        raise ValidationError(message)
class BannedUsername(models.Model):
    """A regular expression describing usernames that are not allowed."""
    # pattern validated at clean() time by is_valid_regex above
    expression = models.CharField(max_length=100, validators=[is_valid_regex])
    def match(self, username):
        """Case-insensitively match username against this ban pattern;
        returns a match object or None (re.match anchors at the start)."""
        return re.match(self.expression, username, flags=re.I)
    def __unicode__(self):
        return self.expression
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,314
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/tests.py
|
# -*- coding: utf-8 -*-
import requests
from django.test import TestCase
from django.core.exceptions import ValidationError
from models import BannedUsername
import utils
class MockedResponse(object):
    """Lightweight stand-in for a requests.Response used by mocked_response.

    Carries a status code, raw body string and headers dict, and can decode
    the body as JSON like the real response object.
    """
    def __init__(self, status_code, content, headers=None):
        self.status_code = status_code
        self.content = content
        # Fix: the original default `headers={}` was a mutable default
        # argument shared by every instance; give each its own dict.
        self.headers = {} if headers is None else headers
    def json(self):
        """Parse self.content as JSON and return the resulting object."""
        import json
        return json.loads(self.content)
class mocked_response:
    """Context manager that monkeypatches requests.get/post so that each
    call pops and returns the next queued MockedResponse; once the queue is
    empty, calls fall through to the real network functions."""
    def __init__(self, *responses):
        self.responses = list(responses)
    def __enter__(self):
        # remember the real functions so __exit__ can restore them
        self.orig_get = requests.get
        self.orig_post = requests.post
        def mockable_request(f):
            # wrap f: serve queued responses first, then delegate to f
            def new_f(*args, **kwargs):
                if self.responses:
                    return self.responses.pop(0)
                return f(*args, **kwargs)
            return new_f
        requests.get = mockable_request(requests.get)
        requests.post = mockable_request(requests.post)
    def __exit__(self, type, value, traceback):
        # restore the unpatched functions even if the body raised
        requests.get = self.orig_get
        requests.post = self.orig_post
class BasicTests(TestCase):
    """Unit tests for allauth utils and the BannedUsername model."""
    def test_generate_unique_username(self):
        # (input, expected) pairs: email local part, unicode folding, empty
        examples = [('a.b-c@gmail.com', 'a.b-c'),
                    (u'Üsêrnamê', 'username'),
                    ('', 'user')]
        for input, username in examples:
            self.assertEquals(utils.generate_unique_username(input),
                              username)
    def test_email_validation(self):
        # valid_email_or_none enforces Django's 75-char user.email limit
        s = 'unfortunately.django.user.email.max_length.is.set.to.75.which.is.too.short@bummer.com'
        self.assertEquals(None, utils.valid_email_or_none(s))
        s = 'this.email.address.is.a.bit.too.long.but.should.still.validate.ok@short.com'
        self.assertEquals(s, utils.valid_email_or_none(s))
        # one char longer pushes it over the limit
        s = 'x' + s
        self.assertEquals(None, utils.valid_email_or_none(s))
        self.assertEquals(None, utils.valid_email_or_none("Bad ?"))
    def test_creating_and_saving_bannedusernames(self):
        banned_username = BannedUsername()
        banned_username.expression = 'account'
        banned_username.save()
        # Retrieve it
        all_bu = BannedUsername.objects.all()
        self.assertEquals(len(all_bu), 1)
        only_bu = all_bu[0]
        self.assertEquals(only_bu, banned_username)
        # Check if the fields are the same
        self.assertEquals(banned_username.expression, only_bu.expression)
    def test_bannedusername_validation(self):
        # '(' is an invalid regex, so full_clean must raise ValidationError
        bad_bu = BannedUsername()
        bad_bu.expression = '('
        try:
            bad_bu.full_clean()
            self.fail('Validation not working')
        except ValidationError:
            pass
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,315
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/socialaccount/providers/stackexchange/views.py
|
import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from allauth.socialaccount.providers.oauth2.client import OAuth2Error
from allauth.socialaccount.models import SocialAccount, SocialLogin
from allauth.socialaccount.providers import registry
from allauth.utils import get_user_model
from django.conf import settings
from provider import StackExchangeProvider
User = get_user_model()
class StackExchangeOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter for Stack Exchange: exchanges the token for the
    user's /me profile on the configured SE site and builds a SocialLogin
    from the first returned item."""
    provider_id = StackExchangeProvider.id
    access_token_url = 'https://stackexchange.com/oauth/access_token'
    authorize_url = 'https://stackexchange.com/oauth/'
    profile_url = 'https://api.stackexchange.com/2.1/me'
    def complete_login(self, request, app, token):
        """Fetch the SE profile for `token` and return an unsaved SocialLogin.

        Raises OAuth2Error when the user has no profile on the configured
        site (the API returns an empty 'items' list).
        """
        provider = registry.by_id(app.provider)
        site = provider.get_site()
        # 'key' is the SE app key; 'site' selects which network site's
        # profile to fetch (e.g. 'stackoverflow')
        resp = requests.get(self.profile_url,
                            params={ 'access_token': token.token,
                                     'key': app.key,
                                     'site': site })
        extra_data = resp.json()
        # extra_data is something of the form:
        #
        # {
        #   "items": [
        #     {
        #       "user_id": 654321,
        #       "user_type": "registered",
        #       "creation_date": 1234567890,
        #       "display_name": "Alice",
        #       "profile_image": "http://www.my-site.com/my-handsome-face.jpg",
        #       "reputation": 44,
        #       "reputation_change_day": 0,
        #       "reputation_change_week": 0,
        #       "reputation_change_month": 0,
        #       "reputation_change_quarter": 15,
        #       "reputation_change_year": 31,
        #       "age": 30,
        #       "last_access_date": 1355724123,
        #       "last_modified_date": 1332302654,
        #       "is_employee": false,
        #       "link": "http://stackoverflow.com/users/654321/alice",
        #       "website_url": "http://twitter.com/alice",
        #       "location": "Japan",
        #       "account_id": 123456,
        #       "badge_counts": {
        #         "gold": 0,
        #         "silver": 0,
        #         "bronze": 6
        #       },
        #       "accept_rate": 100
        #     }
        #   ],
        #   "quota_remaining": 9997,
        #   "quota_max": 10000,
        #   "has_more": false
        # }
        if len(extra_data['items']) > 0:
            # per-site user_id (not the network-wide account_id) is the uid
            uid = str(extra_data['items'][0]['user_id'])
            user = User(username=extra_data['items'][0]['display_name'])
            account = SocialAccount(extra_data=extra_data['items'][0],
                                    uid=uid,
                                    provider=self.provider_id,
                                    user=user)
            return SocialLogin(account)
        else:
            raise OAuth2Error("stackexchange/no_site_profile_error.html",
                              { 'se_site': site })
# Wire the adapter into allauth's generic OAuth2 login/callback views.
oauth2_login = OAuth2LoginView.adapter_view(StackExchangeOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(StackExchangeOAuth2Adapter)
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,316
|
ksze/django-allauth
|
refs/heads/master
|
/allauth/socialaccount/providers/stackexchange/provider.py
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
from allauth.socialaccount.models import SocialToken
class StackExchangeAccount(ProviderAccount):
    """Wraps a SocialAccount whose extra_data is one SE API user item.

    NOTE(review): 'html_url' and 'avatar_url' do not appear in the SE
    sample payload in views.py (which uses 'link' / 'profile_image') --
    confirm these keys ever exist in extra_data.
    """
    def get_profile_url(self):
        return self.account.extra_data.get('html_url')
    def get_avatar_url(self):
        return self.account.extra_data.get('avatar_url')
    def __unicode__(self):
        # prefer the SE display name, fall back to the generic label
        dflt = super(StackExchangeAccount, self).__unicode__()
        return self.account.extra_data.get('display_name', dflt)
    def get_brand(self):
        """Return {'id', 'name'} of the SocialApp this account belongs to."""
        token = SocialToken.objects.get(account = self.account)
        return dict(id=token.app.id,
                    name=token.app.name)
class StackExchangeProvider(OAuth2Provider):
    """OAuth2 provider definition for Stack Exchange."""
    id = 'stackexchange'
    name = 'Stack Exchange'
    package = 'allauth.socialaccount.providers.stackexchange'
    account_class = StackExchangeAccount
    def get_site(self):
        """Which SE network site to query; settings key SITE, default
        'stackoverflow'."""
        settings = self.get_settings()
        return settings.get('SITE', 'stackoverflow')
    def get_default_scope(self):
        scope = ['read_inbox', 'write_access', 'private_info']
        return scope
# Module import side effect: make the provider known to allauth's registry.
providers.registry.register(StackExchangeProvider)
|
{"/allauth/socialaccount/providers/stackexchange/tests.py": ["/allauth/socialaccount/providers/stackexchange/models.py"]}
|
22,329
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/urls.py
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from crawler.views import UrlListView
urlpatterns = [
    # routes for the crawler app
    url(r'^index/$', UrlListView.as_view(), name='index'), # list page
]
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,330
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/models.py
|
# -*- coding: utf-8 -*
from django.db import models
class Url_list(models.Model):
    """Pages selected for display: URLs whose titles matched the security
    dictionary (populated by Crawler.set_titles_and_urls_to_show)."""
    #id = models.IntegerField(u'ID', blank=True, default=0)
    url = models.URLField(u'URL', unique=True, null=False, max_length=255)
    title = models.CharField(u'タイトル',max_length=255, blank=True)
    #created_at = models.DateTimeField(auto_now_add=True)
    #updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.url
class Crawled_url_list(models.Model):
    """Every page the crawler has visited, with a SHA-1 digest of its HTML
    used to skip unchanged pages on later crawls."""
    url = models.URLField('URL', unique=True, null=False,max_length=255)
    title = models.CharField('Title', max_length=255, blank=True)
    html_digest = models.CharField('Hash', max_length=255, blank=True)
    # last_modified = models.DateTimeField('last_modified', blank=True)
    def __str__(self):
        return self.url
# class JVN_list(models.Model):
# url = models.URLField('URL', unique=True, null=False,max_length=255)
# title = models.CharField('Title', max_length=255, blank=True)
# date = models.DateTimeField('YearDateTime')
# def __str__(self):
# return self.url
class Dictionary_about_security(models.Model):
    """Security-related keywords used to filter crawled page titles
    (populated by the update_dictionary task)."""
    word = models.CharField('word', max_length=63, unique=True)
    def __str__(self):
        return self.word
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,331
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/tasks.py
|
from __future__ import absolute_import
from urllib.parse import urlparse
from webcrawler.celery import app
from celery.task import task
import urllib.request
from bs4 import BeautifulSoup
from crawler.models import Url_list, Crawled_url_list, Dictionary_about_security
from celery.schedules import crontab
import hashlib
import re
from datetime import datetime
from socket import timeout
# Toggle console print debugging.
MYDEBUG = False
# Toggle appending progress messages to crawler.log via write_log.
MYLOG = True
class Crawler(object):
    """Breadth-first web crawler.

    Starting from a seed URL it crawls level by level up to max_depth,
    records every visited page (URL, title, SHA-1 of the HTML) in
    Crawled_url_list, and promotes security-related pages into Url_list.
    """
    def __init__(self, target_url, max_depth):
        # target url is seed's url
        self.target_url = target_url
        self.max_depth = max_depth
        # used to drop links carrying query strings
        self.reg_question_mark = re.compile('\?')
    def dictionary_from_database(self):
        """Return all security keywords stored in Dictionary_about_security."""
        # create dictionary of words about security using Dictionary_about_security
        words = []
        for data in Dictionary_about_security.objects.all():
            words.append(data.word)
        return words
    def crawl(self):
        """Breadth-first crawl from the seed, one depth level at a time."""
        # create regular expression list from dictionary
        self.patterns = [re.compile(word) for word in self.dictionary_from_database()]
        crawled = [] # crawled url list
        tocrawl = [self.target_url] # to crawl list
        next_depth = [] # to crawl next echelon
        depth = 0 # current depth
        while tocrawl and depth <= self.max_depth:
            page = tocrawl.pop()
            # if page is not checked yet, crawl it
            if page not in crawled:
                # set list to crawl at next depth
                # remove overlapped elements
                # scrape page
                next_depth = list(set(next_depth).union(set(self.scrape(page))))
                crawled.append(page)
            # current level exhausted: move on to the next depth level
            if not tocrawl:
                tocrawl = next_depth
                next_depth = []
                depth += 1
    # scrape html
    def scrape(self, page_url):
        """Fetch page_url, record it in Crawled_url_list, and return the
        outgoing links to crawl next (empty list on any failure, on an
        unchanged digest, or when the page has no <title>).

        NOTE(review): the bare excepts here and below silently swallow all
        errors (including KeyboardInterrupt) -- narrowing them would make
        failures diagnosable.
        """
        headers = { "User-Agent" : "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)" }
        try:
            # print('try urlopen: '+ page_url)
            req = urllib.request.Request(page_url, None, headers)
            page = urllib.request.urlopen(req)
            # print('success! urlopen!: '+ page_url)
        except:
            # print('error urlopen: '+ page_url)
            return []
        #else:
        #    print('else urlopen')
        #    return []
        # print('scraope---------------------')
        page_html = page.read()
        # calculate hash of page's html
        digest = hashlib.sha1(page_html).hexdigest()
        crawled_url_data = Crawled_url_list.objects.all().filter(url=page_url)
        # don't check this page if html digest equals that crawled
        if crawled_url_data:
            data = crawled_url_data[0]
            if data.html_digest == digest:
                if MYLOG:
                    write_log(' ', page_url + ' skipped')
                return []
            else:
                self.update_digest(data, digest)
                if MYLOG:
                    write_log('', page_url + ' Digest updated')
        # create soup object and confirm whether title exists or not
        soup = BeautifulSoup(page_html, "html.parser")
        if soup.title:
            title = soup.title.string
        else:
            # if MYDEBUG:
            #     print('there isn\'t title')
            return []
        # save data and return all link
        if self.set_crawled_urls_and_titles_and_digest(page_url, title, digest):
            return self.get_all_link(soup, page_url)
        else:
            return []
    def update_digest(self, data, digest):
        """Overwrite a Crawled_url_list row's digest; save errors ignored."""
        data.html_digest = digest
        try:
            data.save()
        except:
            pass
    def set_crawled_urls_and_titles_and_digest(self, page_url, title, digest):
        """Persist a newly crawled page; returns True iff it had a title
        (duplicate-URL save errors are deliberately ignored)."""
        if title:
            crawled_data = Crawled_url_list()
            crawled_data.url = page_url
            crawled_data.title = title
            crawled_data.html_digest = digest
            try:
                crawled_data.save()
                if MYLOG:
                    write_log('', page_url + ' saved')
            except:
                pass
            return True
        else:
            return False
    # collect all <a> tag
    def get_all_link(self, soup, current_url):
        """Return normalized hrefs from the page, skipping empty links and
        anything containing a query string."""
        links = []
        for atag in soup.find_all('a'):
            link = atag.get('href')
            if not link:
                continue
            abs_path = self.get_absolute_path(current_url, link)
            if not self.reg_question_mark.search(abs_path):
                links.append(self.remove_trailing_slash(abs_path))
        return links
    def remove_trailing_slash(self, url):
        # assumes url is non-empty (callers only pass real hrefs)
        if url[-1] == '/':
            return url[0:-1]
        else:
            return url
    # make relative path absolute path
    def get_absolute_path(self, current_url, link):
        """Return link as-is when it already has a scheme, otherwise join
        its path onto current_url."""
        parsed_link = urlparse(link)
        if parsed_link.scheme:
            return link
        else:
            if current_url[-1] == '/':
                current_url = current_url[:-1]
            return current_url + parsed_link.path
    # only set titles and urls about security
    def set_titles_and_urls_to_show(self):
        """Copy crawled pages whose titles look security-related (and are
        not already listed) into Url_list for display."""
        for pair in Crawled_url_list.objects.all():
            if pair.title:
                if Url_list.objects.all().filter(url=pair.url):
                    pass
                elif self.select_by_title(pair.title):
                    ut = Url_list()
                    ut.url = pair.url
                    ut.title = pair.title
                    ut.save()
    def select_by_title(self, title):
        """True iff title matches a dictionary pattern and none of the
        blacklisted words (requires crawl() to have built self.patterns)."""
        black_word = ['はてなブックマーク', 'はてな検索']
        patterns_for_excluding = [re.compile(word) for word in black_word]
        for reg_black in patterns_for_excluding:
            if reg_black.search(title):
                return False
        # for/else: the else runs only when no whitelist pattern matched
        for reg_white in self.patterns:
            if reg_white.search(title):
                return True
        else:
            return False
class Dictionary(object):
    """Scrapes a glossary web page and stores each linked term in the
    security dictionary table (Dictionary_about_security)."""
    black_list_of_words = ['Incept Inc.', '記号・数字']
    def __init__(self, url):
        self.dict_url = url
    def update_dictionary(self):
        """Fetch the glossary page and save every anchor text as a term.

        Fetch failures abort silently (optionally logged via MYDEBUG);
        duplicate-term save errors are ignored.
        """
        headers = { "User-Agent" : "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)" }
        try:
            request_obj = urllib.request.Request(self.dict_url, None, headers)
            page = urllib.request.urlopen(request_obj)
        except:
            if MYDEBUG:
                print('can\'t open '+ self.dict_url)
            return
        soup = BeautifulSoup(page, "html.parser")
        for anchor in soup.find_all('a'):
            term = anchor.string
            if not term or term in self.black_list_of_words:
                continue
            try:
                entry = Dictionary_about_security()
                entry.word = term
                entry.save()
            except:
                pass
# save log into crawler.log
def write_log(mark, log_text):
    """Append a timestamped line to crawler.log.

    *mark* is an arbitrary prefix (e.g. '---') used to visually group entries.
    The with-statement closes the file; the original's explicit close() inside
    the with-block was redundant and has been removed.
    """
    with open('crawler.log', 'a') as log:
        log.write(mark + datetime.now().strftime('%Y/%m/%d %H:%M:%S') + ': ' + log_text + '\n')
@app.task
def run_crawler():
    """Celery task: crawl every seed site to a fixed depth, then publish results.

    For each seed a fresh Crawler walks links up to max_depth, and finally
    copies security-related pages into the user-visible Url_list table.
    Progress is appended to crawler.log when MYLOG is enabled.
    """
    seed_sites_url = ['http://b.hatena.ne.jp/ctop/it', 'http://japan.zdnet.com', 'https://jvn.jp']
    max_depth = 2 # depth to crawl
    if MYLOG:
        write_log('-----', ' crawl start')
    for seed in seed_sites_url:
        if MYLOG:
            write_log('---', seed + ' crawl start')
        crawler = Crawler(seed, max_depth)
        crawler.crawl()
        crawler.set_titles_and_urls_to_show()
        if MYLOG:
            write_log('---', seed + ' crawl finished')
    if MYLOG:
        write_log('-----', ' crawl finished\n')
# update dictionary about security
@app.task
def update_dictionary():
    """Celery task: refresh the security-term dictionary from e-words.jp."""
    dict_url = 'http://e-words.jp/p/t-Security.html'
    ewords_dictionary = Dictionary(dict_url)
    if MYLOG:
        write_log('---', ' updating dictionary start')
    ewords_dictionary.update_dictionary()
    if MYLOG:
        write_log('---', ' updating dictionary finished')
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,332
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/migrations/0005_auto_20151226_1839.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-26 09:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop Url_list.timestamp; make url unique, title optional."""
    dependencies = [
        ('crawler', '0004_auto_20151224_0048'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='url_list',
            name='timestamp',
        ),
        migrations.AlterField(
            model_name='url_list',
            name='title',
            field=models.CharField(blank=True, max_length=255, verbose_name='タイトル'),
        ),
        migrations.AlterField(
            model_name='url_list',
            name='url',
            field=models.URLField(max_length=255, unique=True, verbose_name='URL'),
        ),
    ]
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,333
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/admin.py
|
from django.contrib import admin
from django.contrib import admin
from crawler.models import Url_list,Crawled_url_list,Dictionary_about_security
class UrlAdmin(admin.ModelAdmin):
    """Admin list view for Url_list: show id/url/title, link through url and title."""
    list_display = ('id', 'url', 'title')
    list_display_links = ('url', 'title')
# Register the crawler models with the Django admin site.
admin.site.register(Url_list,UrlAdmin)
admin.site.register(Crawled_url_list)
admin.site.register(Dictionary_about_security)
#admin.site.register(Url_list)
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,334
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/views.py
|
#from django.shortcuts import render
# Create your views here.
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from crawler.models import Url_list
from django.views.generic import ListView
# from crawler.crawl import Crawler
class UrlListView(ListView):
    """Paginated listing of stored security URLs, newest first."""
    # (original note: url list)
    model = Url_list
    template_name = 'index.html'
    context_object_name = 'index'
    paginate_by = 100
    def get_queryset(self):
        """Return all Url_list rows ordered by descending id (newest first)."""
        #hatena = Crawler()
        #max_depth = 2
        #target_url = "http://b.hatena.ne.jp/search/text?q=%E3%82%BB%E3%82%AD%E3%83%A5%E3%83%AA%E3%83%86%E3%82%A3"
        #hatena.crawl(target_url, max_depth)
        #hatena.get_titles_and_urls()
        return Url_list.objects.all().order_by('-id')
#def urls_list(request):
#return HttpResponse(u'URLの一覧')
# index = Url_list.objects.all().order_by('id')
# return render_to_response('index.html', # 使用するテンプレート
# {'index': index}, # テンプレートに渡すデータ
# context_instance=RequestContext(request)) # その他標準のコンテキスト
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,335
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/migrations/0007_auto_20160105_1334.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-05 04:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the JVN_list model and Crawled_url_list.html_digest."""
    dependencies = [
        ('crawler', '0006_crawled_url_list'),
    ]
    operations = [
        migrations.CreateModel(
            name='JVN_list',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.URLField(max_length=255, unique=True, verbose_name='URL')),
                ('title', models.CharField(blank=True, max_length=255, verbose_name='Title')),
                ('date', models.DateTimeField(verbose_name='YearDateTime')),
            ],
        ),
        migrations.AddField(
            model_name='crawled_url_list',
            name='html_digest',
            field=models.CharField(blank=True, max_length=255, verbose_name='Hash'),
        ),
    ]
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,336
|
igalab2015/webcrawler
|
refs/heads/master
|
/crawler/migrations/0003_auto_20151224_0041.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 15:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow Url_list.title to be blank."""
    dependencies = [
        ('crawler', '0002_auto_20151224_0037'),
    ]
    operations = [
        migrations.AlterField(
            model_name='url_list',
            name='title',
            field=models.CharField(blank=True, max_length=255, verbose_name='タイトル'),
        ),
    ]
|
{"/crawler/urls.py": ["/crawler/views.py"], "/crawler/tasks.py": ["/crawler/models.py"], "/crawler/admin.py": ["/crawler/models.py"], "/crawler/views.py": ["/crawler/models.py"]}
|
22,337
|
tony2012/Tennis_Ball_Tracking-python
|
refs/heads/master
|
/TrajectoryManager.py
|
import numpy as np
class Trajectory:
    """A single candidate ball trajectory: an ordered list of 2-D points.

    Points are length-2 numpy arrays (appended as [row, col] by the caller —
    TODO confirm axis order against the detector). The class accumulates path
    length and validates new points against a constant-acceleration model.
    """
    def __init__(self, point):
        # each element of __points must be a numpy array of shape (2,)
        self.__points = []
        self.__length = 0          # accumulated Euclidean path length
        self.__missing_count = 0   # consecutive frames without a matching detection
        self.__points.append(point)
    # return the trajectory's points
    def getPoints(self):
        return self.__points
    # append a point and extend the accumulated path length
    def addPoint(self, point):
        self.__points.append(point)
        self.__missing_count = 0
        last_points = self.__points[-2:]
        point_diff = last_points[1] - last_points[0]
        point_diff_mag = np.sqrt(point_diff.dot(point_diff))
        self.__length += point_diff_mag
    def getLength(self):
        return self.__length
    def checkNextPoint(self, point):
        """Return True when *point* plausibly continues this trajectory."""
        points_length = len(self.__points)
        if points_length >= 3:
            # three or more points recorded:
            # validate the last two points plus the candidate as a triplet
            #nextPoint = Trajectory.predictNextPoint(self.__points)
            #point_diff = point - nextPoint
            #point_diff_mag = np.sqrt(point_diff.dot(point_diff))
            #return (point_diff_mag < 8.0)
            return Trajectory.checkTriplet(self.__points[-2:] + [point])
        elif points_length == 0:
            # empty trajectory: accept unconditionally
            return True
        elif points_length == 1:
            # single point recorded: check the step distance only
            point_diff = point - self.__points[0]
            point_diff_mag = np.sqrt(point_diff.dot(point_diff))
            return (point_diff_mag > 2.0) and (point_diff_mag < 80.0)
        elif points_length == 2:
            # two points recorded: check whether the three would form a valid triplet
            return Trajectory.checkTriplet(self.__points + [point])
    # increment the missing counter -> returns whether tracking should continue
    def upcountMissing(self):
        if len(self.__points) < 3:
            return False
        self.__missing_count += 1
        # stop tracking once the miss budget (1 frame) is exceeded
        if self.__missing_count > 1:
            # tracking ends
            return False
        else:
            # append the next predicted point and keep tracking
            nextPoint = self.predictNextPoint(self.__points)
            self.__points.append(nextPoint)
            return True
    # predict and return the next point
    # NOTE(review): declared @classmethod but the first parameter is named
    # `self`; it actually receives the class object.
    @classmethod
    def predictNextPoint(self, points):
        if len(points) >= 3 :
            # take the last three points
            last3Points = points[-3:]
            # derive velocity and acceleration
            velocity = [last3Points[1] - last3Points[0], last3Points[2] - last3Points[1]]
            acceleration = velocity[1] - velocity[0]
            # extrapolate the next position
            nextVelocity = velocity[1] + acceleration
            nextPoint = last3Points[2] + nextVelocity
            return nextPoint
        # NOTE(review): implicitly returns None for fewer than 3 points.
    # check whether three points form a valid initial triplet
    @classmethod
    def checkTriplet(self, points):
        if len(points) != 3:
            return False
        # derive velocity and acceleration
        velocity = [points[1] - points[0], points[2] - points[1]]
        acceleration = velocity[1] - velocity[0]
        #print("acceleration :", acceleration)
        # the two speed magnitudes must be similar (ratio >= 0.6)
        velocity_mag = [np.sqrt(velocity[0].dot(velocity[0])), np.sqrt(velocity[1].dot(velocity[1]))]
        if velocity_mag[0] > velocity_mag[1]:
            if velocity_mag[1] / velocity_mag[0] < 0.6:
                #print("velocity_mag[1] / velocity_mag[0] :", velocity_mag[1] / velocity_mag[0])
                return False
        else:
            if velocity_mag[0] / velocity_mag[1] < 0.6:
                #print("velocity_mag[0] / velocity_mag[1] :", velocity_mag[0] / velocity_mag[1])
                return False
        # speed must be neither too small nor too large
        if velocity_mag[0] < 2.0 or velocity_mag[0] > 80.0:
            #print("velocity_mag[0] :", velocity_mag[0])
            return False
        if velocity_mag[1] < 2.0 or velocity_mag[1] > 80.0:
            #print("velocity_mag[1] :", velocity_mag[1])
            return False
        # the change of direction must be small (< 45 degrees)
        velocity_dot = velocity[1].dot(velocity[0])
        acceleration_angle = np.arccos(velocity_dot / (velocity_mag[0] * velocity_mag[1]))
        #print("acceleration_angle :", np.rad2deg(acceleration_angle))
        if acceleration_angle > np.deg2rad(45.0):
            return False
        # acceleration must be small
        acceleration_mag = np.sqrt(acceleration.dot(acceleration))
        if acceleration_mag > 20.0:
            #print("acceleration_mag :", acceleration_mag)
            return False
        if acceleration[0] < -2.0:
            return False
        return True
class TrajectoryManager:
    """Owns all candidate trajectories and feeds per-frame detections into them."""
    def __init__(self):
        self.__trajectorys = []
    def getTrajectorys(self):
        """Return the list of currently tracked Trajectory objects."""
        return self.__trajectorys
    def setPointsFrame(self, points):
        """Process one frame's detected points.

        Each point either extends the first compatible, not-yet-updated
        trajectory or seeds a new one. When some trajectory's accumulated
        length exceeds 30.0 it is considered the ball and every other
        candidate is dropped; otherwise non-updated trajectories receive a
        missing-count tick and are removed once their miss budget runs out.
        """
        max_trajectory = (0, 0)  # (longest length seen this frame, its index)
        trajectorys_updated = [False] * len(self.__trajectorys)
        # fix: the original wrote `for index, point in enumerate(points)`,
        # shadowing the inner loop's `index`; the outer index was never used.
        for point in points:
            isAddedTrajectory = False
            # try to append the point to an existing trajectory
            for index, updated in enumerate(trajectorys_updated):
                if updated == False:
                    if self.__trajectorys[index].checkNextPoint(point):
                        self.__trajectorys[index].addPoint(point)
                        trajectorys_updated[index] = True
                        isAddedTrajectory = True
                        trajectory_length = self.__trajectorys[index].getLength()
                        if trajectory_length > max_trajectory[0]:
                            max_trajectory = (trajectory_length, index)
                        break
            # points matching no trajectory start a new candidate
            if isAddedTrajectory == False:
                trajectory_new = Trajectory(point)
                self.__trajectorys.append(trajectory_new)
        # a sufficiently long trajectory wins: keep only it
        if max_trajectory[0] > 30.0:
            self.__trajectorys = [self.__trajectorys[max_trajectory[1]]]
        else:
            # tick the missing counter of non-updated trajectories; drop dead ones
            # (iterate in reverse so removal does not disturb remaining indices)
            for index, updated in reversed(list(enumerate(trajectorys_updated))):
                if updated == False:
                    if self.__trajectorys[index].upcountMissing() == False:
                        self.__trajectorys.remove(self.__trajectorys[index])
|
{"/__main__.py": ["/TrajectoryManager.py"]}
|
22,338
|
tony2012/Tennis_Ball_Tracking-python
|
refs/heads/master
|
/__main__.py
|
import cv2
import numpy as np
import os
import TrajectoryManager as tm

# Tennis-ball tracking demo: background subtraction + connected components
# feed candidate points into a TrajectoryManager, which keeps the most
# plausible ball trajectory.
print(os.getcwd())
bRecord = False
if bRecord == True:
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #out = cv2.VideoWriter('{}.avi'.format(video_path.split('/')[-1].split('.')[0]),fourcc, 20.0, (640,360))
    out = cv2.VideoWriter("renderOutput.mp4", cv2.VideoWriter_fourcc('a', 'v', 'c', '1'), 20.0, (640,360))
cap = cv2.VideoCapture('video/videoplayback.mp4')
bgSubtractor = cv2.createBackgroundSubtractorKNN(history = 10, dist2Threshold = 200.0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel_size = 11
kernel_dilation = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(kernel_size,kernel_size))
kernel_open = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
frame_count = 0
trajectory_image = np.zeros([360, 640, 3], np.uint8)
point_image = np.zeros([360, 640, 3], np.uint8)
manager = tm.TrajectoryManager()
while cap.isOpened() :
    ret, frame = cap.read()
    # fix: cap.read() returns (False, None) at end of stream or on a decode
    # error; the original dereferenced frame.shape and crashed there.
    if not ret:
        break
    resize_scale = 640. / float(frame.shape[1])
    frame = cv2.resize(frame, None, fx=resize_scale, fy=resize_scale)
    #print(frame.shape)
    # apply Gaussian blur
    blur = cv2.GaussianBlur(frame, (7, 7), 0)
    # build the foreground mask via background subtraction
    fgmask = bgSubtractor.apply(blur)
    blank_image = np.zeros(fgmask.shape, np.uint8)
    # clean the mask with morphology (open, then dilate)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_DILATE, kernel_dilation)
    point_image = cv2.addWeighted(point_image, 0.9, np.zeros(frame.shape, np.uint8), 0.1, 0)
    #print("frame_count :", frame_count)
    frame_count += 1
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    #print(len(centroids))
    points = []
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        area_ratio = area / (width * height)
        aspect_ratio = width / height
        #print(x, y, area, width * height, area_ratio)
        #if area > 2 and area < 2000:
        # keep small, roughly convex, roughly square components whose centre is foreground
        if area_ratio > 0.6 and aspect_ratio > 0.333 and aspect_ratio < 3.0 and area < 500 and fgmask[centerY, centerX] == 255:
            #cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            # cv2.rectangle(frame, (x-1, y-1), (x-1 + width+2, y-1 + height+2), (0, 0, 255))
            cv2.rectangle(frame, (x - 1, y - 1), (x - 1 + width + 2, y - 1 + height + 2), (0, 255, 0))
            point_image[centerY, centerX] = (255, 255, 255)
            points.append(np.array([centerY, centerX]))
            # mark the component's pixels in the scratch mask
            for pixel_y in range(y, y + height):
                for pixel_x in range(x, x + width):
                    if fgmask[pixel_y, pixel_x] >= 0:
                        #frame[pixel_y, pixel_x] = [0, 255, 0]
                        blank_image[pixel_y, pixel_x] = 255
            #else :
            #    cv2.rectangle(frame, (x - 1, y - 1), (x - 1 + width + 2, y - 1 + height + 2), (0, 0, 255))
    manager.setPointsFrame(points)
    # draw every trajectory with at least 3 points
    for trajectory in manager.getTrajectorys():
        points = trajectory.getPoints()
        #print(points)
        if len(points) < 3:
            continue
        for index, point in enumerate(points):
            if point[0] < 360 and point[1] < 640:
                trajectory_image[point[0], point[1]] = (0, 255, 0)
                cv2.circle(frame, (point[1], point[0]), 1, (255, 255, 0), 2)
                if index >= 1:
                    cv2.line(frame, (points[index-1][1], points[index-1][0]), (point[1], point[0]), (255, 255, 0), 1)
    #cv2.imshow('processed', fgmask)
    #cv2.imshow('point', point_image)
    cv2.imshow('raw', frame)
    # record
    if bRecord == True:
        out.write(frame)
    # terminate on ESC
    k = cv2.waitKey(4)
    if k == 27:
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        break
if bRecord == True:
    out.release()
cap.release()
cv2.destroyAllWindows()
|
{"/__main__.py": ["/TrajectoryManager.py"]}
|
22,343
|
ElonyMac/Alien-Invasion
|
refs/heads/master
|
/alien_invasion.py
|
import sys
import pygame
from settings import Settings
from ship import Ship
from bullet import Bullet
class AlienInvasion:
    """Overall class that manages game assets and behaviour."""
    def __init__(self):
        """Initialise the game and create its resources."""
        pygame.init()
        self.settings = Settings()
        self.screen = pygame.display.set_mode((self.settings.screen_width, self.settings.screen_height))
        pygame.display.set_caption("Alien Invasion")
        self.ship = Ship(self)
        self.bullets = pygame.sprite.Group()
    def run_game(self):
        """Start the game's main loop."""
        while True:
            self._check_events()
            self.ship.update()
            self._update_bullets()
            self._update_screen()
            # The screen is redrawn on every pass through the loop.
    def _check_events(self):
        """Respond to keyboard and mouse events."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.KEYDOWN:
                self._check_keydown_events(event)
            elif event.type == pygame.KEYUP:
                self._check_keyup_events(event)
    def _check_keydown_events(self, event):
        """Respond to key presses."""
        if event.key == pygame.K_RIGHT:
            self.ship.moving_right = True
        elif event.key == pygame.K_LEFT:
            self.ship.moving_left = True
        elif event.key == pygame.K_q:
            sys.exit()
        elif event.key == pygame.K_SPACE:
            self._fire_bullet()
    def _check_keyup_events(self, event):
        """Respond to key releases."""
        if event.key == pygame.K_RIGHT:
            self.ship.moving_right = False
        elif event.key == pygame.K_LEFT:
            self.ship.moving_left = False
        elif event.key == pygame.K_q:
            sys.exit()
    def _fire_bullet(self):
        """Create a new bullet and add it to the bullets group."""
        if len(self.bullets) < self.settings.bullets_allowed:
            new_bullet = Bullet(self)
            self.bullets.add(new_bullet)
    def _update_bullets(self):
        """Update bullet positions and delete bullets that left the screen."""
        # Update bullet positions.
        self.bullets.update()
        # Remove bullets that have moved off the top of the screen.
        for bullet in self.bullets.copy():
            if bullet.rect.bottom <= 0:
                self.bullets.remove(bullet)
        # print(len(self.bullets))
    def _update_screen(self):
        """Redraw images on the screen and flip to the newest screen."""
        self.screen.fill(self.settings.bg_color)
        self.ship.blitme()
        for bullet in self.bullets.sprites():
            bullet.draw_bullet()
        # Make the most recently drawn screen visible.
        pygame.display.flip()
if __name__ == '__main__':
    # Create a game instance and run the game.
    ai = AlienInvasion()
    ai.run_game()
|
{"/alien_invasion.py": ["/ship.py"]}
|
22,344
|
ElonyMac/Alien-Invasion
|
refs/heads/master
|
/ship.py
|
import pygame
class Ship:
    """Class that manages the player's ship."""
    def __init__(self, ai_game):
        """Initialise the ship and set its starting position."""
        self.screen = ai_game.screen
        self.settings = ai_game.settings
        self.screen_rect = ai_game.screen.get_rect()
        # Load the ship image and get its rect.
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        # Each new ship starts at the bottom centre of the screen:
        # the image rect is anchored at the midbottom of the screen rect
        # taken from the ai_game instance passed in above.
        self.rect.midbottom = self.screen_rect.midbottom
        # The ship's horizontal position is stored as a float.
        self.x = float(self.rect.x)
        # Movement flags.
        self.moving_right = False
        self.moving_left = False
    def update(self):
        """Update the ship's position based on its movement flags."""
        if self.moving_right and self.rect.right < self.screen_rect.right:
            self.x += self.settings.ship_speed
        if self.moving_left and self.rect.left > 0:
            self.x -= self.settings.ship_speed
        # Update the rect object from self.x.
        self.rect.x = self.x
    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)
|
{"/alien_invasion.py": ["/ship.py"]}
|
22,345
|
md131376st/UI
|
refs/heads/master
|
/firsttry/firsttry/urls.py
|
# """untitled URL Configuration
#
# The `urlpatterns` list routes URLs to views. For more information please see:
# https://docs.djangoproject.com/en/2.0/topics/http/urls/
# Examples:
# Function views
# 1. Add an import: from my_app import views
# 2. Add a URL to urlpatterns: path('', views.home, name='home')
# Class-based views
# 1. Add an import: from other_app.views import Home
# 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
# Including another URL conf
# 1. Import the include() function: from django.urls import include, path
# 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
# """
#
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.contrib import admin
from first.forms import LoginForm
from first import views as viewsMe
# Project URL routing: auth views (login/logout/signup), admin, and app pages.
urlpatterns = [
    # url(r'^$', views.Home, name='home'),
    url(r'',include('first.urls')),
    url(r'^login/$', auth_views.login, {'template_name': 'registration/login.html', 'authentication_form': LoginForm}, name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': 'login'}, name='logout'),
    url(r'^signup/$', viewsMe.signup, {'template_name': 'registration/signup.html', 'next_page': 'registration/home.html'}, name='signup'),
    url(r'^admin/', admin.site.urls),
    url(r'home/$', viewsMe.home, {'template_name': 'registration/home.html'}, name='home'),
    url(r'edit/$', viewsMe.edit, {'template_name': 'registration/profile_edit.html'}, name='edit'),
    url(r'request/$', viewsMe.request, {'template_name': 'registration/request.html', 'next_page':'registration/requestdone.html'}, name='request'),
    url(r'requestdone/$', viewsMe.requestdone, {'template_name': 'registration/requestdone.html'}, name='requestdone')
]
|
{"/firsttry/first/forms.py": ["/firsttry/first/models.py"], "/firsttry/first/views.py": ["/firsttry/first/forms.py"]}
|
22,346
|
md131376st/UI
|
refs/heads/master
|
/firsttry/first/forms.py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import User, Request
from django.contrib.auth.forms import AuthenticationForm
# class SignUpForm(UserCreationForm):
class SignUpForm(forms.ModelForm):
    """Registration form bound directly to the custom User model.

    NOTE(review): 'password' is a plain ModelForm field, so form.save()
    persists the raw value without hashing — confirm the view calls
    user.set_password(), or derive from UserCreationForm instead.
    """
    class Meta:
        model = User
        fields = ['email', 'first_name', 'last_name', 'date_Birth', 'contact', 'user_name', 'national_number','password']
        # fields = ('ایمیل', 'نام', 'نام خانوادگی', 'تاریخ تولد','شماره ملی')
class Home(forms.ModelForm):
    """Profile form shown on the home page (basic User fields plus avatar)."""
    class Meta:
        model = User
        fields = ['email', 'first_name', 'last_name', 'user_name', 'contact', 'national_number', 'date_Birth',
                  'avatar']
class RequestForm(forms.ModelForm):
    """Form for submitting a support Request (writer, subject, body text)."""
    class Meta:
        model = Request
        fields = ['writer', 'subject', 'text']
class LoginForm(AuthenticationForm):
    """Login form with Persian labels; the username field carries the email.

    Fix: the password field now uses PasswordInput so the typed password is
    masked — the original rendered it with TextInput, echoing it on screen.
    """
    username = forms.EmailField(label="آدرس ایمیل",max_length=30, widget=forms.TextInput(attrs={'class': 'form-control', 'name': 'email'}))
    password = forms.CharField(label="رمز عبور", max_length=30,
                               widget=forms.PasswordInput(attrs={'class': 'form-control', 'name': 'password'}))
# class Meta:
# model = User
# fields = ['email', 'password']
# class CustomAuthenticationForm(AuthenticationForm):
# pass
#
|
{"/firsttry/first/forms.py": ["/firsttry/first/models.py"], "/firsttry/first/views.py": ["/firsttry/first/forms.py"]}
|
22,347
|
md131376st/UI
|
refs/heads/master
|
/firsttry/first/urls.py
|
from django.urls import path, include
from django.contrib import admin
from django.views.generic.base import TemplateView
from . import views
from django.conf.urls import url
# App URL routing: the app root is handled by the signup view.
urlpatterns = [
    # path('signup/', views.SignUp.as_view(), name='signup'),
    url(r'^$', views.signup, name='signup')
    # path('', views.signup, name='signup'),
]
|
{"/firsttry/first/forms.py": ["/firsttry/first/models.py"], "/firsttry/first/views.py": ["/firsttry/first/forms.py"]}
|
22,348
|
md131376st/UI
|
refs/heads/master
|
/firsttry/first/views.py
|
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .forms import SignUpForm, Home, RequestForm , LoginForm
# import first.forms as form_
# from firsttry.settings import AUTHENTICATION_BACKENDS
#
# from django.contrib.auth import views as auth_views
# from django.contrib.auth.forms import AuthenticationForm
# from .models import User as users
@login_required
def home(request, template_name):
    """Render the profile/home page with a Home form bound to the query string."""
    if request.method == 'GET':
        form = Home(request.GET)
        # user = users.objects.filter()
        return render(request, 'registration/home.html', {'form': form})
    # NOTE(review): non-GET requests fall through and return None (HTTP 500 in Django).
def signup(request, template_name, next_page):
    """Register a new user, log them in, and redirect to 'home'.

    Fix: the original persisted the raw password from the ModelForm; the
    password is now hashed with set_password() before saving. Invalid POSTs
    re-render the bound form (unchanged behavior).
    """
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # hash the password instead of storing the raw form value
            user.set_password(form.cleaned_data['password'])
            user.save()
            login(request, user)
            return redirect('home')
    else:
        form = SignUpForm()
    return render(request, 'registration/signup.html', {'form': form})
def edit(request,template_name):
    """Render the profile-edit page (template_name parameter is unused)."""
    return render(request, 'registration/profile_edit.html')
def request(request, template_name, next_page):
    """Show the support-request form; save valid POST submissions.

    Fixes: the original called form.save() without form.is_valid(), which
    raises ValueError on any invalid submission, and returned None (HTTP 500)
    for anonymous users — they are now redirected to the login page.
    """
    if not request.user.is_authenticated:
        return redirect('login')
    if request.method == 'POST':
        form = RequestForm(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = RequestForm()
    return render(request, 'registration/request.html', {'form': form})
def requestdone(request,template_name):
    """Render the 'request submitted' confirmation page."""
    return render(request, 'registration/requestdone.html')
# def login_(request,template_name):
# # form = csrf(request)
# # auth_views.login()
# print("life")
# if request.method == 'POST':
# form = LoginForm(request.POST)
# print("me")
# print(form.cleaned_data.get('email'))
# print (form.is_valid())
# if form.is_valid():
# print("hi")
# login(request, form)
# return redirect('home')
# else:
# return render(request, 'registration/login.html', {'form': form})
# else:
# form = LoginForm()
# return render(request, 'registration/login.html', {'form': form})
# form = form_.UserCreationForm(request.POST)
# # form_.
# if form.is_valid():
# login(request, form)
# # authenticate(request , email=request.POST['email'],password=request.POST['password'])
# # myform = form_.CustomAuthenticationForm
# # form = AuthenticationForm(request.POST)
# username = request.POST['email']
# password = request.POST['password']
# user =authenticate(request, username=username,password=password)
# if user :
# login(request,user, backend=AUTHENTICATION_BACKENDS)
# # form.username()
# # user = authenticate(request, username=form.username(), password=form.password())
# if user != None:
# return redirect('home')
# else:
# # form = form_.CustomAuthenticationForm()
# print('hi')
# return render(request, 'registration/login.html',{'form': form_.AuthenticationForm})
#
# return render(request, 'registration/login.html', {'form': form_.AuthenticationForm})
|
{"/firsttry/first/forms.py": ["/firsttry/first/models.py"], "/firsttry/first/views.py": ["/firsttry/first/forms.py"]}
|
22,349
|
md131376st/UI
|
refs/heads/master
|
/firsttry/first/migrations/0001_initial.py
|
# Generated by Django 2.0.7 on 2018-07-21 10:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import first.UserManager
class Migration(migrations.Migration):
    """Auto-generated initial schema: custom User model and Request model."""
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(help_text='1@gmail.com', max_length=254, unique=True, verbose_name='آدرس ایمیل')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='نام')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='نام خانودگی')),
                ('date_Birth', models.DateTimeField(blank=True, help_text='روز-ماه-سال', null=True, verbose_name='تاریخ تولد')),
                ('contact', models.CharField(blank=True, max_length=30, null=True, verbose_name='شماره تماس')),
                ('user_name', models.CharField(max_length=30, unique=True, verbose_name='نلم کاربری')),
                ('avatar', models.ImageField(blank=True, null=True, upload_to='image/')),
                ('national_number', models.CharField(max_length=30, unique=True, verbose_name='شماره ملی')),
                ('is_active', models.BooleanField(default=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'userProfile',
                'verbose_name_plural': 'usersProfile',
                'db_table': 'first_profile',
                'ordering': ['-last_name', 'first_name'],
            },
            managers=[
                ('objects', first.UserManager.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Request',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=500, verbose_name='موضوع')),
                ('text', models.TextField(verbose_name='درخواست')),
                ('state', models.IntegerField(choices=[(0, 'درانتظار پاسخ رهی'), (1, 'در حال پاسخ دهی'), (2, 'اتمام کار')], default=0, verbose_name='وضعیت')),
                ('response', models.TextField(verbose_name='پاسخ')),
                ('writer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'request',
                'verbose_name_plural': 'requests',
                'db_table': 'Requests',
            },
        ),
    ]
|
{"/firsttry/first/forms.py": ["/firsttry/first/models.py"], "/firsttry/first/views.py": ["/firsttry/first/forms.py"]}
|
22,350
|
md131376st/UI
|
refs/heads/master
|
/firsttry/first/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.core.mail import send_mail
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.base_user import AbstractBaseUser
from django.utils.translation import ugettext_lazy as _
from .UserManager import UserManager
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model identified by email, with Persian verbose names.

    Fix: removed the line `AbstractBaseUser.password = (_('رمز عبور'))` — it
    monkey-patched the framework base class, replacing the inherited password
    field attribute for EVERY AbstractBaseUser subclass with a plain lazy
    string. Use the migration/verbose_name machinery to relabel a field.
    """
    email = models.EmailField(_('آدرس ایمیل'), unique=True, help_text="1@gmail.com")
    first_name = models.CharField(_('نام'), max_length=30, blank=True)
    last_name = models.CharField(_('نام خانودگی'), max_length=30, blank=True)
    date_Birth = models.DateTimeField(_('تاریخ تولد'), blank=True,null=True, help_text="روز-ماه-سال")
    contact = models.CharField(_('شماره تماس'),null=True, max_length=30, blank=True)
    # user_gender = models.CharField(_('جنسیت'), choices=((0, 'woman'), (1, 'men')), max_length=30)
    user_name = models.CharField(_('نلم کاربری'), unique=True, max_length=30)
    avatar = models.ImageField(upload_to='image/', null=True, blank=True)
    national_number = models.CharField(_('شماره ملی'), unique=True, max_length=30)
    # date_joined = models.DateTimeField(_('date joined'), default=timezone)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name', 'national_number']
    class Meta:
        db_table = 'first_profile'
        ordering = ['-last_name', 'first_name']
        verbose_name = _('userProfile')
        verbose_name_plural = _('usersProfile')
    def get_full_name(self):
        """Return first_name plus last_name, separated by a space."""
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        """Return the short name (first name) for the user."""
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        """Send an email to this user's address."""
        send_mail(subject, message, from_email, [self.email], **kwargs)
class Request(models.Model):
    """A support request written by a user, with workflow state and a response."""
    writer = models.ForeignKey('User', on_delete=models.CASCADE)
    subject = models.CharField(_('موضوع'), max_length=500)
    text = models.TextField(_('درخواست'))
    # workflow states: 0 = awaiting reply, 1 = being answered, 2 = done
    # (identifier 'SRATUS' is a typo for STATUS, kept for compatibility)
    SRATUS = ((0, 'درانتظار پاسخ رهی'), (1, 'در حال پاسخ دهی'), (2, 'اتمام کار'))
    state = models.IntegerField(_('وضعیت'), choices=SRATUS, default=0)
    response = models.TextField(_('پاسخ'))
    class Meta:
        db_table = 'Requests'
        verbose_name = _('request')
        verbose_name_plural = _('requests')
    def __str__(self):
        return self.text
|
{"/firsttry/first/forms.py": ["/firsttry/first/models.py"], "/firsttry/first/views.py": ["/firsttry/first/forms.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.