text stringlengths 0 1.05M | meta dict |
|---|---|
__author__ = 'cody'
from webfront import failed_ip_login_attempt_counts, app
def increment_failed_logins(ip):
    """Record one more failed login attempt for *ip* and log the new count."""
    previous = failed_ip_login_attempt_counts.get(ip, 0)
    failed_ip_login_attempt_counts[ip] = previous + 1
    app.logger.warning("{} failed login count increased to {}".format(ip, failed_ip_login_attempt_counts[ip]))
def clear_failed_logins(ip):
    """Forget any failed-login count stored for *ip* and log the reset."""
    failed_ip_login_attempt_counts.pop(ip, None)
    app.logger.warning("{} failed login count has been reset".format(ip))
def ip_failed_previously(ip):
    """Return True when *ip* has at least one recorded failed login."""
    return ip in failed_ip_login_attempt_counts
def ip_attempts_past_threshold(ip):
    """Return True when *ip*'s failed-login count has reached the captcha threshold."""
    count = failed_ip_login_attempt_counts.get(ip)
    if count is None:
        return False
    return count >= app.config["FAILED_ATTEMPTS_CAPTCHA_THRESHOLD"]
def set_ip_to_threshold(ip):
    """Force *ip*'s failed-login count up to the captcha threshold so the
    reactive captcha will trigger on its next attempt."""
    failed_ip_login_attempt_counts[ip] = app.config["FAILED_ATTEMPTS_CAPTCHA_THRESHOLD"]
def should_display_captcha(ip, forced_captcha=False):
    """Decide whether the login page should render a reCAPTCHA for *ip*.

    The global kill switch wins; a forced captcha follows its own flag;
    otherwise the reactive captcha fires once *ip* passes the threshold.
    """
    cfg = app.config
    if cfg["DISABLE_ALL_RECAPTCHA"]:
        return False
    if forced_captcha:
        return cfg["FORCED_RECAPTCHA_ENABLED"]
    return bool(cfg["REACTIVE_RECAPTCHA_ENABLED"] and ip_attempts_past_threshold(ip))
| {
"repo_name": "codyharrington/todolist",
"path": "webfront/controllers/__init__.py",
"copies": "1",
"size": "1296",
"license": "mit",
"hash": -5554895346370120000,
"line_mean": 37.1176470588,
"line_max": 110,
"alpha_frac": 0.7021604938,
"autogenerated": false,
"ratio": 3.3402061855670104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9509814152676641,
"avg_score": 0.006510505338073688,
"num_lines": 34
} |
__author__ = 'cody'
from webfront.models import LocalBase
from utils.rest_api_utils import *
class LocalUser(LocalBase):
    """Dict-backed user object satisfying the flask-login user interface."""

    # To be driven by email confirmations once they are implemented.
    enabled = True

    def is_authenticated(self):
        # Unauthenticated requests receive None instead of a LocalUser,
        # so any instance that exists is by construction authenticated.
        return True

    def is_active(self):
        return self.enabled

    def is_anonymous(self):
        # Anonymous users are not modelled in this application.
        return False

    def get_id(self):
        # flask-login identifies users by this key.
        return self["username"]
class UserManager(RestClient):
    """High-level client for the user endpoints of the todolist REST API."""

    def __init__(self, api_url):
        super().__init__(api_url)

    def get_user(self, username):
        """Fetch one user; return a LocalUser, or None when the lookup fails."""
        body, status = self.get_resource("user/{}".format(username))
        if status == HTTPStatusCodes.OK:
            return LocalUser(body["data"])
        print(body)
        return None

    def save_new_user(self, local_user):
        """POST a new user record; return the raw response object."""
        body, status = self.post_resource("user", data=local_user.copy())
        if status != HTTPStatusCodes.CREATED:
            print(body)
        return body

    def update_existing_user(self, local_user):
        """PUT updated fields for an existing user; return the raw response."""
        uri = "user/{}".format(local_user["username"])
        body, status = self.put_resource(uri, data=local_user.copy())
        if status != HTTPStatusCodes.OK:
            print(body)
        return body

    def delete_user(self, username):
        """DELETE a user; return the raw response object."""
        body, status = self.delete_resource("user/{}".format(username))
        if status != HTTPStatusCodes.NO_CONTENT:
            print(body)
        return body

    def authenticate_user(self, username, password):
        """Verify credentials; return the LocalUser on success, else None."""
        credentials = {"username": username, "password": password}
        body, status = self.post_resource("user/authenticate", data=credentials)
        if status == HTTPStatusCodes.OK:
            return LocalUser(body["data"])
        print(body)
        return None
| {
"repo_name": "codyharrington/todolist",
"path": "webfront/models/user.py",
"copies": "1",
"size": "2012",
"license": "mit",
"hash": -2502430996655593500,
"line_mean": 29.9538461538,
"line_max": 114,
"alpha_frac": 0.62972167,
"autogenerated": false,
"ratio": 4.1570247933884295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01693935763548872,
"num_lines": 65
} |
__author__ = 'cody'
import requests
from ujson import dumps, loads
from flask import Response, jsonify
from utils.messages import *
from utils.exceptions import *
class HTTPStatusCodes():
    """Named HTTP status codes used by the REST client and API handlers."""

    # Success family
    OK = 200
    CREATED = 201
    NO_CONTENT = 204  # conventional reply to a successful DELETE
    # Client errors
    BAD_REQUEST = 400
    UNAUTHORISED = 401
    FORBIDDEN = 403
    NOT_FOUND = 404
    IM_A_TEAPOT = 418
    # Request is syntactically valid JSON but semantically invalid.
    UNPROCESSABLE_ENTITY = 422
    # Server errors
    INTERNAL_SERVER_ERROR = 500
class RestClient(object):
    """Thin wrapper around ``requests`` for talking to the JSON REST API.

    ``last`` keeps lightweight details of the most recent request/response
    pair for later inspection (bodies are omitted as they could be large).
    """

    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

    def __init__(self, api_url):
        self.api_url = api_url.rstrip("/")
        # Bug fix: ``last`` used to be a class attribute, so every
        # RestClient subclass instance (UserManager, TaskManager, ...)
        # shared one dict and each request clobbered the others' diagnostics.
        self.last = {}

    def _store_last(self, request, response):
        """Store some details of request and response so they can be inspected later.
        The body of the response is not stored as it could be very large."""
        self.last["request"] = {
            "headers": request.headers, "body": request.body,
            "method": request.method, "url": request.url
        }
        self.last["response"] = {
            "encoding": response.encoding, "headers": response.headers,
            "is_redirect": response.is_redirect, "status_code": response.status_code,
            "url": response.url
        }

    def _url(self, uri):
        # Strip a leading slash so "task/1" and "/task/1" build the same URL.
        # (get_resource previously used rstrip here, inconsistently with the
        # lstrip used by the other verbs; all visible callers pass URIs with
        # neither leading nor trailing slashes, so behaviour is unchanged.)
        return "{}/{}".format(self.api_url, uri.lstrip("/"))

    def _decode(self, response):
        """Record the exchange and return (parsed_body, status_code)."""
        self._store_last(response.request, response)
        text = response.text
        if len(text) == 0:
            return {"message": "No data"}, HTTPStatusCodes.NO_CONTENT
        return loads(text), response.status_code

    def get_resource(self, uri):
        """GET *uri* and return (parsed_body, status_code)."""
        return self._decode(requests.get(self._url(uri)))

    def post_resource(self, uri, data=None):
        """POST *data* as JSON to *uri*; return (parsed_body, status_code)."""
        return self._decode(requests.post(self._url(uri), data=dumps(data),
                                          headers=self.headers))

    def put_resource(self, uri, data=None):
        """PUT *data* as JSON to *uri*; return (parsed_body, status_code)."""
        return self._decode(requests.put(self._url(uri), data=dumps(data),
                                         headers=self.headers))

    def delete_resource(self, uri):
        """DELETE *uri* and return (parsed_body, status_code)."""
        return self._decode(requests.delete(self._url(uri)))
def rest_jsonify(data=None, status=HTTPStatusCodes.OK, **kwargs):
    """This method can take optional keyword arguments which will be added to the
    dictionary to convert to json. Good for adding err or message parameters"""
    payload = {} if data is None else {"data": data}
    payload.update(kwargs)
    return Response(dumps(payload), mimetype="application/json", status=status)
def validate_convert_request(request_data, required_headers=None):
    """Parse a raw JSON request body into a dict.

    Raises MalformedJSONException, NoDataException,
    ExpectedJSONObjectException or InsufficientFieldsException when the
    payload is unparseable, empty, not an object, or missing fields.
    """
    if required_headers is None:
        required_headers = []
    try:
        data = loads(request_data.decode())
    except ValueError:
        raise MalformedJSONException
    if len(data) == 0:
        raise NoDataException
    if not isinstance(data, dict):
        raise ExpectedJSONObjectException
    if not all(header in data for header in required_headers):
        raise InsufficientFieldsException(INSUFFICIENT_FIELDS.format(", ".join(required_headers)))
    return data
| {
"repo_name": "codyharrington/todolist",
"path": "utils/rest_api_utils.py",
"copies": "1",
"size": "3925",
"license": "mit",
"hash": -7917103601658134000,
"line_mean": 36.7403846154,
"line_max": 119,
"alpha_frac": 0.6410191083,
"autogenerated": false,
"ratio": 3.9928789420142423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01148702015729745,
"num_lines": 104
} |
__author__ = 'cody'
class Config():
    """Base configuration class. Any variable set here is loaded in all apps"""
    DEBUG = True  # NOTE(review): should be False in production
    SECRET_KEY = "1234"  # NOTE(review): placeholder — must be overridden with a real secret
    LOG_DIRECTORY = "/tmp"
    LOG_FORMAT_STRING = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    MAX_LOG_BYTES = 10000  # presumably the rotating-log size cap — confirm against logger setup
class Webfront(Config):
    """Settings for the user-facing web frontend."""
    DATABASE_URL = "http://localhost:5000"  # base URL of the dbapi REST service
    LOGIN_URI = "/login"
    RECAPTCHA_VERIFY_URL = "http://www.google.com/recaptcha/api/verify"
    RECAPTCHA_PUBLIC_KEY = ""   # fill in with keys from the reCAPTCHA console
    RECAPTCHA_PRIVATE_KEY = ""
    DISABLE_ALL_RECAPTCHA = True  # Overrides the enabled fields below.
    REACTIVE_RECAPTCHA_ENABLED = False  # Triggers on suspicious activity
    FORCED_RECAPTCHA_ENABLED = False  # User always has to fill this one in
    FAILED_ATTEMPTS_CAPTCHA_THRESHOLD = 3  # failed logins before the reactive captcha fires
class Dbapi(Config):
    """Settings for the database-backed REST API service."""
    USER = "todolist"
    PASSWORD = "todolist"  # NOTE(review): credentials belong in env vars / secret storage
    HOST = "localhost"
    PORT = "5432"
    DBNAME = "todolist"
    # Assembled once at import time from the pieces above.
    SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://{0}:{1}@{2}:{3}/{4}".format(
        USER, PASSWORD, HOST, PORT, DBNAME
    )
| {
"repo_name": "codyharrington/todolist",
"path": "config.py",
"copies": "1",
"size": "1050",
"license": "mit",
"hash": 6090491391147341000,
"line_mean": 27.3783783784,
"line_max": 81,
"alpha_frac": 0.6447619048,
"autogenerated": false,
"ratio": 3.2710280373831777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9252938954784595,
"avg_score": 0.03257019747971641,
"num_lines": 37
} |
__author__ = 'cody'
from sqlalchemy.ext.declarative import declarative_base as real_declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey, Boolean
from dbapi import app_bcrypt
from bcrypt import gensalt
from datetime import datetime
# The below code was taken from
# https://blogs.gnome.org/danni/2013/03/07/generating-json-from-sqlalchemy-objects/
# with an added fromdict() method
# Class decorator wrapping SQLAlchemy's declarative_base so the Base mixin
# below becomes the declarative superclass. A named ``def`` replaces the
# original assigned lambda (PEP 8 E731).
def declarative_base(cls):
    return real_declarative_base(cls=cls)
@declarative_base
class Base(object):
    """
    Add some default properties and methods to the SQLAlchemy declarative base.
    """

    @property
    def columns(self):
        """Names of the mapped table's columns."""
        return [c.name for c in self.__table__.columns]

    @property
    def columnitems(self):
        """Mapping of column name -> current attribute value."""
        return {c: getattr(self, c) for c in self.columns}

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, self.columnitems)

    def todict(self):
        """Return the row as a plain dict (see columnitems)."""
        return self.columnitems

    def fromdict(self, dict_obj):
        """Populate mapped columns from *dict_obj*, ignoring unknown keys.

        ``id`` and ``required_fields`` are stripped (from *dict_obj* itself,
        preserving the original's in-place mutation) so clients cannot
        overwrite the primary key or the class-level validation list.
        """
        # Plain loops replace list comprehensions that were used purely
        # for their side effects.
        for field in ("id", "required_fields"):
            dict_obj.pop(field, None)
        for key, value in dict_obj.items():
            if key in self.columns:
                setattr(self, key, value)
# Define our schema
class User(Base):
    """Account row; passwords are stored only as salted bcrypt hashes."""
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    username = Column(String(100), nullable=False, unique=True)
    email = Column(String(100), unique=True)
    password = Column(String(100))  # bcrypt hash, never the plaintext
    salt = Column(String(50))       # per-user salt prepended before hashing
    tasks = relationship("Task", cascade="all, delete-orphan", back_populates="user")
    # Fields a client must supply when creating a user (checked by the API layer).
    required_fields = ["username", "password"]

    def __init__(self, dict_obj=None):
        """Build a user from a client-supplied dict (may be empty)."""
        if dict_obj is None:
            dict_obj = {}
        self.fromdict(dict_obj)

    def set_password(self, password):
        """Generate a fresh salt and store the hash of salt+password.

        NOTE(review): bcrypt.gensalt() returns bytes while str(password)
        is text — confirm this concatenation works on Python 3.
        """
        self.salt = gensalt()
        self.password = app_bcrypt.generate_password_hash(self.salt + str(password))

    def check_password(self, password):
        """Return True when salt+password hashes to the stored hash."""
        return app_bcrypt.check_password_hash(self.password, self.salt + str(password))

    def todict(self, recurse=True):
        """Serialize the row, hiding credentials; include tasks unless
        *recurse* is False (used to break the user<->task cycle)."""
        data = super().todict()
        # We don't want to reveal the salt or password
        if "password" in data:
            del data["password"]
        if "salt" in data:
            del data["salt"]
        if recurse:
            if hasattr(self, "tasks") and self.tasks is not None:
                data["tasks"] = [task.todict(recurse=False) for task in self.tasks]
            else:
                data["tasks"] = []
        return data

    def fromdict(self, dict_obj):
        """Populate columns from *dict_obj*; a plaintext password is
        re-stored as a salted hash via set_password."""
        super().fromdict(dict_obj)
        # We want to make sure that the password is stored in the database as a hash
        if "password" in dict_obj:
            self.set_password(dict_obj["password"])
class Task(Base):
    """A todo item belonging to one user."""
    __tablename__ = "task"
    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False)
    points = Column(Integer, default=0)
    start = Column(DateTime)  # when the task was begun
    end = Column(DateTime)    # set when the task is finished
    desc = Column(String(1000))
    userid = Column(Integer, ForeignKey("user.id"))
    # Fields a client must supply when creating a task.
    required_fields = ["name"]
    user = relationship("User", back_populates="tasks")

    def __init__(self, dict_obj=None):
        """Build a task from a client-supplied dict (may be empty)."""
        if dict_obj is None:
            dict_obj = {}
        self.fromdict(dict_obj)

    def todict(self, recurse=True):
        """Serialize the row; include the owning user unless *recurse* is
        False (used to break the user<->task cycle)."""
        data = super().todict()
        if recurse:
            if hasattr(self, "user") and self.user is not None:
                data["user"] = self.user.todict(recurse=False)
            else:
                data["user"] = None
        return data

    def fromdict(self, dict_obj):
        """Populate columns; start/end arrive as POSIX timestamps and are
        converted to UTC datetimes, overwriting the raw values set by
        the base-class fromdict."""
        super().fromdict(dict_obj)
        if "start" in dict_obj and dict_obj["start"] is not None:
            self.start = datetime.utcfromtimestamp(dict_obj["start"])
        if "end" in dict_obj and dict_obj["end"] is not None:
            self.end = datetime.utcfromtimestamp(dict_obj["end"])
| {
"repo_name": "codyharrington/todolist",
"path": "dbapi/models.py",
"copies": "1",
"size": "4037",
"license": "mit",
"hash": -4722335074975288000,
"line_mean": 31.8211382114,
"line_max": 96,
"alpha_frac": 0.6239782016,
"autogenerated": false,
"ratio": 3.7764265668849393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49004047684849394,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cody'
from utils.exceptions import *
from utils.rest_api_utils import *
from utils.error_utils import *
from dbapi import app
from sqlalchemy.exc import SQLAlchemyError
import traceback
@app.errorhandler(ExpectedJSONObjectException)
def not_json_object(e):
    """422 response when the request body is JSON but not a JSON object."""
    app.logger.error("{}\n{}".format(EXPECTED_JSON_OBJECT, traceback.format_exc()))
    # Bug fix: a bare traceback.format_exc() was called and its result
    # discarded; print the traceback like every other handler in this module.
    traceback.print_exc()
    return rest_jsonify(err=EXPECTED_JSON_OBJECT, status=HTTPStatusCodes.UNPROCESSABLE_ENTITY)
@app.errorhandler(MalformedJSONException)
def malformed_json(e):
    """400 response when the request body cannot be parsed as JSON."""
    trace = traceback.format_exc()
    app.logger.error("{}\n{}".format(MALFORMED_JSON, trace))
    traceback.print_exc()
    return rest_jsonify(err=MALFORMED_JSON, status=HTTPStatusCodes.BAD_REQUEST)
@app.errorhandler(InsufficientFieldsException)
def insufficient_fields(e):
    """422 response when required fields are missing from the payload."""
    app.logger.error("{}\n{}".format(INSUFFICIENT_FIELDS, traceback.format_exc()))
    traceback.print_exc()
    # NOTE(review): e is raised as InsufficientFieldsException(
    # INSUFFICIENT_FIELDS.format(...)) elsewhere, so formatting
    # INSUFFICIENT_FIELDS with e again looks like a double .format — confirm.
    return rest_jsonify(err=INSUFFICIENT_FIELDS.format(e), status=HTTPStatusCodes.UNPROCESSABLE_ENTITY)
@app.errorhandler(NotFoundException)
def not_found(e):
    """404 response for lookups of resources that do not exist."""
    app.logger.error("{}\n{}".format(RESOURCE_NOT_FOUND, traceback.format_exc()))
    traceback.print_exc()
    message = handle_default_err_msg(e, RESOURCE_NOT_FOUND)
    return rest_jsonify(err=message, status=HTTPStatusCodes.NOT_FOUND)
@app.errorhandler(AlreadyExistsException)
def already_exists(e):
    """403 response when creating a resource that already exists."""
    app.logger.error("{}\n{}".format(RESOURCE_ALREADY_EXISTS, traceback.format_exc()))
    traceback.print_exc()
    message = handle_default_err_msg(e, RESOURCE_ALREADY_EXISTS)
    return rest_jsonify(err=message, status=HTTPStatusCodes.FORBIDDEN)
@app.errorhandler(NoDataException)
def no_data(e):
    """422 response when the request body is empty."""
    trace = traceback.format_exc()
    app.logger.error("{}\n{}".format(REQUEST_EMPTY, trace))
    traceback.print_exc()
    return rest_jsonify(err=REQUEST_EMPTY, status=HTTPStatusCodes.UNPROCESSABLE_ENTITY)
@app.errorhandler(AuthenticationFailureException)
def authentication_failed(e):
    """401 response when credential verification fails."""
    trace = traceback.format_exc()
    app.logger.error("{}\n{}".format(AUTHENTICATION_FAILURE, trace))
    traceback.print_exc()
    return rest_jsonify(err=AUTHENTICATION_FAILURE, status=HTTPStatusCodes.UNAUTHORISED)
@app.errorhandler(SQLAlchemyError)
def database_error(e):
    """500 response for any error raised by the database layer."""
    trace = traceback.format_exc()
    app.logger.error("{}\n{}".format(INTERNAL_EXCEPTION, trace))
    traceback.print_exc()
    return rest_jsonify(err=INTERNAL_EXCEPTION, status=HTTPStatusCodes.INTERNAL_SERVER_ERROR)
@app.errorhandler(InternalServerErrorException)
def internal_server_error(e):
    """500 response for unexpected internal failures."""
    trace = traceback.format_exc()
    app.logger.error("{}\n{}".format(INTERNAL_EXCEPTION, trace))
    traceback.print_exc()
    return rest_jsonify(err=INTERNAL_EXCEPTION, status=HTTPStatusCodes.INTERNAL_SERVER_ERROR)
| {
"repo_name": "codyharrington/todolist",
"path": "dbapi/controllers/error.py",
"copies": "1",
"size": "2699",
"license": "mit",
"hash": -6459529981478929000,
"line_mean": 41.171875,
"line_max": 103,
"alpha_frac": 0.7469433123,
"autogenerated": false,
"ratio": 3.523498694516971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9742094051773159,
"avg_score": 0.005669591008762411,
"num_lines": 64
} |
__author__ = 'cody'
from webfront import app, login_manager, task_manager
from flask import render_template, redirect, flash
from flask_login import current_user, login_required
from webfront.models.task import *
from webfront.models.user import *
from utils.request_utils import *
@app.route("/task/add", methods=["GET"])
@login_required
def add_task():
    """Render the task form with a blank task, ready for creation."""
    return render_template("task/task.html", task=LocalTask())
@app.route("/task/add", methods=["POST"])
@login_required
def process_new_task():
    """Validate the submitted task form and create the task via the API."""
    name = form_data("name")
    if name is None:
        # Name is mandatory; send the user back to the empty form.
        return redirect("/task/add")
    # Hoisted: form_data("points") was previously read three times.
    points = form_data("points")
    new_task = LocalTask()
    new_task["name"] = name
    new_task["desc"] = form_data("desc")
    new_task["points"] = int(points) if points is not None and points.isdigit() else 0
    new_task["userid"] = current_user["id"]
    new_task["start"] = datetime.now()
    response = task_manager.save_new_task(new_task)
    if "err" in response:
        flash(response["err"], category="error")
        return redirect("/task/add")
    flash(TASK_CREATED, category="success")
    return redirect("/")
@app.route("/task/edit/<id>", methods=["GET"])
@login_required
def edit_task(id):
    """Render the task form pre-filled with task *id*, or bounce home."""
    task = task_manager.get_task(id)
    if task is not None:
        return render_template("task/task.html", task=task)
    flash(TASK_NOT_FOUND, category="error")
    return redirect("/")
@app.route("/task/edit/<id>", methods=["POST"])
@login_required
def submit_task_edit(id):
    """Apply the submitted edits to task *id* via the API."""
    name = form_data("name")
    if name is None:
        # Bug fix: previously redirected to /task/add, dropping the user
        # out of the edit flow; return to the edit form for this task.
        return redirect("/task/edit/{}".format(id))
    updated_task = task_manager.get_task(id)
    updated_task["name"] = name
    updated_task["desc"] = form_data("desc")
    # Consistency fix: normalise points the same way task creation does
    # (previously the raw form string, possibly None, was sent to the API).
    points = form_data("points")
    updated_task["points"] = int(points) if points is not None and points.isdigit() else 0
    response = task_manager.update_existing_task(updated_task)
    if "err" in response:
        flash(response["err"], category="error")
    else:
        flash(UPDATE_SUCCESSFUL, category="success")
    return redirect("/")
@app.route("/task/delete/<id>")
@login_required
def delete_task(id):
    """Delete task *id* via the API; flash any error, then return home."""
    response = task_manager.delete_task(id)
    error = response["err"] if "err" in response else None
    if error is not None:
        flash(error, category="error")
    return redirect("/")
@app.route("/task/finish/<id>")
@login_required
def complete_task(id):
    """Mark task *id* finished via the API and report the outcome."""
    response = task_manager.finish_task(id)
    if "err" not in response:
        flash(UPDATE_SUCCESSFUL, category="success")
    else:
        flash(response["err"], category="error")
    return redirect("/")
"repo_name": "codyharrington/todolist",
"path": "webfront/controllers/task.py",
"copies": "1",
"size": "2487",
"license": "mit",
"hash": -8481802618578898000,
"line_mean": 29.7160493827,
"line_max": 125,
"alpha_frac": 0.6485725774,
"autogenerated": false,
"ratio": 3.49789029535865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.464646287275865,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cody'
from webfront import app, user_manager
from flask_login import login_required, current_user,logout_user
from flask import render_template, redirect, flash
from utils.request_utils import form_data
from utils.messages import *
@app.route("/user/delete", methods=["GET"])
@login_required
def display_delete_user_page():
    """Render the confirm-deletion page for the logged-in account."""
    return render_template("user/user_delete.html")
@app.route("/user/delete", methods=["POST"])
@login_required
def delete_current_user():
    """Delete the logged-in account after re-checking the password."""
    password = form_data("password")
    auth = user_manager.authenticate_user(current_user["username"], password)
    if auth is None:
        flash(INCORRECT_USERNAME_OR_PASSWORD, category="error")
        return redirect("/user/delete")
    response = user_manager.delete_user(current_user["username"])
    if "err" in response:
        flash(response["err"], category="error")
        return redirect("/")
    flash(USER_DELETED, category="success")
    return redirect("/logout")
@app.route("/user/repassword", methods=["GET"])
@login_required
def display_change_password_page():
    """Render the change-password form for the logged-in account."""
    return render_template("user/user_repassword.html")
@app.route("/user/repassword", methods=["POST"])
@login_required
def change_current_user_password():
    """Change the logged-in user's password once both fields match."""
    password = form_data("password")
    repassword = form_data("repassword")
    if password != repassword:
        flash(PASSWORDS_NOT_MATCH, category="warning")
        return redirect("/user/repassword")
    current_user["password"] = password
    response = user_manager.update_existing_user(current_user)
    if "err" in response:
        flash(response["err"], category="error")
        # Bug fix: the change-password form lives at /user/repassword;
        # the previous redirect to "/user/password" is not a registered route.
        return redirect("/user/repassword")
    flash(PASSWORDS_CHANGED_SUCCESSFULLY, category="success")
    return redirect("/")
| {
"repo_name": "codyharrington/todolist",
"path": "webfront/controllers/user.py",
"copies": "1",
"size": "1846",
"license": "mit",
"hash": -8732467764394627000,
"line_mean": 32.5636363636,
"line_max": 81,
"alpha_frac": 0.6684723727,
"autogenerated": false,
"ratio": 3.9528907922912206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02025037688879944,
"num_lines": 55
} |
__author__ = 'cody'
from webfront.models import LocalBase
from utils.rest_api_utils import *
from datetime import datetime, timedelta
class LocalTask(LocalBase):
    """Dict-backed task object mirroring a task row served by the API."""
    pass
class TaskManager(RestClient):
    """High-level client for the task endpoints of the todolist REST API."""

    def __init__(self, api_url):
        super().__init__(api_url)

    def get_task(self, task_id):
        """Fetch one task; return a LocalTask, or None when the lookup fails."""
        body, status = self.get_resource("task/{}".format(task_id))
        if status == HTTPStatusCodes.OK:
            return LocalTask(body["data"])
        print(body)
        return None

    def get_all_tasks(self):
        """Fetch every task; return a list of LocalTask, or None on failure."""
        body, status = self.get_resource("task")
        if status == HTTPStatusCodes.OK:
            return [LocalTask(item) for item in body["data"]]
        print(body)
        return None

    def save_new_task(self, local_task):
        """POST a new task record; return the raw response object."""
        body, status = self.post_resource("task", data=local_task.copy())
        if status != HTTPStatusCodes.CREATED:
            print(body)
        return body

    def update_existing_task(self, local_task):
        """PUT updated fields for an existing task; return the raw response."""
        uri = "/task/{}".format(local_task["id"])
        body, status = self.put_resource(uri, data=local_task.copy())
        if status != HTTPStatusCodes.OK:
            print(body)
        return body

    def delete_task(self, task_id):
        """DELETE a task; return the raw response object."""
        body, status = self.delete_resource("/task/{}".format(task_id))
        if status != HTTPStatusCodes.NO_CONTENT:
            print(body)
        return body

    def finish_task(self, task_id):
        """POST the finish action for a task; return the raw response."""
        body, status = self.post_resource("/task/{}/finish".format(task_id))
        if status != HTTPStatusCodes.OK:
            print(body)
        return body
| {
"repo_name": "codyharrington/todolist",
"path": "webfront/models/task.py",
"copies": "1",
"size": "1719",
"license": "mit",
"hash": -6123239057925421000,
"line_mean": 30.8333333333,
"line_max": 109,
"alpha_frac": 0.6212914485,
"autogenerated": false,
"ratio": 3.8542600896860986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9873604956042126,
"avg_score": 0.020389316428794376,
"num_lines": 54
} |
__author__ = 'cody'
import flask
from dbapi import app
from flask import request
from dbapi.models import *
from utils.rest_api_utils import *
from utils.exceptions import *
@app.route("/user", methods=["GET"])
def get_all_users():
    """Return every user as a JSON list (todict strips password and salt)."""
    all_users = [user.todict() for user in flask.g.db_session.query(User).all()]
    return rest_jsonify(all_users)
@app.route("/user", methods=["POST", "PUT"])
def create_new_user():
    """Create a user from the JSON body; 403 when the username is taken."""
    data = validate_convert_request(request.data, required_headers=User.required_fields)
    existing = flask.g.db_session.query(User).filter(User.username == data["username"]).first()
    if existing is not None:
        raise AlreadyExistsException(USER_ALREADY_EXISTS)
    flask.g.db_session.add(User(data))
    flask.g.db_session.commit()
    return rest_jsonify(message=USER_CREATED, status=HTTPStatusCodes.CREATED)
@app.route("/user/<username>", methods=["POST", "PUT"])
def update_user(username):
    """Apply JSON body fields to an existing user; 404 when unknown."""
    data = validate_convert_request(request.data, required_headers=[])
    user = flask.g.db_session.query(User).filter(User.username == username).first()
    if user is None:
        raise NotFoundException(USER_NOT_FOUND)
    user.fromdict(data)
    flask.g.db_session.merge(user)
    flask.g.db_session.commit()
    return rest_jsonify(message=RESOURCE_UPDATED, status=HTTPStatusCodes.OK)
@app.route("/user/<username>", methods=["DELETE"])
def delete_user(username):
    """Delete a user (tasks cascade via the relationship); 404 when unknown."""
    user = flask.g.db_session.query(User).filter(User.username == username).scalar()
    if user is None:
        raise NotFoundException(USER_NOT_FOUND)
    flask.g.db_session.delete(user)
    flask.g.db_session.commit()
    return rest_jsonify(message=RESOURCE_DELETED, status=HTTPStatusCodes.NO_CONTENT)
@app.route("/user/<username>", methods=["GET"])
def get_specific_user(username):
    """Return one user's serialized row; 404 when unknown."""
    user = flask.g.db_session.query(User).filter(User.username == username).scalar()
    if user is None:
        raise NotFoundException(USER_NOT_FOUND)
    return rest_jsonify(user.todict())
@app.route("/user/authenticate", methods=["POST"])
def authenticate_user():
    """Check username/password from the JSON body; return the user on success."""
    data = validate_convert_request(request.data, required_headers=["username", "password"])
    user = flask.g.db_session.query(User).filter(User.username == data["username"]).scalar()
    if user is None:
        raise NotFoundException(USER_NOT_FOUND)
    if not user.check_password(data["password"]):
        raise AuthenticationFailureException
    return rest_jsonify(user.todict())
| {
"repo_name": "codyharrington/todolist",
"path": "dbapi/controllers/user.py",
"copies": "1",
"size": "2556",
"license": "mit",
"hash": 7565915119992506000,
"line_mean": 37.1492537313,
"line_max": 92,
"alpha_frac": 0.6846635368,
"autogenerated": false,
"ratio": 3.574825174825175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9665882523786857,
"avg_score": 0.018721237567663442,
"num_lines": 67
} |
__author__ = 'cody'
import flask
from flask import request
from dbapi import app
from dbapi.models import *
from utils.rest_api_utils import *
@app.route("/task/<id>", methods=["GET"])
def get_task(id):
    """Return one task's serialized row; 404 when unknown."""
    task = flask.g.db_session.query(Task).filter(Task.id == id).scalar()
    if task is None:
        raise NotFoundException(TASK_NOT_FOUND)
    return rest_jsonify(task.todict())
@app.route("/task", methods=["GET"])
def get_all_tasks():
    """Return every task as a JSON list."""
    all_tasks = [task.todict() for task in flask.g.db_session.query(Task).all()]
    return rest_jsonify(all_tasks)
@app.route("/task", methods=["POST", "PUT"])
def create_new_task():
    """Create a task from the JSON body; 403 when the name is taken.

    Bug fix: required fields are now validated up front (mirroring
    create_new_user) — previously a body without "name" raised a bare
    KeyError (500) instead of a 422 validation error, even though
    Task.required_fields declares "name" as mandatory.
    """
    data = validate_convert_request(request.data, required_headers=Task.required_fields)
    task = flask.g.db_session.query(Task).filter(Task.name == data["name"]).first()
    if task is not None:
        raise AlreadyExistsException(TASK_ALREADY_EXISTS)
    flask.g.db_session.add(Task(data))
    flask.g.db_session.commit()
    return rest_jsonify(message=TASK_CREATED, status=HTTPStatusCodes.CREATED)
@app.route("/task/<id>", methods=["POST", "PUT"])
def update_task(id):
    """Apply JSON body fields to an existing task; 404 when unknown."""
    data = validate_convert_request(request.data)
    task = flask.g.db_session.query(Task).filter(Task.id == id).scalar()
    if task is None:
        raise NotFoundException(TASK_NOT_FOUND)
    task.fromdict(data)
    flask.g.db_session.commit()
    return rest_jsonify(message=RESOURCE_UPDATED)
@app.route("/task/<id>", methods=["DELETE"])
def delete_task(id):
    """Delete task *id*; 404 when unknown, 204 on success."""
    task = flask.g.db_session.query(Task).filter(Task.id == id).scalar()
    if task is None:
        raise NotFoundException(TASK_NOT_FOUND)
    flask.g.db_session.delete(task)
    flask.g.db_session.commit()
    return rest_jsonify(message=RESOURCE_DELETED, status=HTTPStatusCodes.NO_CONTENT)
@app.route("/task/<id>/finish", methods=["POST", "PUT"])
def complete_task(id):
    """Stamp the task's end time to mark it finished; 404 when unknown."""
    task = flask.g.db_session.query(Task).filter(Task.id == id).scalar()
    if task is None:
        raise NotFoundException(TASK_NOT_FOUND)
    # Consistency fix: Task.fromdict stores start/end via utcfromtimestamp,
    # so finishing must also record UTC — datetime.now() wrote local time.
    task.end = datetime.utcnow()
    flask.g.db_session.commit()
    return rest_jsonify(message=RESOURCE_UPDATED)
| {
"repo_name": "codyharrington/todolist",
"path": "dbapi/controllers/task.py",
"copies": "1",
"size": "2173",
"license": "mit",
"hash": 2448021208737021000,
"line_mean": 32.953125,
"line_max": 88,
"alpha_frac": 0.6589967786,
"autogenerated": false,
"ratio": 3.317557251908397,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.938432697012913,
"avg_score": 0.018445412075853512,
"num_lines": 64
} |
__author__ = 'coen'
from multiprocessing import Pipe
from logmultiprocessing import *
from stoppablemultiprocessing import StoppableProcess
def setuplogging(logfile, loglevel):
assert(isinstance(logfile, str))
assert(isinstance(loglevel, int))
logqueue = Queue()
message_conn, to_main = Pipe()
logger = StoppableProcessLogger(logqueue, message_conn, logfile)
return logqueue, logger, to_main
class StoppableLoggingProcess(LoggingProcess, StoppableProcess):
    """A LoggingProcess that also polls a control pipe so it can be stopped.

    NOTE(review): both bases are initialised explicitly rather than via
    super() — confirm neither base relies on cooperative MRO chaining.
    """
    def __init__(self, logqueue, message_conn, name):
        LoggingProcess.__init__(self, logqueue, name)
        StoppableProcess.__init__(self, message_conn)

    def run(self):
        # Template method: subclasses supply beforerun/process/afterrun.
        self.fulldebug("Starting process (beforerun method)")
        self.beforerun()
        self.fulldebug("Entering main loop (process method)")
        while self.stayAlive:
            self.process()
            self.checkmessages()
        self.fulldebug("Finalizing process (afterrun method)")
        self.afterrun()

    def checkmessages(self):
        # Non-blocking: only read the control pipe when a message is waiting.
        if self.messages.poll():
            m = self.messages.recv()
            self.fulldebug("Received message: {0}".format(m))
            self.processmessage(m)
class StoppableProcessLogger(ProcessLogger, StoppableProcess):
    """A ProcessLogger that also polls a control pipe so it can be stopped.

    NOTE(review): both bases are initialised explicitly rather than via
    super() — confirm neither base relies on cooperative MRO chaining.
    """
    def __init__(self, logqueue, message_conn, logfile):
        ProcessLogger.__init__(self, logqueue, logfile)
        StoppableProcess.__init__(self, message_conn)

    def run(self):
        self.fulldebug("Entering main loop")
        while self.stayAlive:
            self.processlogs()
            self.checkmessages()
        # Drain whatever is still queued before the process exits.
        self.fulldebug("Exiting main loop, trying to empty queue")
        self.getmessages()
        self.sortandwrite(1)

    def checkmessages(self):
        # Non-blocking: only read the control pipe when a message is waiting.
        if self.messages.poll():
            m = self.messages.recv()
            self.fulldebug("Received message: {0}".format(m))
            self.processmessage(m)
"repo_name": "Bierkaai/python-mp-preprocessor-old",
"path": "enhancedmp/enhancedprocessors.py",
"copies": "1",
"size": "1904",
"license": "mit",
"hash": 2023541924853280500,
"line_mean": 30.75,
"line_max": 68,
"alpha_frac": 0.6491596639,
"autogenerated": false,
"ratio": 4.077087794432548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5226247458332548,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coen'
import mpfileprocessor
from multiprocessing import JoinableQueue
def info(m):
    # Echo to stdout and to the multiprocessing logger (Python 2 print statement).
    print m
    mpfileprocessor.get_logger().info(m)
def duplicate(line):
    """Return the line with a 'COPY: ' marker and a second copy appended."""
    return "{0}COPY: {0}".format(line)
if __name__ == "__main__":
    # End-to-end smoke test: one reader feeds two processors which feed one writer.
    mplogger = mpfileprocessor.configure_logger()
    mplogger.debug('TEST')
    readerqueue = JoinableQueue()
    writerqueue = JoinableQueue()
    # Reader pushes lines from in.dat onto readerqueue.
    reader = mpfileprocessor.FileReader(
        'in.dat', readerqueue, outputpernlines=1, name='HappyFileReader')
    # Writer drains writerqueue into out.dat, overwriting any previous run.
    writer = mpfileprocessor.FileWriter(
        'out.dat', writerqueue, outputpernlines=1, overwrite=True,
        timeout=1, retries=2)
    # Two identical processors apply duplicate() to lines in parallel.
    processor1 = mpfileprocessor.FunctionLineProcessor(
        readerqueue, writerqueue, duplicate, name='LineDuplicator1',
        timeout=1, retries=2
    )
    processor2 = mpfileprocessor.FunctionLineProcessor(
        readerqueue, writerqueue, duplicate, name='LineDuplicator2',
        timeout=1, retries=2
    )
    info("Starting reader")
    reader.start()
    info("Starting writer")
    writer.start()
    info("Starting 2 processors")
    processor1.start()
    processor2.start()
    # Shutdown order: reader, its queue (all lines consumed), the
    # processors, the writer's queue, and finally the writer itself.
    info("Waiting for reader to join")
    reader.join()
    info("Waiting for readerqueue to join")
    readerqueue.join()
    info("Waiting for processors to join")
    processor1.join()
    processor2.join()
    info("Waiting for writerqueue to join")
    writerqueue.join()
    info("Waiting for writer to join")
    writer.join()
    info("All done, thank you for your patience")
| {
"repo_name": "Bierkaai/mp-preprocessor",
"path": "test.py",
"copies": "1",
"size": "1524",
"license": "mit",
"hash": -3423242196821495000,
"line_mean": 24.8305084746,
"line_max": 73,
"alpha_frac": 0.6725721785,
"autogenerated": false,
"ratio": 3.858227848101266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030800026601265,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coen'
import sys
import time
import random
from enhancedprocessors import *
from multiprocessing import Pipe
from stoppablemultiprocessing import Message, STOP
class RandomLogger(StoppableLoggingProcess):
    """Toy worker that sleeps random intervals and logs debug messages."""
    def __init__(self, logqueue, message_conn, name):
        super(RandomLogger, self).__init__(logqueue, message_conn, name)
        # No-argument seed: uses system entropy (or the current time).
        random.seed()

    def process(self):
        # One main-loop iteration: sleep up to 5s, log, sleep again, log.
        time.sleep(random.randint(0,5))
        self.debug("Slept a while. Woke up")
        time.sleep(random.randint(0,5))
        self.debug("Going back to sleep...")
if __name__ == "__main__":
    # Shared log queue, the logger process, and a pipe to tell it to stop.
    logqueue, logger, logger_connection = setuplogging("logfile.log", FULLDEBUG)
    sleepers = []
    connections = []
    # Create four sleeper workers, each with its own control pipe.
    for x in range(4):
        to_process, to_me = Pipe()
        connections.append(to_me)
        sleeper = RandomLogger(logqueue, to_process, "Sleeper {0}".format(x))
        sleepers.append(sleeper)
    logqueue.put(LogMessage(DEBUG, "TEST"))
    logger.start()
    time.sleep(5)
    for s in sleepers:
        s.start()
    # Let the sleepers run for a while before asking them to stop.
    time.sleep(20)
    for c in connections:
        c.send(Message(STOP))
    for s in sleepers:
        s.join()
    # Give the logger time to drain the queue before stopping it.
    time.sleep(10)
    logger_connection.send(Message(STOP))
    logger.join()
| {
"repo_name": "Bierkaai/python-mp-preprocessor-old",
"path": "enhancedmp/toytest.py",
"copies": "1",
"size": "1249",
"license": "mit",
"hash": 1033512161229273700,
"line_mean": 19.8166666667,
"line_max": 80,
"alpha_frac": 0.6293034428,
"autogenerated": false,
"ratio": 3.5282485875706215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9569023223786259,
"avg_score": 0.017705761316872428,
"num_lines": 60
} |
__author__ = 'coen'
import time
import datetime
import sys
from multiprocessing import Process, Queue
from Queue import Empty, Full
# TODO: more configuration options!
# Numeric log severities, ordered from most verbose to most severe.
FULLDEBUG = 5
DEBUG = 10
MOREINFO = 20
INFO = 30
WARNING = 40
ERROR = 50
CRITICAL = 60
# Level used when a caller does not specify one.
DEFAULTLOGLEVEL = INFO
# Reverse map from numeric level to its display name.
LEVELDESCRIPTION = {
    5: "FULLDEBUG",
    10: "DEBUG",
    20: "MOREINFO",
    30: "INFO",
    40: "WARNING",
    50: "ERROR",
    60: "CRITICAL"
}
def get_level_description(level):
    """Return the display name for a numeric log level.

    Unknown levels map to "UNKNOWNLEVEL" instead of raising.  (The
    original caught every Exception just to handle a dictionary miss;
    dict.get does the same without masking unrelated errors.)
    """
    return LEVELDESCRIPTION.get(level, "UNKNOWNLEVEL")
def setup(logfile):
    """Create the shared log queue plus a ProcessLogger bound to *logfile*."""
    logqueue = Queue()
    return logqueue, ProcessLogger(logqueue, logfile)
class LogMessage(object):
    """A timestamped log record exchanged between processes.

    Comparison operators order messages chronologically: m1 < m2 exactly
    when m1.timestamp < m2.timestamp.
    """

    def __init__(self, level, message, origin="unknown"):
        self.level = level
        self.message = message
        self.origin = origin
        self.timestamp = time.time()

    def get_level_description(self):
        """Human-readable name of this message's level."""
        return get_level_description(self.level)

    def age(self):
        """Seconds elapsed since this message was created."""
        return time.time() - self.timestamp

    def __len__(self):
        return len(self.message)

    # --- chronological ordering ------------------------------------
    def __eq__(self, other):
        return self.timestamp == other.timestamp

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self.timestamp < other.timestamp

    def __gt__(self, other):
        return self.timestamp > other.timestamp

    def __le__(self, other):
        return (self < other) or (self == other)

    def __ge__(self, other):
        return (self > other) or (self == other)

    def __str__(self):
        stamp = datetime.datetime.fromtimestamp(self.timestamp)
        fractional = str(self.timestamp - int(self.timestamp))[2:]
        return "{0}.{4} - {1}[{3}]: {2}".format(
            stamp.strftime(
                "%Y-%m-%d %H:%M:%S"
            ),
            self.get_level_description(),
            self.message, self.origin,
            fractional
        )
class Logger(object):
    """Abstract base providing leveled convenience logging methods.

    Subclasses must implement log(); each shorthand below wraps its
    message in a LogMessage tagged with this logger's name.
    """

    def __init__(self, name=None):
        if name is None:
            self.name = "NameNotSet"
        else:
            assert (isinstance(name, str))
            self.name = name

    def log(self, message, level=DEFAULTLOGLEVEL):
        """Abstract delivery hook; subclasses must override."""
        raise NotImplementedError(
            "log(self, message, level) has no implementation in logger {0}, class {1}"
            .format(self.name, self.__class__)
        )

    def buildmessageandlog(self, message, level):
        """Wrap *message* in a LogMessage at *level* and dispatch it."""
        assert (isinstance(message, str))
        assert (isinstance(level, int))
        self.log(LogMessage(level, message, self.name))

    # One shorthand per severity level.
    def fulldebug(self, message):
        self.buildmessageandlog(message, FULLDEBUG)

    def debug(self, message):
        self.buildmessageandlog(message, DEBUG)

    def moreinfo(self, message):
        self.buildmessageandlog(message, MOREINFO)

    def info(self, message):
        self.buildmessageandlog(message, INFO)

    def warning(self, message):
        self.buildmessageandlog(message, WARNING)

    def error(self, message):
        self.buildmessageandlog(message, ERROR)

    def critical(self, message):
        self.buildmessageandlog(message, CRITICAL)
class LoggingProcess(Process, Logger):
    ''' Multiprocessor with logging functionality.

    Forwards log messages onto a shared queue, retrying a bounded number
    of times when the queue is full.
    '''

    def __init__(self, log_queue, name=None, max_retries=5):
        """
        :param log_queue: shared queue that LogMessages are put onto
        :param name: logger name passed to the Logger base
        :param max_retries: attempts before giving up on a full queue
            (resolves the old "make retries a parameter" TODO; the
            default of 5 matches the previous hard-coded value)
        """
        Process.__init__(self)
        Logger.__init__(self, name)
        self.log_queue = log_queue
        self.max_retries = max_retries

    def log(self, message):
        """Put *message* on the queue, retrying while it reports Full."""
        retries = 0
        success = False
        while retries < self.max_retries and not success:
            try:
                self.log_queue.put(message)
                success = True
            except Full:
                retries += 1
        if not success:
            raise Exception("Logging queue overflow")
        elif retries > 0:
            self.warning("Had to retry {0} times to write to log"
                         .format(retries))
class ProcessLogger(Process, Logger):
    ''' Should be instantiated by main process; collects, sorts, and
    writes log messages pulled from a shared queue.
    '''

    def __init__(self, log_queue, logfile=None):
        """
        :param log_queue: queue that worker processes log onto
        :param logfile: path to append to; when None, messages go to
            sys.stdout (a file object, handled specially below)
        """
        Process.__init__(self)
        Logger.__init__(self, "ProcessLogger")
        if logfile is None:
            self.logfile = sys.stdout
        else:
            self.logfile = logfile
        self.log_queue = log_queue
        self.messagestack = []

    def run(self):
        ''' Probably good to override this method, it won't stop... Ever...
        '''
        while True:
            self.processlogs()

    def processlogs(self):
        ''' Get log messages, sort them and write to logfile. '''
        self.getmessages(5)
        self.sortandwrite()

    def getmessages(self, timeout=-1):
        ''' Pull messages from the queue onto the local stack.

        :param timeout: negative timeout means: get full queue
        '''
        queue_empty = False
        start = time.time()
        # TODO: make max message a parameter and seconds as well
        while (len(self.messagestack) < 10000
               and not queue_empty
               and ((time.time() - start < timeout) or timeout < 0)):
            try:
                message = self.log_queue.get(True, 2)
                self.messagestack.append(message)
            except Empty:
                queue_empty = True
        if queue_empty:
            self.fulldebug("Emptied queue, stack size: {0}"
                           .format(len(self.messagestack)))
        else:
            self.fulldebug("Pulled messages from queue, stack size: {0}"
                           .format(len(self.messagestack)))

    def sortandwrite(self, split=0.5):
        ''' Sort stacked messages chronologically and flush the oldest
        *split* fraction to the log destination.

        BUGFIX: when no logfile path was supplied, self.logfile is the
        sys.stdout file object and the original open(self.logfile, 'a')
        raised TypeError; file-like destinations are now written to
        directly.
        '''
        self.messagestack.sort()
        # TODO: make splitpoint a parameter
        splitpoint = int(round(split * len(self.messagestack)))
        writables = [str(x) + "\n" for x in self.messagestack[:splitpoint]]
        self.messagestack = self.messagestack[splitpoint:]
        if hasattr(self.logfile, 'write'):
            self.logfile.writelines(writables)
        else:
            with open(self.logfile, 'a') as f_obj:
                f_obj.writelines(writables)

    def log(self, message):
        """Local log override: keep our own messages on the stack."""
        assert isinstance(message, LogMessage)
        self.messagestack.append(message)
| {
"repo_name": "Bierkaai/python-mp-preprocessor-old",
"path": "enhancedmp/logmultiprocessing.py",
"copies": "1",
"size": "6314",
"license": "mit",
"hash": 6059313738473167000,
"line_mean": 26.5720524017,
"line_max": 86,
"alpha_frac": 0.5815647767,
"autogenerated": false,
"ratio": 4.140327868852459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001651093552281547,
"num_lines": 229
} |
__author__ = 'cogbot'
class GestureDoesNotExistError(Exception):
    """Raised when a requested gesture name is not defined."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class FacialExpressionDoesNotExistError(Exception):
    """Raised when a requested facial expression name is not defined."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
class ParamFormatting():
    """Static helpers that validate method parameters, raising TypeError
    with a formatted message when a check fails.
    """

    @staticmethod
    def instance_name(instance):
        # NOTE(review): locals() here only sees this function's own
        # locals, so this cannot recover the caller's variable name; in
        # practice it yields 'instance' (or '').  Kept only so the error
        # messages keep their existing shape.
        instance_name = ''
        for k, v in list(locals().items()):  # .items() works on py2 and py3
            if v is instance:
                instance_name = k
        return instance_name

    @staticmethod
    def assert_types(method, parameter, *types):
        """Raise TypeError unless *parameter* is an instance of *types*."""
        method_name = method.__name__
        param_name = ParamFormatting.instance_name(parameter)
        if not isinstance(parameter, types):
            raise TypeError("{0}() parameter {1}={2} is not part of type {3}".format(method_name, param_name, parameter, types))

    @staticmethod
    def assert_range(method, parameter, minimum, maximum):
        """Raise TypeError unless minimum <= parameter <= maximum."""
        method_name = method.__name__
        param_name = ParamFormatting.instance_name(parameter)
        if not (minimum <= parameter <= maximum):
            raise TypeError("{0}() parameter {1}={2} is not between the range {3} - {4}".format(method_name, param_name, parameter, minimum, maximum))

    @staticmethod
    def assert_greater_than(method, parameter, other_value):
        """Raise TypeError unless parameter > other_value."""
        method_name = method.__name__
        param_name = ParamFormatting.instance_name(parameter)
        if not (parameter > other_value):
            raise TypeError("{0}() parameter {1}={2} is not greater than {3}".format(method_name, param_name, parameter, other_value))

    @staticmethod
    def assert_greater_than_or_equal(method, parameter, other_value):
        """Raise TypeError unless parameter >= other_value."""
        method_name = method.__name__
        param_name = ParamFormatting.instance_name(parameter)
        if not (parameter >= other_value):
            raise TypeError("{0}() parameter {1}={2} is not greater than or equal to {3}".format(method_name, param_name, parameter, other_value))

    @staticmethod
    def assert_less_than(method, parameter, other_value):
        """Raise TypeError unless parameter < other_value."""
        method_name = method.__name__
        param_name = ParamFormatting.instance_name(parameter)
        if not (parameter < other_value):
            raise TypeError("{0}() parameter {1}={2} is not less than {3}".format(method_name, param_name, parameter, other_value))

    @staticmethod
    def assert_less_than_or_equal(method, parameter, other_value):
        """Raise TypeError unless parameter <= other_value."""
        method_name = method.__name__
        param_name = ParamFormatting.instance_name(parameter)
        # BUGFIX: the original tested the undefined name `value`, which
        # raised NameError instead of the intended TypeError.
        if not (parameter <= other_value):
            raise TypeError("{0}() parameter {1}={2} is not less than or equal to {3}".format(method_name, param_name, parameter, other_value))
| {
"repo_name": "jdddog/hri",
"path": "hri_api/src/hri_api/util/errors.py",
"copies": "1",
"size": "2779",
"license": "bsd-3-clause",
"hash": 9134163381993687000,
"line_mean": 35.5657894737,
"line_max": 150,
"alpha_frac": 0.635120547,
"autogenerated": false,
"ratio": 4.033381712626996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168502259626996,
"avg_score": null,
"num_lines": null
} |
""" Author: Cole Howard
Build additional training vectors, which are rotations, skews, and pans,
of the original MNIST dataset
"""
import csv
import numpy as np
from random import randrange
from skimage.transform import rotate
# from matplotlib import pyplot as plt
# from matplotlib import cm
def spin_o_rama(vector, ang):
    """Rotate a 2-D image array by *ang* degrees, padding with constants."""
    return rotate(vector, ang, mode='constant')
# def visualization(vector):
# plt.imshow(vector, cmap=cm.Greys_r)
# plt.axis('off')
# plt.pause(0.0001)
# plt.show()
if __name__ == '__main__':
    # Load the MNIST csv: first row is a header; first column the label.
    with open('train.csv', 'r') as f:
        reader = csv.reader(f)
        t = list(reader)
    train = [[int(x) for x in y] for y in t[1:]]
    ans_train = [x[0] for x in train]
    train_set = [x[1:] for x in train]
    ans_train.pop(0)
    train_set.pop(0)
    train_set = [np.array(elem, dtype=float) for elem in train_set]
    # Generate 10 randomly rotated copies of each ORIGINAL vector.
    # BUGFIX: the original enumerated train_set while appending to it,
    # which never terminates; iterate a snapshot instead.
    originals = list(train_set)
    original_answers = list(ans_train)
    for _ in range(10):
        for idx, elem in enumerate(originals):
            square = elem.reshape((28, 28))
            rand_ang = randrange(-120, 120)
            train_set.append(spin_o_rama(square, rand_ang).flatten())
            ans_train.append(original_answers[idx])
    # Persist "label pixel pixel ..." one sample per line.
    # BUGFIX: file objects have no writeline(); the original call raised
    # AttributeError.
    with open('train_ext.txt', 'w') as g:
        for idx, elem in enumerate(train_set):
            g.write("{0} {1}\n".format(
                ans_train[idx], " ".join(str(v) for v in elem)))
"repo_name": "uglyboxer/finnegan",
"path": "mnist_build.py",
"copies": "1",
"size": "1414",
"license": "mit",
"hash": 7763643622065803000,
"line_mean": 22.1967213115,
"line_max": 73,
"alpha_frac": 0.6004243281,
"autogenerated": false,
"ratio": 3.1352549889135255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9223138073538568,
"avg_score": 0.002508248694991389,
"num_lines": 61
} |
"""Author: Cole Howard
Email: uglyboxer@gmail.com
neuron.py is a basic linear neuron, that can be used in a perceptron
Information on that can be found at:
https://en.wikipedia.org/wiki/Perceptron
It was written as a class specifically for network ()
Usage:
From any python script:
from neuron import Neuron
API:
update_weights, fires are the accessible methods
usage noted in their definitions
"""
from math import e
from numpy import append as app
from numpy import dot
class Neuron:
    """ A class model for a single linear neuron.

    Parameters
    ----------
    vector_size : int
        Length of an input vector
    target : int
        What the vector will associate with its weights.  It will claim
        this is the correct answer if it fires
    sample_size : int
        Total size of sample to be trained on
    answer_set : list
        The list of correct answers associated with the training set

    Attributes
    ----------
    threshold : float
        The tipping point at which the neuron fires (specifically in
        relation to the dot product of the sample vector and the weights)
    weights : list
        The "storage" of the neuron, adjusted on each training case.
        The last entry is initialized to 1 as the weight of the bias
    expected : list
        1 where the training answer equals ``target``, else 0
    guesses : list
        Running record of this neuron's guess for each training vector
    """

    def __init__(self, vector_size, target, sample_size, answer_set):
        self.threshold = .5
        self.answer_set = answer_set
        self.target = target
        self.weights = [0 for x in range(vector_size + 1)]
        self.weights[-1] = 1  # Bias weight
        self.sample_size = sample_size
        self.expected = [0 if y != self.target else 1 for y in self.answer_set]
        self.guesses = [0 for z in range(self.sample_size)]

    def train_pass(self, vector, idx):
        """ Passes a vector through the neuron once.

        Parameters
        ----------
        vector : list
            Training vector
        idx : int
            The position of the vector in the sample

        Returns
        -------
        None, always
        """
        # Converged: every guess already matches the expected output.
        if self.expected == self.guesses:
            return None
        else:
            # Error is measured against the guess from the previous pass.
            error = self.expected[idx] - self.guesses[idx]
            if self.fires(vector)[0]:
                self.guesses[idx] = 1
            else:
                self.guesses[idx] = 0
            self.update_weights(error, vector)
            return None

    def _dot_product(self, vector):
        """ Dot product of the (bias-extended) vector and the weights.

        Parameters
        ----------
        vector : list
            Any sample vector

        Returns
        -------
        float
            Sum over elementwise products of the vector and the weights.
        """
        if len(vector) < len(self.weights):
            vector = app(vector, 1)  # append the constant bias input
        return dot(vector, self.weights)

    def _sigmoid(self, z):
        """ Logistic function, clamped for extreme inputs.

        Parameters
        ----------
        z : float
            The dot product of a sample vector and the weights

        Returns
        -------
        float
            Between 0 and 1 inclusive

        Notes
        -----
        Clamping avoids OverflowError from e ** 700.  BUGFIX: the
        original returned 1 for z == -700 (the boundary fell through to
        the final else branch); a large negative z must map to 0.
        """
        if z <= -700:
            return 0
        if z >= 700:
            return 1
        return 1 / (1 + e ** (-z))

    def update_weights(self, error, vector):
        """ Updates the weights stored in the receptors.

        Parameters
        ----------
        error : int
            The distance from the expected value of this training case
        vector : list
            A sample vector

        Returns
        -------
        None

        Notes
        -----
        l_rate (between 0 and 1) scales the correction: higher trains
        faster but risks unresolvable oscillations; lower is slower but
        more stable.  The bias weight (last entry) is untouched because
        *vector* has one fewer element than self.weights.
        """
        l_rate = .05
        for idx, item in enumerate(vector):
            self.weights[idx] += (item * l_rate * error)

    def fires(self, vector):
        """ Takes an input vector and decides if the neuron fires.

        Parameters
        ----------
        vector : list
            A sample vector

        Returns
        -------
        bool
            Did it fire? True(yes) or False(no)
        float
            The dot product of the vector and weights
        """
        dp = self._dot_product(vector)
        if self._sigmoid(dp) > self.threshold:
            return True, dp
        else:
            return False, dp
| {
"repo_name": "uglyboxer/linear_neuron",
"path": "mini_net/neuron.py",
"copies": "1",
"size": "5088",
"license": "mit",
"hash": 9062364177142762000,
"line_mean": 26.8032786885,
"line_max": 79,
"alpha_frac": 0.5636792453,
"autogenerated": false,
"ratio": 4.676470588235294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5740149833535295,
"avg_score": null,
"num_lines": null
} |
""" Author: Cole Howard
Title: Finnegan
An extensible neural net designed to explore Convolutional Neural Networks and
Recurrent Neural Networks via extensive visualizations.
"""
import numpy as np
from sklearn.preprocessing import normalize
from layer import Layer
# from matplotlib import cm
# from matplotlib import pyplot as plt
# import warnings
# warnings.filterwarnings("ignore", category=DeprecationWarning)
class Network:
    """ A multi layer neural net with backpropagation.

    Parameters
    ----------
    layers : int
        Number of layers to use in the network.
    neuron_count : list
        A list of integers that represent the number of neurons present
        in each hidden layer. (Size of input/output layers are dictated
        by dataset)
    vector : list
        Example vector to get size of initial input

    Attributes
    ----------
    possible : list
        A list of possible output values (digits 0-9)
    """

    def __init__(self, layers, neuron_count, vector):
        self.num_layers = layers
        self.neuron_count = neuron_count
        self.possible = [x for x in range(10)]
        # Layer 0 is sized from the input vector; every later layer is
        # sized from the neuron count of the layer before it.
        self.layers = [Layer(self.neuron_count[x], self.neuron_count[x-1]) if
                       x > 0 else Layer(self.neuron_count[x], len(vector))
                       for x in range(self.num_layers)]

    def _pass_through_net(self, vector, dropout=True):
        """ Sends a vector into the net.

        Parameters
        ----------
        vector : numpy array
            A numpy array representing a training input (without the
            target)
        dropout : bool
            Whether or not to perform random dropout in the pass through
            the net. (Set False for the testing set vectors)

        Returns
        -------
        numpy array
            Output of the last layer in the chain
        """
        for x, _ in enumerate(self.layers):
            vector = self.layers[x]._vector_pass(vector, dropout)
        return vector

    def _softmax(self, w, t=1.0):
        """Author: Jeremy M. Stober, edits by Martin Thoma
        Program: softmax.py
        Date: Wednesday, February 29 2012 and July 31 2014
        Description: Simple softmax function.
        Calculate the softmax of a list of numbers w.

        Parameters
        ----------
        w : list of numbers
        t : float

        Return
        ------
        a list of the same length as w of non-negative numbers

        Examples
        --------
        >>> softmax([0.1, 0.2])
        array([ 0.47502081,  0.52497919])
        >>> softmax([0, 10])
        array([  4.53978687e-05,   9.99954602e-01])
        """
        # Subtracting the max is the standard numerical-stability trick.
        e_x = np.exp(w - np.max(w))
        out = e_x / e_x.sum()
        return out

    def _backprop(self, guess_vector, target_vector):
        """ Takes the output of the net and initiates backpropagation.

        In the output layer:
            error matrix = [(out * (1-out) * (target-out)) per neuron]
        In a hidden layer:
            error = out * (1-out) * dot(next-layer error matrix,
            next-layer weights)
        Weight updates are applied after every layer's errors exist.

        Parameters
        ----------
        guess_vector : numpy array
            The output from the last layer during a training pass
        target_vector : list
            List of expected values

        Returns
        -------
        True
            As evidence of execution
        """
        backwards_layer_list = list(reversed(self.layers))
        for i, layer in enumerate(backwards_layer_list):
            if i == 0:
                # Output layer: no layer "ahead" of it.
                hidden = False
                layer_ahead = None
            else:
                hidden = True
                layer_ahead = backwards_layer_list[i-1]
            if layer._layer_level_backprop(guess_vector, layer_ahead, target_vector, hidden):
                continue
            else:
                print("Backprop failed on layer: " + str(i))
        for layer in self.layers:
            layer._update_weights()
        return True

    def train(self, dataset, answers, epochs):
        """ Runs the training dataset through the network *epochs* times.

        Parameters
        ----------
        dataset : Numpy nested array
            The collection of training data (vectors and the associated
            target value)
        answers : numpy array
            The array of correct answers to associate with each training
            vector
        epochs : int
            Number of times to run the training set through the net
        """
        for x in range(epochs):
            for vector, target in zip(dataset, answers):
                # One-hot encode the target over the possible outputs.
                target_vector = [0 if x != target else 1 for x in self.possible]
                vector = np.array(vector).reshape(1, -1)
                vector = vector.astype(float)
                vector = normalize(vector, copy=False)[0]
                y = self._pass_through_net(vector)
                z = self._softmax(y)
                self._backprop(z, target_vector)
            # Early exit once the output layer's mean error is negligible.
            amt_off = np.mean(np.abs(self.layers[self.num_layers-1].error))
            print(amt_off)
            if amt_off < .000000001:
                break

    def run_unseen(self, test_set):
        """ Makes guesses on the unseen data.

        Each vector is normalized and passed through the net with
        dropout disabled; the index of the strongest output is the
        guess.

        Parameters
        ----------
        test_set : list
            List of numpy arrays representing the unseen vectors

        Returns
        -------
        list
            a list of ints (the guesses for each vector)
        """
        guess_list = []
        for vector in test_set:
            vector = np.array(vector).reshape(1, -1)
            vector = vector.astype(float)
            temp = self._pass_through_net(normalize(vector, copy=False)[0],
                                          dropout=False)
            guess_list.append(temp.argmax())
        return guess_list

    def report_results(self, guess_list, answers):
        """ Reports results of guesses on the unseen set.

        Parameters
        ----------
        guess_list : list
        answers : list
        """
        successes = 0
        for idx, item in enumerate(guess_list):
            if answers[idx] == item:
                successes += 1
        print(guess_list)
        print("Successes: {} Out of total: {}".format(successes,
                                                      len(guess_list)))
        print("For a success rate of: ", successes/len(guess_list))

    def visualization(self, vector, vector_name):
        """ Renders *vector* as a titled 28x28 grayscale image.

        BUGFIX: matplotlib's imports are commented out at module level,
        so plt/cm were unresolved names here (NameError on every call);
        import them locally instead.
        """
        from matplotlib import cm
        from matplotlib import pyplot as plt
        y = np.reshape(vector, (28, 28))
        plt.imshow(y, cmap=cm.Greys_r)
        plt.suptitle(vector_name)
        plt.axis('off')
        plt.pause(0.0001)
        plt.show()
if __name__ == '__main__':
    # This module is a library; launch training via net_launch.py instead.
    print("Please use net_launch.py")
| {
"repo_name": "uglyboxer/finnegan",
"path": "finnegan/network.py",
"copies": "1",
"size": "7625",
"license": "mit",
"hash": 1441958661014855000,
"line_mean": 31.0378151261,
"line_max": 93,
"alpha_frac": 0.5596065574,
"autogenerated": false,
"ratio": 4.342255125284738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5401861682684738,
"avg_score": null,
"num_lines": null
} |
__author__ = 'colinc'
import itertools
from collections import defaultdict
import pylev
from django.db import transaction
from django.db.models import get_models, Model
from django.contrib.contenttypes.generic import GenericForeignKey
from django.core.management.base import BaseCommand
from teams.models import Team
from runners.models import Runner
def levenshtein_ratio(str_one, str_two):
    """
    Similarity score in [0, 1] from the Levenshtein edit distance.
    """
    combined_length = len(str_one + str_two)
    distance = pylev.levenshtein(str_one, str_two)
    return (combined_length - distance) / float(combined_length)
def find_duplicate_teams():
    """
    Tries to intelligently identify teams that are really the same team.
    Returns {pk of team with fewer runners: set(pks of larger twins)}.
    """
    doubles = defaultdict(set)
    for first, second in itertools.combinations(Team.objects.all(), 2):
        if not team_similarity(first, second):
            continue
        smaller, larger = sorted(
            (first, second),
            key=lambda j: len(j.runner_set.all()))
        doubles[smaller.pk].add(larger.pk)
    return dict(doubles)
def find_duplicate_runners():
    """
    Finds duplicate runners on the same team and merges each group,
    keeping the runner with the most results as the primary.
    """
    teams = Team.objects.all()
    for team_index, team in enumerate(teams):
        print("Analyzing team {0:d} of {1:d}".format(team_index, len(teams)))
        doubles = defaultdict(set)
        for first, second in itertools.combinations(team.runner_set.all(), 2):
            if not runner_similarity(first, second):
                continue
            smaller, larger = sorted(
                (first, second),
                key=lambda j: len(j.result_set.all())
            )
            doubles[smaller.pk].add(larger.pk)
        merge_duplicate_objects(combine_dict(dict(doubles)), 'runner')
def combine_dict(dupes):
    """
    Given a mapping of primary key -> set(primary keys), transitively
    merge connected keys and return a list of disjoint tuples.
    """
    adjacency = [[key] + list(members) for key, members in dupes.items()]
    merged = []
    for key in dupes:
        merged.append({key})
        previous_size = 0
        # Keep absorbing connected groups until the component stabilizes.
        while previous_size != len(merged[-1]):
            previous_size = len(merged[-1])
            for member in merged[-1]:
                connected = sum([grp for grp in adjacency if member in grp], [])
                merged[-1] = merged[-1].union(set(connected))
    return list(set(tuple(component) for component in merged))
def merge_duplicate_objects(dupes, object_type):
    """
    Accepts a list of pk tuples, looks each group up in the matching
    table, and merges it into the object with the most related records.
    """
    for pks in dupes:
        if object_type == 'team':
            candidates = sorted(
                Team.objects.filter(pk__in=pks),
                key=lambda j: len(j.runner_set.all()))
        elif object_type == 'runner':
            candidates = sorted(
                Runner.objects.filter(pk__in=pks),
                key=lambda j: len(j.result_set.all())
            )
        merge_model_objects(candidates[0], candidates[1:])
def team_merge():
    """
    Wraps up all the team and runner merging functionality.
    """
    merge_duplicate_objects(combine_dict(find_duplicate_teams()), 'team')
def runner_similarity(runner_one, runner_two):
    """
    Scores similarity between two runners (assumes they are
    on the same team).  Runners sharing any result are distinct people.
    """
    shared_results = set(runner_one.result_set.all()).intersection(
        set(runner_two.result_set.all()))
    if shared_results:
        return False
    if levenshtein_ratio(str(runner_one), str(runner_two)) < 0.9:
        return False
    try:
        if str(runner_one) == str(runner_two):
            print("Found a double of {0} on {1.team}".format(runner_one, runner_two))
        else:
            print("Found {0} and {1} on {1.team}".format(runner_one, runner_two))
    except UnicodeEncodeError:
        # Printing is best-effort; a bad encoding must not stop the merge.
        print('Unicode Error! Continuing...')
    return True
def team_similarity(team_one, team_two):
    """
    Scores a similarity between two teams: they must share a word in
    their names, share at least two runners, and overlap in at most two
    meets.
    """
    shared_words = set(team_one.name.lower().split()).intersection(
        set(team_two.name.lower().split())
    )
    if not shared_words:
        return False
    runners_one = set(str(runner) for runner in team_one.runner_set.all())
    runners_two = set(str(runner) for runner in team_two.runner_set.all())
    if len(runners_one.intersection(runners_two)) < 2:
        return False
    meets_one = set(meet.id for meet in team_one.meets)
    meets_two = set(meet.id for meet in team_two.meets)
    if len(meets_one.intersection(meets_two)) > 2:
        return False
    print("Same team!: {0:s} and {1:s}".format(str(team_one), str(team_two)))
    return True
@transaction.commit_on_success
def merge_model_objects(primary_object, alias_objects=None, keep_old=False):
    """
    Use this function to merge model objects (i.e. Users, Organizations, Polls,
    etc.) and migrate all of the related fields from the alias objects to the
    primary object.

    The whole migration runs inside one transaction (see decorator); the
    statement order below is deliberate: FKs, then M2Ms, then generic
    FKs, then blank-field fill-in, then deletion of the alias.

    :param primary_object: model instance that survives the merge
    :param alias_objects: instance or list of instances to fold in
    :param keep_old: when True, alias objects are not deleted afterwards

    Usage:
    from django.contrib.auth.models import User
    primary_user = User.objects.get(email='good_email@example.com')
    duplicate_user = User.objects.get(email='good_email+duplicate@example.com')
    merge_model_objects(primary_user, duplicate_user)
    """
    # Allow a single instance to be passed instead of a list.
    if not isinstance(alias_objects, list):
        alias_objects = [alias_objects]
    # check that all aliases are the same class as primary one and that
    # they are subclass of model
    primary_class = primary_object.__class__
    if not issubclass(primary_class, Model):
        raise TypeError('Only django.db.models.Model subclasses can be merged')
    for alias_object in alias_objects:
        if not isinstance(alias_object, primary_class):
            raise TypeError('Only models of same class can be merged')
    # Get a list of all GenericForeignKeys in all models
    generic_fields = []
    for model in get_models():
        for field_name, field in filter(lambda x: isinstance(
                x[1], GenericForeignKey), model.__dict__.items()):
            generic_fields.append(field)
    # Fields on the primary object that are currently empty; these may be
    # filled in from the aliases below.
    blank_local_fields = set([field.attname for
                              field in
                              primary_object._meta.local_fields if
                              getattr(primary_object, field.attname) in
                              [None, '']])
    # Loop through all alias objects and migrate their data to the
    # primary object.
    for alias_object in alias_objects:
        # Migrate all foreign key references from alias object to
        # primary object.
        for related_object in alias_object._meta.get_all_related_objects():
            # The variable name on the alias_object model.
            alias_varname = related_object.get_accessor_name()
            # The variable name on the related model.
            obj_varname = related_object.field.name
            related_objects = getattr(alias_object, alias_varname)
            for obj in related_objects.all():
                setattr(obj, obj_varname, primary_object)
                obj.save()
        # Migrate all many to many references from alias object to
        # primary object.
        for related_many_object in alias_object._meta.get_all_related_many_to_many_objects():
            alias_varname = related_many_object.get_accessor_name()
            obj_varname = related_many_object.field.name
            if alias_varname is not None:
                # standard case
                related_many_objects = getattr(alias_object, alias_varname).all()
            else:
                # special case, symmetrical relation, no reverse accessor
                related_many_objects = getattr(alias_object, obj_varname).all()
            for obj in related_many_objects.all():
                getattr(obj, obj_varname).remove(alias_object)
                getattr(obj, obj_varname).add(primary_object)
        # Migrate all generic foreign key references from alias object to
        # primary object.
        for field in generic_fields:
            filter_kwargs = {field.fk_field: alias_object._get_pk_val(),
                             field.ct_field: field.get_content_type(
                                 alias_object)}
            for generic_related_object in field.model.objects.filter(**filter_kwargs):
                setattr(generic_related_object, field.name, primary_object)
                generic_related_object.save()
        # Try to fill all missing values in primary object by values of
        # duplicates
        filled_up = set()
        for field_name in blank_local_fields:
            val = getattr(alias_object, field_name)
            if val not in [None, '']:
                setattr(primary_object, field_name, val)
                filled_up.add(field_name)
        # Each field is filled at most once (first alias with a value wins).
        blank_local_fields -= filled_up
        if not keep_old:
            alias_object.delete()
    primary_object.save()
    return primary_object
class Command(BaseCommand):
    """Management command that deduplicates teams, then runners."""

    help = "Cleans results in the database"

    def handle(self, *args, **options):
        """Entry point: merge duplicate teams, then merge duplicate runners."""
        team_merge()
        find_duplicate_runners()
| {
"repo_name": "ColCarroll/bugbug",
"path": "libs/management/commands/duplicates.py",
"copies": "1",
"size": "8707",
"license": "mit",
"hash": -762908043209372800,
"line_mean": 32.2328244275,
"line_max": 93,
"alpha_frac": 0.6296083611,
"autogenerated": false,
"ratio": 3.803844473569244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4933452834669244,
"avg_score": null,
"num_lines": null
} |
__version__ = 1.0
import smtplib, os
from email.MIMEMultipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
import time
import config
def send_mail(send_from, send_to, subject, text, smtp, files=None):
    """Send an email with optional attachments over an authenticated,
    TLS-protected SMTP connection.

    :param send_from: sender address
    :param send_to: list of recipient addresses
    :param subject: subject line
    :param text: plain-text body
    :param smtp: SMTP host (host or host:port)
    :param files: optional list of file paths to attach
    """
    # BUGFIX: the original used files=[] — a shared mutable default
    # argument; None is the safe sentinel.
    if files is None:
        files = []
    assert isinstance(send_to, list)
    assert isinstance(files, list)
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    for f in files:
        # Close each attachment handle (the original leaked them).
        with open(f, 'rb') as attachment:
            part = MIMEApplication(attachment.read())
        part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
        msg.attach(part)
    server = smtplib.SMTP(smtp)
    server.ehlo()
    server.starttls()
    server.login(config.email['username'], config.email['password'])
    server.sendmail(send_from, send_to, msg.as_string())
    server.close()
def send_text(phone_number, msg):
    """Send *msg* as an SMS via the AT&T email-to-text gateway."""
    fromaddr = "Address"
    toaddrs = phone_number + "@txt.att.net"
    body = ("From: {0}\r\nTo: {1}\r\n\r\n{2}").format(fromaddr, toaddrs, msg)
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(config.email['username'], config.email['password'])
    server.sendmail(fromaddr, toaddrs, body)
    server.quit()
def main(fname):
date = time.strftime("%m/%d/%Y")
send_from = 'sender'
send_to = ['recipients']
subject = 'subject'
text = 'Message text'
files=[fname]
smtp='smtp.gmail.com:587'
send_mail(send_from, send_to, subject, text,smtp, files)
print 'Email Sent'
if __name__ == "__main__":
    # BUGFIX: main() requires a filename; the original bare main() call
    # raised TypeError.  Take the attachment path from the command line.
    import sys
    main(sys.argv[1])
| {
"repo_name": "cmgerber/Email_SMS",
"path": "Email_SMS.py",
"copies": "1",
"size": "1781",
"license": "mit",
"hash": -1108613985671754900,
"line_mean": 27.7258064516,
"line_max": 97,
"alpha_frac": 0.6580572712,
"autogenerated": false,
"ratio": 3.25,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9368618263983214,
"avg_score": 0.007887801443357207,
"num_lines": 62
} |
__author__ = 'colinwren'
# Jinja2 base layout shared by every living-documentation page: a Bootstrap 3
# shell whose navbar is built from metadata.menu, inline CSS tweaks, and the
# jQuery handlers that drive scenario-outline example switching, collapsible
# panels, and delayed image loading (img[delayedsrc]).
livingdoc_base = """
<!DOCTYPE html>
<html>
<head lang="en">
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Living Documentation for {{ metadata.name }}</title>
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css" rel="stylesheet"/>
<style type="text/css">
.panel{
box-shadow: none;
-webkit-box-shadow: none;
margin-bottom: 2em;
}
.bdd-hero-outer{
background-size: cover;
padding-right: 0;
}
.bdd-hero-inner{
background: linear-gradient(to bottom, rgba(255,255,255,0) 0%,rgba(255,255,255,1) 100%);
width: calc(100% + 30px);
padding-top: 200px;
}
.panel-title > h3 > small, h2 > small{
line-height: 2;
}
.panel .panel, .background .panel {
padding-left: 0;
padding-right: 0;
}
@media (min-width: 1200px){
.bdd-icons{
width: 12.33333%;
}
}
.bdd-icons > p {
height: 10em;
}
.bdd-icons > p > .icon{
position: absolute;
top: 26%;
left: 35%;
font-size: 3em;
}
.bdd-icons > p > .small-icon{
position: absolute;
top: 10%;
left: 50%;
font-size: 1.5em;
}
.bdd-icons span.icon{
top: 15%;
}
.bdd-icons > p > .icon > .glyphicon-minus:last-child{
top: -0.8em;
}
</style>
</head>
<body>
<nav class="navbar navbar-default" id="top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="#">{{ metadata.name }}</a>
</div>
<div class="navbar-collapse collapse" id="navbar">
<ul class="nav navbar-nav navbar-right">
<li><a href="{% block homelevel %}../{% endblock %}index.html">Home</a></li>
{% for item in metadata.menu %}
<li><a href="{% block navlevel%}../{% endblock %}{{ item.slug }}/index.html">{{ item.name }}</a></li>
{% endfor %}
</ul>
</div>
</div>
</nav>
<div class="container">
{% block content %}{% endblock %}
</div>
<script src="http://code.jquery.com/jquery-1.11.3.min.js" type="text/javascript"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js" type="text/javascript"></script>
<script type="text/javascript">
$(document).ready(function(){
$('.scenario_outline_examples >tbody tr').click(function(){
var i = null, ec = null, tc = null;
i = $(this).attr('data-id');
ec = $(this).parents('.panel-body').find('.examples');
var e = $(this).parents('.panel').find('#'+ i);
tc = $(this).parents('.panel').find('.scenario_title');
var t = $(this).parents('.panel').find('.example_title #title_'+i);
ec[0].innerHTML = e[0].innerHTML;
tc[0].innerHTML = t[0].innerHTML;
});
$('.panel-heading').click(function(){
$(this).parents('.panel').find('.panel-body').toggle();
$(this).parents('.panel').find('.panel-footer').toggle();
});
$('img').each(function(){
$(this).attr('src', $(this).attr('delayedsrc'));
})
});
</script>
</body>
</html>
"""
# Site landing page: name, description, and a flat link list built from
# site.menu. Overrides the nav-level blocks so links are root-relative.
livingdoc_index = """
{% extends "base.html" %}
{% block content %}
<h1>{{ site.name }}</h1>
{{ site.description }}
<h2>Check out</h2>
<ul>
{% for item in site.menu %}
<li><a href="{{ item.slug }}/index.html">{{ item.name }}</a></li>
{% endfor %}
</ul>
{% endblock %}
{% block navlevel %}{% endblock %}
{% block homelevel %}{% endblock %}
"""
# Directory listing page: items rendered in alternating left/right layouts
# (text first on odd rows, image first on even rows), each linking to its
# own detail page at {{ item.slug }}.html.
livingdoc_directory_index = """
{% extends "base.html" %}
{% block content %}
{% for item in items %}
{% if loop.index is odd %}
<div class="row col-lg-12">
<div class="col-lg-4">
<h3>{% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %}</h3>
{% if item.blurb %}{{ item.blurb }}{% endif %}
<p><a href="{{ item.slug }}.html">Read more about {% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %} ></a></p>
</div>
<div class="col-lg-offset-2 col-lg-6">
{% if item.image %}
<img src="#" delayedsrc="{{ item.image }}" class="col-lg-12"/>
{% endif %}
</div>
<p> </p>
</div>
{% else %}
<div class="row col-lg-12">
<div class="col-lg-6">
{% if item.image %}
<img src="#" delayedsrc="{{ item.image }}" class="col-lg-12"/>
{% endif %}
</div>
<div class="col-lg-offset-2 col-lg-4">
<h3>{% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %}</h3>
{% if item.blurb %}{{ item.blurb }}{% endif %}
<p><a href="{{ item.slug }}.html">Read more about {% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %} ></a></p>
</div>
<p> </p>
</div>
{% endif %}
<div class="row col-lg-12">
<p> </p>
<p> </p>
</div>
{% endfor %}
{% endblock %}
{% block feature_active %} class="active"{% endblock %}
"""
# Single-item detail page: hero banner, optional Background steps rendered as
# Given/When/Then panels, linked feature list, Scenario and Scenario Outline
# panels (outlines carry hidden per-example content switched by the base
# template's JS), and an optional Other Resources link list.
livingdoc_directory_single = """
{% extends "base.html" %}
{% block content %}
<div class="col-lg-12">
<ul class="list-inline pull-right">
{% if item.background %}<li><a href="#background">Background</a></li>{% endif %}
{% if item.features %}<li><a href="#features">Features</a></li>{% endif %}
{% if item.scenarios %}<li><a href="#scenarios">Scenarios</a></li>{% endif %}
{% if item.other_resources %}<li><a href="#other-resources">Other Resources</a></li>{% endif %}
</ul>
</div>
<div class="row col-lg-12 bdd-hero-outer" style="background-image: url('{{ item.image }}');">
<div class="row col-lg-12 bdd-hero-inner">
<h1>{% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %}</h1>
{% if item.description %}
<p class="lead">{% for description in item.description %}{{description}}<br>{% endfor %}</p>
{% endif %}
</div>
</div>
<div class="row col-lg-12 background">
{% if item.background %}
<h2 id="background">Background <small class="pull-right"><a href="#top">Back to top</a></small></h2>
<hr>
{% for step in item.background.steps %}
{% if step.keyword=='Given' %}
<div class="panel panel-warning col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Given</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='When' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p>
<i class="glyphicon glyphicon-flash small-icon"></i><br>
<i class="glyphicon glyphicon-user icon"></i></p>
</div>
<div class="panel panel-info col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>When</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='Then' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p><span class="icon"><i class="glyphicon glyphicon-minus"></i><br><i class="glyphicon glyphicon-minus"></i></span></p>
</div>
<div class="panel panel-success col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Then</h4></div>
</div>
<ul class="list-group">
{% endif %}
<li class="list-group-item">{% if step.keyword not in ['Given', 'When', 'Then'] %}<strong>{{ step.keyword }}</strong> {% endif %}{{ step.name }} {% if step.status=='passed' %}<span class="pull-right"><i class="glyphicon glyphicon-ok"></i></span>{% endif %}{% if step.status=='failed' %}<span class="pull-right"><i class="glyphicon glyphicon-remove"></i></span>{% endif %}
{% if step.text %}
<pre>{{ step.text}}</pre>
{% endif %}
{% if step.table %}
<table class="table">
<thead>
<tr>
{% for heading in step.table.headings %}
<th>{{ heading }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in step.table.rows %}
<tr>
{% for cell in row.cells %}
<td>{{ cell }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
<p> </p>
{% endif %}
</div>
{% if item.features %}
<div class="row col-lg-12">
<h2 id="features">Features <small class="pull-right"><a href="#top">Back to top</a></small></h2>
<hr>
<ul>
{% for feature in item.features %}
<li><a href="../features/{{ feature.slug }}.html">{{ feature.name }}</a></li>
{% endfor %}
</ul>
</div>
{% endif %}
<div class="row col-lg-12">
<h2 id="scenarios">Scenarios <small class="pull-right"><a href="#top">Back to top</a></small></h2>
<hr>
<!-- <div class="container"> -->
{% for scenario in item.scenarios %}
{% if scenario.type=='scenario' %}
<div class="panel panel-default" id="{{ scenario.slug_id }}">
<div class="panel-heading">
<div class="panel-title">
<h3><span class="text-muted">Scenario:</span> {{ scenario.name }}
<small class="pull-right">{% if scenario.tags %}
<span class="text-muted">Tags: </span>
{% for tag in scenario.tags %} <a href="../tags/{{ tag }}.html"><span class="badge btn-info">{{ tag }}</span></a> {% endfor %}
{% endif %}
</small>
</h3>
</div>
</div>
<div class="panel-body">
{% for step in scenario.steps %}
{% if step.keyword=='Given' %}
<div class="panel panel-warning col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Given</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='When' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p>
<i class="glyphicon glyphicon-flash small-icon"></i><br>
<i class="glyphicon glyphicon-user icon"></i></p>
</div>
<div class="panel panel-info col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>When</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='Then' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p><span class="icon"><i class="glyphicon glyphicon-minus"></i><br><i class="glyphicon glyphicon-minus"></i></span></p>
</div>
<div class="panel panel-success col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Then</h4></div>
</div>
<ul class="list-group">
{% endif %}
<li class="list-group-item">{% if step.keyword not in ['Given', 'When', 'Then'] %}<strong>{{ step.keyword }}</strong> {% endif %}{{ step.name }} {% if step.status=='passed' %}<span class="pull-right"><i class="glyphicon glyphicon-ok"></i></span>{% endif %}{% if step.status=='failed' %}<span class="pull-right"><i class="glyphicon glyphicon-remove"></i></span>{% endif %}
{% if step.text %}
<pre>{{ step.text}}</pre>
{% endif %}
{% if step.table %}
<table class="table">
<thead>
<tr>
{% for heading in step.table.headings %}
<th>{{ heading }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in step.table.rows %}
<tr>
{% for cell in row.cells %}
<td>{{ cell }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</li>
{% endfor %}
</ul>
</div>
<p> </p>
<p><span class="pull-right"><a href="../features/{{ scenario.slug }}" class="btn">Link to scenario</a></span></p>
</div>
</div>
<p> </p>
{% endif %}
{% if scenario.type=='scenario_outline' %}
<div class="panel panel-default" id="{{ scenario.slug_id }}">
<div class="panel-heading">
<div class="panel-title">
<h3><span class="text-muted">Scenario Outline:</span> <span class="scenario_title">{{ scenario.name | e }}</span> <span class="pull-right">{% if scenario.tags %}<span class="text-muted">Tags: </span>{% for tag in scenario.tags %} <a href="../tags/{{ tag }}.html"><span class="badge btn-info">{{ tag }}</span></a> {% endfor %} {% endif %}</span></h3>
</div>
</div>
<div class="panel-body" id="{{ loop.index }}_content">
<div class="{{ loop.index }}_example examples">
{% for step in scenario.steps %}
{% if step.keyword=='Given' %}
<div class="panel panel-warning col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Given</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='When' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p>
<i class="glyphicon glyphicon-flash small-icon"></i><br>
<i class="glyphicon glyphicon-user icon"></i></p>
</div>
<div class="panel panel-info col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>When</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='Then' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p><span class="icon"><i class="glyphicon glyphicon-minus"></i><br><i class="glyphicon glyphicon-minus"></i></span></p>
</div>
<div class="panel panel-success col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Then</h4></div>
</div>
<ul class="list-group">
{% endif %}
<li class="list-group-item">{% if step.keyword not in ['Given', 'When', 'Then'] %}<strong>{{ step.keyword }}</strong> {% endif %}{{ step.name | e }} {% if step.status=='passed' %}<span class="pull-right"><i class="glyphicon glyphicon-ok"></i></span>{% endif %}{% if step.status=='failed' %}<span class="pull-right"><i class="glyphicon glyphicon-remove"></i></span>{% endif %}</li>
</ul>
{% if step.table %}
<table class="table table-hover">
<thead>
<tr>
{% for heading in step.table.headings %}
<th>{{ heading | e }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in step.table.rows %}
<tr>
{% for cell in row.cells %}
<td>{{ cell | e }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
{% endfor %}
</div>
</div>
{% if scenario.examples %}
<div class="container-fluid" id="{{ loop.index }}_examples">
{% for example in scenario.examples %}
<h3>Examples <small class="pull-right">Click on a row to change scenario text</small></h3>
<table class="table scenario_outline_examples table-hover">
<thead>
<tr>
{% for header in example.table.headings %}
<th>{{ header }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in example.table.rows %}
<tr data-id="example_{{ row.id | replace('.', '_') }}">
{% for cell in row.cells %}
<th>{{ cell }}</th>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{% endfor %}
</div>
{% endif %}
<div class="hidden example_content">
{% for scen in scenario.scenarios %}
<div class="container-fluid" id="example_{{ scen._row.id | replace('.', '_') }}">
{% for step in scen.steps %}
{% if step.keyword=='Given' %}
<div class="panel panel-warning col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Given</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='When' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p>
<i class="glyphicon glyphicon-flash small-icon"></i><br>
<i class="glyphicon glyphicon-user icon"></i></p>
</div>
<div class="panel panel-info col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>When</h4></div>
</div>
<ul class="list-group">
{% endif %}
{% if step.keyword=='Then' %}
</ul>
</div>
<div class="col-lg-1 bdd-icons">
<p><span class="icon"><i class="glyphicon glyphicon-minus"></i><br><i class="glyphicon glyphicon-minus"></i></span></p>
</div>
<div class="panel panel-success col-lg-3">
<div class="panel-heading">
<div class="panel-title"><h4>Then</h4></div>
</div>
<ul class="list-group">
{% endif %}
<li class="list-group-item">{% if step.keyword not in ['Given', 'When', 'Then'] %}<strong>{{ step.keyword }}</strong> {% endif %}{{ step.name | e }} {% if step.status=='passed' %}<span class="pull-right"><i class="glyphicon glyphicon-ok"></i></span>{% endif %}{% if step.status=='failed' %}<span class="pull-right"><i class="glyphicon glyphicon-remove"></i></span>{% endif %}</li>
</ul>
{% if step.table %}
<table class="table table-hover">
<thead>
<tr>
{% for heading in step.table.headings %}
<th>{{ heading | e }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for row in step.table.rows %}
<tr>
{% for cell in row.cells %}
<td>{{ cell | e }}</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
{% endfor %}
</div>
</div>
{% endfor %}
</div>
<div class="hidden example_title">
{% for scen in scenario.scenarios %}
<span id="title_example_{{ scen._row.id | replace('.', '_') }}">{{ scen.name }}</span>
{% endfor %}
</div>
<p> </p>
<p><span class="pull-right"><a href="../features/{{ scenario.slug }}" class="btn">Link to scenario</a></span></p>
</div>
</div>
<p> </p>
{% endif %}
{% endfor %}
</div>
{% if item.other_resources %}
<div class="row col-lg-12">
<h2 id="other-resources">Other Resources <small class="pull-right"><a href="#top">Back to top</a></small></h2>
<hr>
<ul>
{% for entry in item.other_resources %}
<li><a href="{{ entry.url }}">{{ entry.name }}</a></li>
{% endfor %}
</ul>
<p> </p>
</div>
{% endif %}
{% endblock %}
{% block feature_active %} class="active"{% endblock %}
"""
# Flat listing page (e.g. a tag page): alternating left/right item layout like
# the directory index, but each item also lists its linked features/scenarios.
livingdoc_single_page = """
{% extends "base.html" %}
{% block content %}
<div class="row">
<h2>{{ title }}</h2>
{% for item in items %}
{% if loop.index is odd %}
<div class="row col-lg-12">
<div class="col-lg-4">
<h3>{% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %}</h3>
{% if item.blurb %}{{ item.blurb }}{% endif %}
{% if item.features %}
<h4>Features</h4>
<ul>
{% for feature in item.features %}
<li><a href="../features/{{ feature.slug }}.html">{{ feature.name }}</a></li>
{% endfor %}
</ul>
{% endif %}
{% if item.scenarios %}
<h4>Scenarios</h4>
<ul>
{% for scenario in item.scenarios %}
<li><a href="../features/{{ scenario.slug }}">{{ scenario.name }}</a></li>
{% endfor %}
</ul>
{% endif %}
</div>
<div class="col-lg-offset-2 col-lg-6">
{% if item.image %}
<img src="#" delayedsrc="{{ item.image }}" class="col-lg-12"/>
{% endif %}
</div>
<p> </p>
</div>
{% else %}
<div class="row col-lg-12">
<div class="col-lg-6">
{% if item.image %}
<img src="#" delayedsrc="{{ item.image }}" class="col-lg-12"/>
{% endif %}
</div>
<div class="col-lg-offset-2 col-lg-4">
<h3>{% if item.title %}{{ item.title }}{% else %}{{ item.name }}{% endif %}</h3>
{% if item.blurb %}{{ item.blurb }}{% endif %}
{% if item.features %}
<h4>Features</h4>
<ul>
{% for feature in item.features %}
<li><a href="../features/{{ feature.slug }}.html">{{ feature.name }}</a></li>
{% endfor %}
</ul>
{% endif %}
{% if item.scenarios %}
<h4>Scenarios</h4>
<ul>
{% for scenario in item.scenarios %}
<li><a href="../features/{{ scenario.slug }}">{{ scenario.name }}</a></li>
{% endfor %}
</ul>
{% endif %}
</div>
<p> </p>
</div>
{% endif %}
<div class="row col-lg-12">
<p> </p>
<p> </p>
</div>
{% endfor %}
</div>
{% endblock %}
{% block tag_active %} class="active"{% endblock %}
""" | {
"repo_name": "Gimpneek/behave",
"path": "behave/reporter/livingdoc_templates.py",
"copies": "1",
"size": "32480",
"license": "bsd-2-clause",
"hash": -5952637886665612000,
"line_mean": 49.125,
"line_max": 425,
"alpha_frac": 0.3308189655,
"autogenerated": false,
"ratio": 5.139240506329114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5970059471829114,
"avg_score": null,
"num_lines": null
} |
"""Author, Colin Zeidler"""
import copy
from A2.Player import Human, Computer, h_diff_vs_opponent, h_max_my_stacks, h_minimize_opponent
class Game(object):
    """Coordinates a two-player stacking game on a GameBoard.

    Tracks whose turn it is, generates and validates moves, and runs the
    main loop until one player has no legal moves left.
    """

    def __init__(self, p1=None, p2=None):
        """Create a game; either side defaults to a Computer player.

        Supplied player objects get their .game reference pointed here.
        """
        if p1 is None:
            self.p1 = Computer(self, "R", h_minimize_opponent)
        else:
            self.p1 = p1
            self.p1.game = self
        if p2 is None:
            self.p2 = Computer(self, "G", h_minimize_opponent)
        else:
            self.p2 = p2
            self.p2.game = self
        self.board = GameBoard()
        self.board.setup_2player(self.p1.id, self.p2.id)
        self.current_player = self.p1
        self.next_player = self.p2

    def play(self):
        """Alternate half-turns between p1 and p2 until one of them loses."""
        # Fix: the original duplicated the whole half-turn body twice inside
        # the loop; it is now factored into _take_turn.
        while True:
            if self._take_turn(self.p1, self.p2):
                break
            if self._take_turn(self.p2, self.p1):
                break
        print(self.board)

    def _take_turn(self, player, opponent):
        """Run one half-turn for *player*; return True when the game ended.

        A player with no legal moves loses immediately.
        """
        self.current_player = player
        self.next_player = opponent
        if not self.get_moves_for_current_player():
            print("Player", self.current_player.id, "lost")
            return True
        print("Player", self.current_player.id, "turn")
        print(self.board)
        move = self.current_player.get_move()
        self.board.apply_move(move)
        return False

    def get_moves_for_player(self, player_id, board=None):
        """Return every legal move for *player_id* on *board* (default: live board).

        A move is ((x, y, count), (x2, y2, count)): take *count* pieces off
        the top of the stack at (x, y) and slide them along one axis.
        """
        state_board = self.board if board is None else board
        moves = []
        for y, row in enumerate(state_board):
            for x, tile in enumerate(row):
                if tile is None or len(tile) == 0:
                    continue  # removed corner or empty tile
                if tile[-1] != player_id:
                    continue  # only the owner of the top piece may move the stack
                for count in range(1, len(tile) + 1):
                    # A group of `count` pieces may travel up to `count` tiles.
                    for dist in range(1, count + 1):
                        # Same candidate order as before: +x, -x, +y, -y.
                        for dx, dy in ((dist, 0), (-dist, 0), (0, dist), (0, -dist)):
                            m = ((x, y, count), (x + dx, y + dy, count))
                            if self.valid_move(m, player_id, board=state_board):
                                moves.append(m)
        return moves

    def get_moves_for_current_player(self, board=None):
        """Legal moves for the player whose turn it is."""
        return self.get_moves_for_player(self.current_player.id, board=board)

    def get_moves_for_next_player(self, board=None):
        """Legal moves for the player who moves after the current one."""
        return self.get_moves_for_player(self.next_player.id, board=board)

    def ok_pos(self, x, y):
        """True when (x, y) is on the 8x8 board and not a removed corner.

        Corner layout is read from self.board; the corners are identical on
        every board, so this also holds for hypothetical search boards.
        """
        if x < 0 or x > 7 or y < 0 or y > 7:
            return False
        return self.board[y][x] is not None

    def valid_move(self, move, player_id, board=None):
        """True when *move* is legal for *player_id* on *board*.

        Checks, in order: start != dest; movement along a single axis only;
        distance within the number of pieces moved; both ends on the board;
        enough pieces in the source stack; and the mover owns its top piece.
        """
        state_board = self.board if board is None else board
        start, dest = move
        if start == dest:
            return False
        xdiff = abs(start[0] - dest[0])
        ydiff = abs(start[1] - dest[1])
        if xdiff > 0 and ydiff > 0:
            return False  # diagonal movement is not allowed
        if max(xdiff, ydiff) > start[2]:
            return False  # cannot travel farther than the number of pieces moved
        if not self.ok_pos(start[0], start[1]) or not self.ok_pos(dest[0], dest[1]):
            return False
        if start[2] > len(state_board[start[1]][start[0]]):
            return False  # not enough pieces in the source stack
        return state_board[start[1]][start[0]][-1] == player_id
class GameBoard(object):
    """8x8 board of piece stacks; the 2x2-ish corners are cut off (None)."""

    def __init__(self):
        # Each playable tile is a list used as a stack (index -1 is the top);
        # None marks the removed corner tiles.
        self.boardState = [[None, None, [], [], [], [], None, None],
                           [None, [], [], [], [], [], [], None],
                           [[], [], [], [], [], [], [], []],
                           [[], [], [], [], [], [], [], []],
                           [[], [], [], [], [], [], [], []],
                           [[], [], [], [], [], [], [], []],
                           [None, [], [], [], [], [], [], None],
                           [None, None, [], [], [], [], None, None]
                           ]

    def setup_2player(self, p1, p2):
        """Lay out the opening position for two players.

        Rows 1-6, columns 1-6 are filled in column pairs (AABBAA), with the
        leading colour swapping every row.  Fix: replaces the original 36
        hand-written append lines with two loops encoding the same pattern.
        """
        for y in range(1, 7):
            lead, other = (p1, p2) if y % 2 == 1 else (p2, p1)
            for x in range(1, 7):
                owner = lead if ((x - 1) // 2) % 2 == 0 else other
                self.boardState[y][x].append(owner)

    def setup_4player(self):
        """Placeholder: the four-player layout is not implemented yet."""
        pass

    def new_board_from_move(self, move):
        """Return (new GameBoard with *move* applied, pieces it removed).

        The current board is left untouched; its state is deep-copied first.
        """
        new_board = GameBoard()
        new_board.boardState = copy.deepcopy(self.boardState)
        deleted = new_board.apply_move(move)
        return new_board, deleted

    def apply_move(self, move):
        """Apply ((sx, sy, count), (dx, dy, count)) in place.

        Moves *count* pieces from the top of the source stack onto the
        destination; if the destination grows past 5 pieces the excess is
        trimmed from the bottom and returned (empty list otherwise).
        """
        (sx, sy, count), (dx, dy, _) = move
        moving = self.boardState[sy][sx][-count:]
        del self.boardState[sy][sx][-count:]
        dest = self.boardState[dy][dx]
        dest.extend(moving)
        removed = []
        overflow = len(dest) - 5
        if overflow > 0:
            removed = dest[:overflow]
            del dest[:overflow]
        return removed

    def __getitem__(self, item):
        """Index rows directly: board[y][x]."""
        return self.boardState[item]

    def __len__(self):
        """Number of rows (8)."""
        return len(self.boardState)

    def __str__(self):
        """Render the board, one 'height:owner' cell per tile, XXX for corners."""
        my_string = " 0 1 2 3 4 5 6 7\n"
        for y, row in enumerate(self.boardState):
            my_string += str(y) + " [ "
            for x, item in enumerate(row):
                if item is None:
                    my_string += "XXX "
                else:
                    my_string += str(len(item)) + ":" + str(item[-1] if len(item) > 0 else "X") + " "
            my_string += "]\n"
        return my_string
if __name__ == "__main__":
    # Maps menu numbers to heuristic functions; replaces the original
    # triplicated `if h_choice == N:` chains.
    HEURISTICS = {1: h_diff_vs_opponent, 2: h_max_my_stacks, 3: h_minimize_opponent}

    def _choose(low, high):
        """Read an integer in [low, high] from stdin, re-prompting on bad input.

        Fix: this validation loop was written out three times verbatim in the
        original script (with a `selction` typo); factored into one helper.
        """
        while True:
            raw = input("> ")
            try:
                value = int(raw)
            except ValueError:
                print("Enter an Integer")
                continue
            if low <= value <= high:
                return value
            print("Choose from {} to {}".format(low, high))

    print("Starting 2 player game")
    print("Select Game mode:")
    print("1. Human vs Computer")
    print("2. Computer vs Computer")
    mode = _choose(1, 2)

    print("Select a heuristic for the AI")
    print("1. diff against opponent")
    print("2. maximize my stack count")
    print("3. minimize opponent stack count")
    h1 = HEURISTICS[_choose(1, 3)]

    if mode == 1:
        player1 = Human(None, "R")
        player2 = Computer(None, "G", h1)
    else:
        print("Select a heuristic for the second AI")
        print("1. diff against opponent")
        print("2. maximize my stack count")
        print("3. minimize opponent stack count")
        h2 = HEURISTICS[_choose(1, 3)]
        player1 = Computer(None, "R", h1)
        player2 = Computer(None, "G", h2)

    game = Game(player1, player2)
    game.play()
| {
"repo_name": "ColinZeidler/PythonAI",
"path": "A2/Game.py",
"copies": "1",
"size": "10953",
"license": "mit",
"hash": -6990341026296672000,
"line_mean": 33.5520504732,
"line_max": 101,
"alpha_frac": 0.4996804528,
"autogenerated": false,
"ratio": 3.658316633266533,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9656154114095643,
"avg_score": 0.00036859439417781363,
"num_lines": 317
} |
"""Author, Colin Zeidler"""
MAX_DEPTH = 3
class Player(object):
    """Base class for game participants; concrete players implement get_move."""

    def __init__(self, game, id):
        # Back-reference to the Game instance and this player's piece marker.
        self.game = game
        self.id = id

    def get_move(self):
        """Return a move as (source, dest), where each is (x, y, count).

        count is the number of pieces to move and is the same in both
        tuples; x is the column and y is the row.  Subclasses override
        this; the base implementation returns None.
        """
        return None
class Human(Player):
    """Interactive player driven by console prompts."""

    def get_move(self):
        """Let the user inspect the board, then collect a validated move."""
        # Inspection phase: loop until the user chooses to enter a move.
        while True:
            print("To view the board type B")
            print("To view a stack type S")
            print("To enter your move type M")
            choice = input("> ").upper()
            if choice == "B":
                print(self.game.board)
            elif choice == "S":
                x, y = get_pos(self.game.ok_pos)
                print("Stack looks like:")
                print("Bottom", self.game.board[y][x], "Top")
            elif choice == "M":
                break
            else:
                print("Invalid Choice")
        # Entry phase: keep asking until the move passes validation.
        while True:
            print("Start location:")
            start_x, start_y = get_pos(self.game.ok_pos)
            print("Destination")
            dest_x, dest_y = get_pos(self.game.ok_pos)
            count = get_int("Number of pieces to move")
            move = ((start_x, start_y, count), (dest_x, dest_y, count))
            if self.game.valid_move(move, self.id):
                return move
            print("Invalid move")
def get_pos(check_func):
    """Prompt for an (x, y) board position until *check_func* accepts it."""
    while True:
        x = get_int("Column / X")
        y = get_int("Row / Y")
        if check_func(x, y):
            return x, y
        print("Invalid location")
def get_int(text):
    """Print *text*, then read stdin lines until one parses as an integer."""
    while True:
        print(text)
        try:
            return int(input("> "))
        except ValueError:
            print("Enter an integer")
class Computer(Player):
    """AI player: picks moves via depth-limited minimax with alpha-beta pruning."""

    def __init__(self, game, id, h_func):
        # h_func(player, board, killed_items) scores a board state from this
        # player's perspective.
        Player.__init__(self, game, id)
        self.h_func = h_func

    def get_move(self):
        """Looks at the current board state, and calculates best move"""
        children_moves = self.game.get_moves_for_current_player()
        best_move = None
        best_score = -100000  # stands in for -infinity
        print(len(children_moves))  # NOTE(review): looks like leftover debug output — confirm
        for move in children_moves:
            # The opponent replies next, so each of our moves is scored
            # with min_value.
            score = self.min_value(move, -100000, 100000, 1)
            if score > best_score:
                best_score = score
                best_move = move
        return best_move

    def max_value(self, move, alpha, beta, depth):
        # Maximizing ply: apply *move* on a copied board, then take the best
        # of the current player's replies.
        board, killed_items = self.game.board.new_board_from_move(move)
        if depth >= MAX_DEPTH:
            # Depth cutoff: score the position with the heuristic.
            return self.h_func(self, board, killed_items)
        children_moves = self.game.get_moves_for_current_player(board=board)
        for c in children_moves:
            t = self.min_value(c, alpha, beta, depth+1)
            alpha = max(alpha, t)
            if alpha >= beta:
                # Beta cutoff: the minimizer would never allow this line.
                return alpha
        return alpha

    def min_value(self, move, alpha, beta, depth):
        # Minimizing ply: apply *move*, then assume the opponent (next
        # player) picks the reply that is worst for us.
        board, killed_items = self.game.board.new_board_from_move(move)
        if depth >= MAX_DEPTH:
            return self.h_func(self, board, killed_items)
        children_moves = self.game.get_moves_for_next_player(board=board)
        for c in children_moves:
            t = self.max_value(c, alpha, beta, depth+1)
            beta = min(beta, t)
            if beta <= alpha:
                # Alpha cutoff: we already have a better guaranteed line.
                return beta
        return beta
# heuristic functions for the AI to use
def h_diff_vs_opponent(player, board, killed_items):
    """Score: stacks the player controls minus stacks the opponent controls,
    plus one point per captured opponent piece in *killed_items*."""
    pid = player.id
    score = 0
    for row in board:
        for stack in row:
            if not stack:
                continue  # removed corner (None) or empty tile
            score += 1 if stack[-1] == pid else -1
    # Captured opponent pieces also count in our favour.
    score += sum(1 for piece in killed_items if piece != pid)
    return score
def h_max_my_stacks(player, board, killed_items):
    """Score: total piece count of stacks topped by the player's own piece
    (taller owned stacks are worth more).  *killed_items* is ignored."""
    pid = player.id
    return sum(
        len(stack)
        for row in board
        for stack in row
        if stack and stack[-1] == pid
    )
def h_minimize_opponent(player, board, killed):
    """Score: -2 for every stack the opponent controls; *killed* is ignored."""
    pid = player.id
    total = 0
    for row in board:
        for stack in row:
            if stack and stack[-1] != pid:
                total -= 2
    return total
| {
"repo_name": "ColinZeidler/PythonAI",
"path": "A2/Player.py",
"copies": "1",
"size": "4932",
"license": "mit",
"hash": 3271216167426208000,
"line_mean": 28.3571428571,
"line_max": 86,
"alpha_frac": 0.5231143552,
"autogenerated": false,
"ratio": 3.8865248226950353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9909297087035703,
"avg_score": 0.00006841817186644773,
"num_lines": 168
} |
__author__ = 'Collaboration'
import numpy as np
import os.path
def calc_vel_update(positions, mass, dt):
    """Gravitational acceleration on body 0 from a body of *mass* at positions[1].

    positions: pair of 3-component numpy position vectors (G == 1 in these
    units).  Note: *dt* is accepted for call-site symmetry but unused here —
    the caller multiplies by dt itself.
    """
    offset = positions[1] - positions[0]
    dist_cubed = np.power(np.sum(offset ** 2), 1.5)
    return mass * offset / dist_cubed
def simulate_step(bodies, dt, dt_output=None):
    """Generator: advance an N-body system with explicit Euler steps of *dt*.

    *bodies* must expose .r (N x 3 positions), .v (N x 3 velocities) and
    .m (masses); the arrays are updated in place.  Yields the current
    simulation time roughly every *dt_output* time units (defaults to dt,
    i.e. after every step).  The generator never terminates on its own.
    """
    current_t = 0
    current_step = 0
    n_bodies = bodies.r.shape[0]
    if not dt_output:
        dt_output = dt
    while True:
        # Explicit Euler: positions first, then velocities.
        for i in range(n_bodies):
            bodies.r[i, :] += bodies.v[i, :] * dt
        for i in range(n_bodies):
            # Accumulate the acceleration on body i from every other body.
            vel_update = np.array([0.0, 0.0, 0.0])
            for k in range(n_bodies):
                if i == k:
                    continue  # no self-interaction
                vel_update += calc_vel_update((bodies.r[i, :], bodies.r[k, :]), bodies.m[k], dt)
            bodies.v[i, :] += vel_update * dt
        if current_step * dt_output <= current_t:
            # Time to emit an output sample.
            current_step += 1
            yield current_t
current_t += dt | {
"repo_name": "kostassabulis/nbody-workshop-2015",
"path": "nbody/euler.py",
"copies": "1",
"size": "1081",
"license": "mit",
"hash": -5049375713432014000,
"line_mean": 25.7692307692,
"line_max": 96,
"alpha_frac": 0.4708603145,
"autogenerated": false,
"ratio": 3.3261538461538462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42970141606538464,
"avg_score": null,
"num_lines": null
} |
__author__ = 'College Persistence Team'
from abstractpandasfeature import *
class AbstractBoundedPandasFeature(AbstractPandasFeature):
    '''
    This represents a feature that takes in an optional start value and end value on one of its index columns
    A temporal feature where you need to bound on a start time and an end time would be an example
    of a bounded featured
    '''

    def __init__(self, lower_bound=None, upper_bound=None):
        """Optionally bound the feature's SQL on bound_col between the two values."""
        AbstractPandasFeature.__init__(self)
        # Touching the property early fails fast (NotImplementedError) if a
        # subclass forgot to define bound_col.
        self.bound_col
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        # Fix: `not '{}' in x` rewritten to the idiomatic `'{}' not in x`.
        if '{}' not in self.sql_query:
            raise NotImplementedError('Bad bounded feature class definition; SQL query does not have a {} in which to place a bounding where clause.')

    def load_rows(self, connection, read_from_cache=False, write_to_cache=False):
        '''Overwritten version of this function that handles
        the presence/expectation of bounds on a column'''
        # Format sql query to accommodate bounds before loading.
        f_query = self.generate_query()
        self.read_sql_into_rows(f_query, connection)
        # Fix: py2-only `print x` statement replaced with the parenthesized
        # form, which behaves identically on Python 2 and 3.
        print(self.rows.head())

    def generate_query(self):
        """Return sql_query with a bounding where-clause spliced into its {} slot."""
        # Fix: `== None` / `!= None` comparisons replaced with `is` / `is not`
        # (PEP 8); behavior is unchanged for None and string bounds.
        if self.lower_bound is None and self.upper_bound is None:
            f_query = self.sql_query.format('')
        else:
            # Chain onto an existing WHERE clause if the query already has one.
            add = 'and ' if 'where' in self.sql_query else 'where '
            # NOTE(review): bound values are interpolated directly into the SQL
            # string — fine for trusted internal values, but not safe if bounds
            # ever come from untrusted input (use query parameters then).
            if self.lower_bound is not None:
                add += self.bound_col + ' >= \'' + self.lower_bound + '\''
            if self.upper_bound is not None:
                if self.lower_bound is not None:
                    add += ' and '
                add += self.bound_col + ' < \'' + self.upper_bound + '\''
            f_query = self.sql_query.format(add)
        return f_query

    @property
    def bound_col(self):
        # Subclasses must override this with the column name used for bounding.
        raise NotImplementedError
| {
"repo_name": "dssg/education-college-public",
"path": "code/modeling/featurepipeline/abstractboundedpandasfeature.py",
"copies": "1",
"size": "1967",
"license": "mit",
"hash": 8031369162557921000,
"line_mean": 35.4259259259,
"line_max": 150,
"alpha_frac": 0.6085409253,
"autogenerated": false,
"ratio": 4.176220806794055,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5284761732094055,
"avg_score": null,
"num_lines": null
} |
__author__ = 'College Persistence Team'
from abstractfeature import *
class AbstractBoundedFeature(AbstractFeature):
'''
This represents a feature that takes in an optional start value and end value on one of its index columns
A temporal feature where you need to bound on a start time and an end time would be an example
of a bounded featured
'''
def __init__(self, lower_bound=None,upper_bound = None):
AbstractFeature.__init__(self)
self.bound_col # will raise NotImplementedError if this is not dealt with in child class
self.lower_bound = lower_bound
self.upper_bound = upper_bound
if not '{}' in self.sql_query:
raise NotImplementedError('Bad bounded feature class definition; SQL query does not have a {} in which to place a bounding where clause.')
def load_rows(self,connection,read_from_cache=False,write_to_cache=False):
'''Overwritten version of this function that handles
the presence/expectation of bounds on a column'''
#Format sql query to accommodate bounds
f_query = self.generate_query()
self.read_sql_into_rows(f_query,connection)
print self.rows.head()
def generate_query(self):
if self.lower_bound == None and self.upper_bound == None:
f_query = self.sql_query.format('')
else:
if 'where' in self.sql_query:
add = 'and '
else:
add = 'where '
if self.lower_bound != None:
add += self.bound_col +' >= \''+self.lower_bound+'\''
if self.upper_bound != None:
if self.lower_bound != None:
add += ' and '
add += self.bound_col +' < \''+self.upper_bound+'\''
f_query = self.sql_query.format(add)
return f_query
@property
def bound_col(self):
raise NotImplementedError
| {
"repo_name": "dssg/education-college-public",
"path": "code/modeling/featurepipeline/abstractboundedfeature.py",
"copies": "1",
"size": "1997",
"license": "mit",
"hash": -6362277662006396000,
"line_mean": 34.9814814815,
"line_max": 150,
"alpha_frac": 0.5873810716,
"autogenerated": false,
"ratio": 4.294623655913979,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5382004727513978,
"avg_score": null,
"num_lines": null
} |
__author__ = 'College Persistence Team'
from abstractpipeline import *
import pandas as pd
import postprocessors as pp
class AbstractFeature(AbstractPipelineConfig):
    '''
    Abstract representation of a feature. Should be extended by classes representing
    individual features, not instantiated.

    Subclasses must override the abstract properties declared at the bottom
    of this class (name, sql_query, index_col, feature_col, feature_type,
    postprocessors, index_level); the constructor touches each one so a
    missing override fails fast with NotImplementedError.
    '''
    def __init__(self):
        #test to make sure subclass overwrote fields
        self.name
        self.sql_query
        self.index_col
        self.feature_col
        self.index_level
        self.feature_type
        # self.default_value
        self.postprocessors
        # def mypost(df,param):
        #     return new_df
        # Sanity-check the declared feature type and its postprocessors.
        if self.feature_type not in ['boolean', 'numerical', 'categorical','date']:
            raise ValueError("Feature type must be 'boolean', 'numerical', 'date' or 'categorical'.")
        elif self.feature_type == 'categorical' and pp.getdummies not in self.postprocessors:
            raise ValueError("Categorical features must have a getdummies postprocessor")
        # Populated by load_rows(); holds a pandas DataFrame once loaded.
        self.rows = None
    def summary(self,prefix=''):
        '''Return a multi-line, human-readable description of this feature;
        every line is prefixed with ``prefix``.'''
        p = prefix
        summary = p+self.name + '\n'
        # summary += p+'Type: '+self.feature_type +'\n'
        summary += p+'SQL query: '+self.sql_query +'\n'
        summary += p+'Index column: ' + self.index_col+'\n'
        summary += p+'Feature column: '+self.feature_col+'\n'
        return summary
    def load_rows(self,connection,read_from_cache=False,write_to_cache=False):
        '''
        Load the database rows that comprise this field either from the database or from the local
        cache if told to do so. If instructed, write to the cache if it's not already there.
        :param read_from_cache: whether to look for and read from a local cache if it is there
        :param write_to_cache: whether to write loaded results to local cache if it isn't there
        :return: nothing. Loads rows into rows instance variable
        '''
        # NOTE(review): the cache flags are accepted but not acted on in this
        # method — confirm whether caching is implemented elsewhere.
        self.read_sql_into_rows(self.sql_query,connection)
    def read_sql_into_rows(self,query,connection):
        # Execute the query and index the resulting DataFrame by index_col.
        print query
        self.rows = pd.read_sql(query,connection)
        self.rows.set_index(self.index_col,inplace=True)
        # self.rows.sort(inplace=True)
        # print self.rows.head(50)
    def generate_query(self):
        # Unbounded features use the query template as-is; bounded
        # subclasses override this to splice in a where-clause.
        return self.sql_query
    # A subclass must override these class fields
    # to be instantiated without an error
    @property
    def name(self):
        raise NotImplementedError
    @property
    def sql_query(self):
        raise NotImplementedError
    @property
    def index_col(self):
        raise NotImplementedError
    @property
    def feature_col(self):
        raise NotImplementedError
    @property
    def feature_type(self):
        raise NotImplementedError
    # @property
    # def default_value(self):
    #     raise NotImplementedError
    @property
    def postprocessors(self):
        raise NotImplementedError
    @property
    def index_level(self):
        raise NotImplementedError
| {
"repo_name": "dssg/education-college-public",
"path": "code/modeling/featurepipeline/abstractfeature.py",
"copies": "1",
"size": "3111",
"license": "mit",
"hash": 8248977813223213000,
"line_mean": 31.4731182796,
"line_max": 101,
"alpha_frac": 0.62487946,
"autogenerated": false,
"ratio": 4.425320056899004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0134722396750721,
"num_lines": 93
} |
__author__ = 'Collin Day and Joe Kvedaras'
import random
# Example of how to implement an RPS player within the framework
import Player
import Message
class CDJKPlayer(Player.Player):
    """Rock-paper-scissors player that records the opponent's move history
    and delegates move selection to RpsPlayingStrategy."""
    def __init__(self):
        # Call super class constructor, then start with an empty history.
        Player.Player.__init__(self)
        self.reset()
    def play(self):
        # Choose the next move based on everything the opponent has played.
        return RpsPlayingStrategy.play(self.opponents_moves)
    def reset(self):
        # Forget the opponent's history (done at the start of each match).
        self.opponents_moves = []
    def get_name(self):
        return "Minimizer"
    def notify(self, msg):
        """Handle framework notifications, maintaining the opponent's
        move history used by play()."""
        # We use notifications to store opponent's moves in past rounds
        # Process match-start and round-end messages
        # At the start of the match, clear opponent moves history since a new match has started
        # At the end of a round, append move to opponent's move history. Move history is used
        # to compute the next move played.
        if msg.is_match_start_message():
            players = msg.get_players()
            if players[0] == self or players[1] == self:
                self.reset()
        elif msg.is_round_end_message():
            players = msg.get_players()
            # Check if this message is for me and only then proceed
            if (players[0] == self) or (players[1] == self):
                # In this case, (by convention) the info is a tuple of the moves made and result e.g. ((1, 0), 1) which
                # means player 1 played paper (1), the player 2 played rock(0) and the result was that
                # player 1 won
                moves, result = msg.get_info()
                # RPS is a two person game; figure out which of the players is me
                # and which one is the opponent
                if players[0] == self:
                    opponent = 1
                else:
                    opponent = 0
                # Update opponent's past moves history
                self.opponents_moves.append(moves[opponent])
# An implementation of a simple rps playing strategy
class RpsPlayingStrategy(object):
    """Plays the opponent's middle-frequency move.

    Rationale: opponents are assumed to target our most- or least-played
    move, so we respond with the one the opponent has played a middling
    number of times; when no strict middle exists (ties), a uniformly
    random move is chosen to stay unpredictable.
    """
    @staticmethod
    def play(opponents_moves):
        """Return the move to play (0=rock, 1=paper, 2=scissors) given the
        opponent's past moves (a sequence of ints in {0, 1, 2})."""
        # count number of rock, paper and scissors moves made in the past
        count = [0, 0, 0]
        for move in opponents_moves:
            count[move] += 1
        # Bug fix: the original combined comparisons with bitwise '&'/'|'.
        # Comparison operators bind looser than '&', so e.g.
        # `count[0] > count[1] & count[0] < count[2]` parsed as the chained
        # comparison `count[0] > (count[1] & count[0]) < count[2]`, not the
        # intended "strictly between" test. Use chained comparisons with
        # logical or instead.
        if count[1] < count[0] < count[2] or count[2] < count[0] < count[1]:
            use = 0
        elif count[0] < count[1] < count[2] or count[2] < count[1] < count[0]:
            use = 1
        elif count[0] < count[2] < count[1] or count[1] < count[2] < count[0]:
            use = 2
        else:
            # No strict middle (some counts tie): fall back to random.
            use = random.randrange(0, 3)
        return use
# Test driver
# Run by typing "python3 CDKJPlayer.py"
if __name__ == "__main__":
    # Smoke test: wire two players together and walk one round of the
    # framework's message protocol by hand.
    player = CDJKPlayer()
    opponent = CDJKPlayer()
    players = [opponent,player]
    fakeinfo = ((0,0),0)  # unused; kept from the original scaffold
    fakeresult = 1
    fakemoves = (0,2)
    player.notify(Message.Message.get_match_start_message(players))
    player.notify(Message.Message.get_round_start_message(players))
    move = player.play()
    print ("Move played: ", move)
    player.notify(Message.Message.get_round_end_message(players,fakemoves,fakeresult))
| {
"repo_name": "geebzter/game-framework",
"path": "CDJKPlayer.py",
"copies": "1",
"size": "3670",
"license": "apache-2.0",
"hash": -6869283307057275000,
"line_mean": 33.6226415094,
"line_max": 119,
"alpha_frac": 0.5950953678,
"autogenerated": false,
"ratio": 3.8389121338912133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4934007501691213,
"avg_score": null,
"num_lines": null
} |
__author__ = "Collin Petty"
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman"]
__email__ = ["collin@cmu.edu", "peter@cmu.edu"]
__status__ = "Production"
import os
from random import randint
from operator import add
import tempfile
template_file = "xmlol.xml"
templates = "autogenerators/templates/"
def validate_dependencies():
    # Startup dependency check: this generator is only enabled if its XML
    # template file exists on disk. Returns True when all checks pass.
    print "DEPENDENCY CHECK - xmnlol.py (autogen)"
    if not os.path.exists(_template_path()):
        print "ERROR - xmlol - Could not find the template file (%s)" % template_file
        return False
    return True
def generate():
    """Generate one XMLOL problem instance.

    Returns a tuple ([generated xml path], key, description html); the key
    is six random integers in [999, 99999] concatenated as a string.
    """
    # Read the template inside a with-block so the handle is closed
    # (the original leaked the file object returned by open()).
    with open(_template_path(), 'r') as template_fh:
        template = template_fh.read()
    # Equivalent to the original reduce(add, ...) concatenation.
    key = ''.join(str(randint(999, 99999)) for _ in range(6))
    template = template.replace('###KEY###', key)
    # delete=False: the file must survive close() so the API can move it.
    output = tempfile.NamedTemporaryFile(delete=False, suffix=".xml")
    output.write(template)
    output.close()
    return [os.path.abspath(output.name)], key, """<p>The book has instructions on how to dump the corrupted\
configuration file from the robot's memory. You find a corrupted <a href='###file_1_url###'\
target='_blank'>XML file</a> and are looking for a configuration key.</p>"""
def _template_path():
    """Relative path of the XML template consumed by this generator."""
    return '%s%s' % (templates, template_file)
"repo_name": "SabunMacTavish/CTF-Platform",
"path": "api/autogenerators/xmlol.py",
"copies": "2",
"size": "1336",
"license": "mit",
"hash": 5807237390205766000,
"line_mean": 34.1842105263,
"line_max": 109,
"alpha_frac": 0.6669161677,
"autogenerated": false,
"ratio": 3.452196382428941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5119112550128941,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Collin Petty'
import os
from random import randint
from operator import add
import tempfile
template_file = "xmlol.xml"
templates = "autogenerators/templates/"
def validate_dependencies():
    # Startup dependency check: the generator is only enabled when its XML
    # template file exists on disk. Returns True when all checks pass.
    print "DEPENDENCY CHECK - xmnlol.py (autogen)"
    if not os.path.exists(_template_path()):
        print "ERROR - xmlol - Could not find the template file (%s)" % template_file
        return False
    return True
def generate():
    """Generate one XMLOL problem instance.

    Returns a tuple ([generated xml path], key, description html); the key
    is six random integers in [999, 99999] concatenated as a string.
    """
    # Read the template inside a with-block so the handle is closed
    # (the original leaked the file object returned by open()).
    with open(_template_path(), 'r') as template_fh:
        template = template_fh.read()
    # Equivalent to the original reduce(add, ...) concatenation.
    key = ''.join(str(randint(999, 99999)) for _ in range(6))
    template = template.replace('###KEY###', key)
    # delete=False: the file must survive close() so the API can move it.
    output = tempfile.NamedTemporaryFile(delete=False, suffix=".xml")
    output.write(template)
    output.close()
    return [os.path.abspath(output.name)], key, """<p>The book has instructions on how to dump the corrupted\
configuration file from the robot's memory. You find a corrupted <a href='###file_1_url###'\
target='_blank'>XML file</a> and are looking for a configuration key.</p>"""
def _template_path():
    """Build the path of the template file used by this generator."""
    return ''.join((templates, template_file))
"repo_name": "picoCTF/2013-Problems",
"path": "XMLOL/autogenerators/xmlol.py",
"copies": "1",
"size": "1081",
"license": "mit",
"hash": 535991577442048450,
"line_mean": 32.8125,
"line_max": 109,
"alpha_frac": 0.6743755782,
"autogenerated": false,
"ratio": 3.7275862068965515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49019617850965513,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Collin Petty'
import os
import random
import hashlib
dict_file = "/usr/share/dict/words"
def validate_dependencies():
    # Startup check: the generator needs the system word list to exist and
    # to yield a non-empty word before it is enabled.
    print "DEPENDENCY CHECK - trythemall.py (autogen)"
    if not os.path.exists(dict_file):
        #utilities.send_email_to_list(common.admin_emails, "Autogenerator Validation Error",
        #                             """Could not validate that the word list file for 'trythemall.py' exists.""")
        print ("ERROR - trythemall - Could not validate that the word list file exists.")
        return False
    if _get_random_word() == "":
        #utilities.send_email_to_list(common.admin_emails, "Autogenerator Validation Error",
        #                             """Test load of a random word failed to return a non-empty string.""")
        print ("ERROR - trythemall - Test load of a random word failed to return a non-empty string.")
        return False
    return True
def generate():
    """Create one problem instance as (files, key, description).

    No files are produced (first element is None); the key is a random
    dictionary word and the description embeds its salted MD5 digest for
    players to brute-force.
    """
    # we don't need the parameters for this problem
    word = _get_random_word()
    salt = str(random.randint(1000, 9999))
    key_hash = hashlib.md5(word + salt).hexdigest()
    desc = """You have found a passwd file containing salted passwords.
    An unprotected configuration file has revealed a salt of %s.
    The hashed password for the 'admin' user appears to be %s, try to brute force this password.""" % (salt, key_hash)
    return None, word, desc
def _get_random_word():
    """Return a random word from the system dictionary file.

    Seeks to a uniformly random byte offset, discards the (likely partial)
    line found there, and returns the next full line stripped of whitespace.
    """
    file_size = os.stat(dict_file)[6]  # index 6 of os.stat is the size
    # Use a with-block so the dictionary file handle is closed
    # (the original leaked the open file object).
    with open(dict_file, 'r') as word_list:
        word_list.seek((word_list.tell() + random.randint(0, file_size-1)) % file_size)
        word_list.readline()  # flushes line since we are probably in the middle
        return word_list.readline().strip()
"repo_name": "picoCTF/2013-Problems",
"path": "Try Them All/autogenerators/trythemall.py",
"copies": "1",
"size": "1702",
"license": "mit",
"hash": 8956535190071611000,
"line_mean": 41.575,
"line_max": 118,
"alpha_frac": 0.651586369,
"autogenerated": false,
"ratio": 3.765486725663717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4917073094663717,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Collin Petty'
import tempfile
import os
import random
import string
template_file = "readthemanual.txt"
templates = "autogenerators/templates/"
def validate_dependencies():
    # Startup check: only enable this generator if its manual template
    # exists on disk. Returns True when all checks pass.
    print "DEPENDENCY CHECK - readthemanual.py (autogen)"
    if not os.path.exists(_template_path()):
        print "ERROR - Read the Manual - Could not find the template file (%s)" % template_file
        return False
    return True
def generate():
    """Generate one Read the Manual instance.

    Splices a random 12-lowercase-letter key into the manual template,
    Caesar-ciphers the whole text with a random shift, and returns
    ([ciphered manual path], key, description html).
    """
    # Read the template inside a with-block so the handle is closed
    # (the original leaked the file object returned by open()).
    with open(_template_path(), 'r') as template_fh:
        template = template_fh.read()
    key = ''.join(random.choice(string.ascii_lowercase) for _ in range(12))
    template = template.replace('###KEY###', key)
    # NOTE(review): shift may be 26, which leaves letters unchanged
    # (identity mod 26) — confirm that is acceptable for this puzzle.
    shift = random.randint(1, 26)
    out_text = _caesar(template, shift)
    # delete=False: the file must survive close() so the API can move it.
    output = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
    output.write(out_text)
    output.close()
    return [os.path.abspath(output.name)], key, """<p>On the back of the broken panel you see a recovery\
<a href='###file_1_url###' target='_blank'>manual</a>. You need to find the emergency repair key in\
order to put the robot into <code>autoboot</code> mode, but it appears to be ciphered using a Caesar cipher.</p>"""
def _template_path():
    """Location of the manual template consumed by generate()."""
    return '{0}{1}'.format(templates, template_file)
def _caesar(text, shift):
    """Apply a Caesar cipher to ``text``.

    Letters rotate by ``shift`` within their own case (mod 26), digits
    rotate mod 10, and every other character is left untouched.
    """
    ret = list()
    for t in text:
        t = ord(t)
        if t in range(ord('a'), ord('z')+1):
            ret.append(((t - ord('a') + shift) % 26) + ord('a'))
        elif t in range(ord('A'), ord('Z')+1):
            ret.append(((t - ord('A') + shift) % 26) + ord('A'))
        elif t in range(ord('0'), ord('9')+1):
            ret.append(((t - ord('0') + shift) % 10) + ord('0'))
        else:
            ret.append(t)
    # Fixed: string.joinfields was removed in Python 3; str.join produces
    # identical output and works on both Python 2 and 3.
    return "".join(map(chr, ret))
return string.joinfields(map(chr, ret), "") | {
"repo_name": "picoCTF/2013-Problems",
"path": "Read the Manual/autogenerators/readthemanual.py",
"copies": "1",
"size": "1706",
"license": "mit",
"hash": 1919156609305995300,
"line_mean": 33.8367346939,
"line_max": 119,
"alpha_frac": 0.6078546307,
"autogenerated": false,
"ratio": 3.3916500994035785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44995047301035784,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Collin Petty'
import tempfile
import os
def validate_dependencies():
    """Validate external dependencies.
    This function does NOT have to exist. If it does exist the runtime will call and execute it during api
    initialization. The purpose of this function is to verify that external dependencies required to auto-generate
    a problem are properly installed and configured on this system. Some common tasks that may be performed are
    checking that a certain program is installed (such as 'javac') and that it is executable. You may also want to
    verify that template files that the generator modifies exist in the templates/ directory. If any dependency
    check fails the function should print out the respective error message and return False. If all checks pass
    correctly the function should return True. If the function does not exist the API initializer will assume that
    all dependencies are met and will add the generator to the pre-fetched generator list assuming there is an
    auto-generated problem in the database that has the given generator set for it's 'generator' field.
    The following code demonstrates how to check that the java compiler (javac) is present on the system and can be
    executed by the current user.
    """
    print "DEPENDENCY CHECK - TEMPLATE.py (TEMPLATE)"
    javac_path = "/usr/bin/javac" # This should have scope across the entire module but doesn't for template purposes
    if not os.path.exists(javac_path):
        print "ERROR - TEMPLATE - The specified java compiler (%s) does not appear to exist." % javac_path
        return False
    # os.X_OK asks whether the current process may execute the file.
    if not os.access(javac_path, os.X_OK):
        print "ERROR - TEMPLATE - javac is not executable by the python runtime."
        return False
    return True
def generate():
    """Generate an instance of the problem
    This is the function that is responsible for generating an instance of the auto-generated problem. The function
    has no concept of 'pid's or 'tid's. All generated files should use the tempfile module to build their output files
    and return a list of these files to the API for moving and renaming.
    Three values are returned as a tuple when generate() is called, the first value is a list of all user-facing
    files created by the auto-generator (there is typically only 1 output file such as a .class file, .xml file,
    etc etc). The second returned value is *either* the solution key or the name of a grader file (also created
    using the tempfile module). The ability to return a key was added after we saw that a lot of the graders being
    generated were simply checking if a passed value was equal to a given value. The problem's entry in the
    'problems' database will specify whether or not the 'grader' is a 'key' or 'file' for auto-generated problems
    (normal problems have the name of the grading script in this field). The third return value is the problem
    description (html). Any of the url links in it should be returned with '###file_X_url###' as their href value,
    where X is a number 1,2,3... representing the file in the return list (starts at 1 not 0). The API will move
    the temporary files returned in the list to the web server and replace these place holders with the proper paths.
    The following code generates a sample problem where a text file with a hard coded secret key is returned to a
    the API with a simple problem statement that demonstrates the description text replacement functionality.
    """
    key = "123456"
    # delete=False: the file must survive close() so the API can move it.
    output = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
    output.write("key = '%s'" % key)
    output.close()
    return [os.path.abspath(output.name)], key, """<p>Download <a href='###file_1_url###'>This file</a>\
and get the secret key.</p>"""
| {
"repo_name": "picoCTF/picoCTF-Platform-1",
"path": "api/autogenerators/TEMPLATE.py",
"copies": "2",
"size": "3811",
"license": "mit",
"hash": 8468881763839642000,
"line_mean": 63.593220339,
"line_max": 118,
"alpha_frac": 0.7360272894,
"autogenerated": false,
"ratio": 4.531510107015458,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004274607598535578,
"num_lines": 59
} |
__author__ = 'Collin Petty'
import os
import random
import string
import tempfile
import zipfile
from subprocess import call
javac_path = "/usr/bin/javac"
template_file = "bytecode.Authenticator.java"
templates = "autogenerators/templates/"
def validate_dependencies():
    # Startup dependency check: require an executable javac and the java
    # source template before enabling this generator.
    print "DEPENDENCY CHECK - bytecode.py (autogen)"
    if not os.path.exists(javac_path):
        print "ERROR - bytecode - The specified java compiler (%s) does not appear to exist." % javac_path
        return False
    # os.X_OK asks whether the current process may execute the file.
    if not os.access(javac_path, os.X_OK):
        print "ERROR - bytecode - javac is not executable by the python runtime."
        return False
    if not os.path.exists(_template_path()):
        print "ERROR - bytecode - Could not locate the java template file (%s) ." % _template_path()
        return False
    return True
def generate():
template = open(_template_path(), 'r').read()
key_list = [ord(random.choice(string.letters)) for _ in range(10)]
key = ''.join([chr(k) for k in key_list])
magic_string = "ThisIsth3mag1calString%s" % str(random.randint(999, 9999))
for idx, k in enumerate(key_list):
template = template.replace("###char%s###" % str(idx), str(k))
template = template.replace("###string###", magic_string)
output_folder = tempfile.mkdtemp()
print "## random output_folder: %s" % output_folder
java_file = open("%s/Authenticator.java" % output_folder, 'w')
java_file.write(template)
java_file.close()
call([javac_path, '-d', output_folder, "%s/Authenticator.java" % output_folder])
class_path = "%s/Authenticator.class" % output_folder
out_zip = tempfile.NamedTemporaryFile(delete=True, suffix=".zip")
out_zip.close()
zf = zipfile.ZipFile(os.path.abspath(out_zip.name), mode='w')
zf.write(os.path.abspath(class_path), os.path.basename(class_path))
zf.close()
print "Returning Key: %s" % key
return [os.path.abspath(out_zip.name)], key, """<p>You need to authenticate with the guard to gain\
access to the loading bay! Enter the root password from the vault application to retrieve the passkey!\
<a href='###file_1_url###' target='_blank'>This</a> class file is the executable for the vault application.</p>"""
def _template_path():
    """Path to the java source template used by generate()."""
    return '%s%s' % (templates, template_file)
"repo_name": "picoCTF/2013-Problems",
"path": "Byte Code/autogenerators/bytecode.py",
"copies": "1",
"size": "2289",
"license": "mit",
"hash": -8037058322768403000,
"line_mean": 39.1754385965,
"line_max": 119,
"alpha_frac": 0.6697247706,
"autogenerated": false,
"ratio": 3.5215384615384617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9682657318105286,
"avg_score": 0.0017211828066352095,
"num_lines": 57
} |
__author__ = ["Collin Petty", "Peter Chapman"]
__copyright__ = "Carnegie Mellon University"
__license__ = "MIT"
__maintainer__ = ["Collin Petty", "Peter Chapman"]
__credits__ = ["David Brumely", "Collin Petty", "Peter Chapman", "Tyler Nighswander", "Garrett Barboza"]
__email__ = ["collin@cmu.edu", "peter@cmu.edu"]
__status__ = "Production"
import bcrypt
from common import db
debug_disable_general_login = False
def login(request, session):
    """Authenticates a user.
    Takes POSTed auth credentials (teamname and password) and validates to mongo, adds the teamID to the session dict.
    If the debug_disable_general_login flag is set only accounts with 'debugaccount' set to true will be able
    to authenticate.
    Returns a dict with 'success' (0 failure, 1 success, 2 correct
    credentials while general login is disabled) and a 'message'.
    """
    if 'tid' in session: # we assume that if there is a tid in the session dict then the user is authenticated
        return {"success": 1, "message": "You are already logged in."}
    teamname = request.form.get('teamname', None) # get the teamname and password from the POSTed form
    password = request.form.get('password', None)
    if teamname is None or teamname == '':
        return {'success': 0, 'message': "Team name cannot be empty."}
    if password is None or password == '': # No password submitted
        return {"success": 0, "message": "Password cannot be empty."}
    # Cap the teamname length to bound the db query / bcrypt work.
    if len(teamname) > 250:
        return {"success": 0, "message": "STAHP!"}
    teamCurr = db.teams.find({'teamname': teamname})
    if teamCurr.count() == 0: # No results returned from mongo when searching for the user
        return {"success": 0, "message": "Team '%s' not found." % teamname}
    if teamCurr.count() > 1:
        return {"success": 0, "message": "An error occurred querying your account information."}
    checkTeam = teamCurr[0]
    pwhash = checkTeam['pwhash'] # The pw hash from the db
    # bcrypt: hashing the candidate password with the stored hash as salt
    # must reproduce the stored hash exactly.
    if bcrypt.hashpw(password, pwhash) == pwhash:
        if checkTeam.get('debugaccount', None):
            session['debugaccount'] = True
        if debug_disable_general_login:
            if 'debugaccount' not in checkTeam or not checkTeam['debugaccount']:
                return {'success': 2, "message": "Correct credentials! But the game has not started yet..."}
        # NOTE(review): checkTeam['tid'] raises KeyError when the field is
        # absent (as opposed to present-but-None) — confirm all team docs
        # carry a 'tid' key.
        if checkTeam['tid'] is not None:
            session['tid'] = checkTeam['tid']
        else: # SET THE 'tid' TO str('_id') FOR MIGRATION PURPOSES AND ADD THE 'tid' TO THE DOCUMENT
            session['tid'] = str(checkTeam['_id'])
            db.teams.update({'_id': checkTeam['_id']}, {'tid': str(checkTeam['_id'])})
        return {"success": 1, "message": "Logged in as '%s'." % teamname}
    return {"success": 0, "message": "Incorrect password."}
def logout(session):
    """Log the current team out.

    Clears the whole session when a team id is present and reports
    success; otherwise reports that no one was logged in.
    """
    if 'tid' not in session:
        return {"success": 0, "message": "You do not appear to be logged in."}
    session.clear()
    return {"success": 1, "message": "Successfully logged out."}
def is_logged_in(session):
    """Report whether this session belongs to a logged-in team.

    A session counts as logged in exactly when it carries a 'tid' key.
    """
    logged_in = 'tid' in session
    if logged_in:
        return {'success': 1, 'message': 'You appear to be logged in.'}
    return {"success": 0, "message": "You do not appear to be logged in."}
def is_blacklisted(tid):
    # Look up the team document by tid; defaults to False when the doc has
    # no 'blacklisted' field.
    # NOTE(review): raises AttributeError if no team matches (find_one
    # returns None) — confirm callers always pass a valid tid.
    return db.teams.find_one({'tid': tid}).get('blacklisted', False)
"repo_name": "SabunMacTavish/CTF-Platform",
"path": "api/auth.py",
"copies": "2",
"size": "3581",
"license": "mit",
"hash": -8293207273355245000,
"line_mean": 42.6829268293,
"line_max": 118,
"alpha_frac": 0.6392069254,
"autogenerated": false,
"ratio": 3.650356778797146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5289563704197145,
"avg_score": null,
"num_lines": null
} |
import csv
import plotly.plotly as py
from plotly.graph_objs import *
from collections import defaultdict
#Contains tweet data, accessed as attributes, e.g. (...).value
class Tweet:
    """One row of the Oscars tweet CSV, with each of the 17 columns
    exposed as an attribute (e.g. tweet.text, tweet.rt, tweet.usrhandle)."""
    def __init__(self, data):
        # Unpack the 17 CSV columns in order; a wrong-length row raises
        # ValueError exactly as the original chained assignment did.
        (self.time, self.tweetid, self.text, self.rt,
         self.geo, self.placetag, self.favs, self.usr,
         self.usrloc, self.usrid, self.timezone, self.usrfollow,
         self.usrstats, self.usrfriends, self.usrhandle,
         self.hashtags, self.mentions) = data
#Imports data as Tweet objects to the variable 'tweets'
# ('rb' + csv.reader is Python 2 style; [1:] skips the header row)
with open('oscar_tweets.csv', 'rb') as File:
    File = csv.reader(File, delimiter=',',quotechar='"')
    tweets = [Tweet(data) for data in File][1:]
#Imports a list of states from a CSV file to the variable 'states'
#For use in location() function
with open('states.csv', 'rb') as File:
    File = csv.reader(File, delimiter=',',quotechar='"')
    states = [data for data in File]
####Functions####
#Determines Most Tweeted Nominees
def popularity():
    """Count tweets mentioning each Best Picture nominee, print the
    ranking, and render a plotly bar chart of the counts."""
    nominees = ["American Sniper","Birdman","Boyhood",
                "The Grand Budapest Hotel","The Imitation Game",
                "Selma","The Theory of Everything","Whiplash"]
    count = defaultdict(int)
    for tweet in tweets:
        text = tweet.text.lower()
        for nominee in nominees:
            # NOTE(review): str.strip('the ') removes any of the characters
            # 't','h','e',' ' from both ends, not the literal prefix "The "
            # — e.g. it eats into "the theory of everything". Probably a
            # prefix removal was intended; verify.
            if text.count(nominee.lower().strip('the ')) != 0:
                count[nominee] += 1
    top = sorted(count.items(),key=lambda x:x[1], reverse=True)
    #Prints out results
    # (note: 'count' is reused below as a rank counter, shadowing the
    # defaultdict above)
    count = 1
    print("The Most Tweeted About Best Picture Nominees:")
    for t in top:
        print("\t"+str(count)+": "+t[0])
        count += 1
    #Graphs results
    data = Data([Bar(x=[data[0] for data in top],
                     y=[data[1] for data in top],
                     marker=Marker(color='#b09953'))])
    layout = Layout(
        title='Tweets about Best Picture Nominees',
        font=Font(
            family='"Open sans", verdana, arial, sans-serif',
            size=17,
            color='#000'
        ),
        yaxis=YAxis(title='Number of Tweets')
    )
    fig = Figure(data=data,layout=layout)
    plot = py.plot(fig)
#Determines when Birdman (the winner) was most tweeted about
def winner():
    """Bucket Birdman mentions by (hour, minute), print the busiest
    minute, and render a plotly 2-D histogram of mention times (GMT)."""
    count = defaultdict(int)
    for tweet in tweets:
        # assumes tweet.time is a fixed-width timestamp string with the
        # hour at [11:13] and minute at [14:16] — TODO confirm format
        hour = int(tweet.time[11:13])
        minute = int(tweet.time[14:16])
        text = tweet.text.lower()
        if text.count('birdman') != 0:
            count[(hour,minute)] += 1
    times = sorted(count.items(),key=lambda x:x[1], reverse=True)
    #Prints results
    # ((h-1)%12)+1 maps a 24-hour hour onto a 12-hour clock for display.
    print("Birdman was mentioned most frequently at:")
    print("\t {:02d}:{:02d} GMT".format((times[0][0][0]-1)%12 +1, times[0][0][1]))
    #Graphs results
    # Expand each (hour, minute) bucket back into one point per mention so
    # the 2-D histogram weights cells by mention count.
    x=[data[0][0] for data in times for i in range(data[1])]
    y=[data[0][1] for data in times for i in range(data[1])]
    data = Data([
        Histogram2d(
            x=x,
            y=y,
            autobinx=False,
            xbins=XBins(
                start=0.5,
                end=6.5,
                size=1
            ),
            autobiny=False,
            ybins=YBins(
                start=0.5,
                end=60.5,
                size=1
            ),
            colorscale=[[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'], [0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'], [1, 'rgb(217,30,30)']]
        )
    ])
    layout = Layout(
        title='Times where Birdman is Mentioned<br> (GMT)',
        font=Font(
            family='"Open sans", verdana, arial, sans-serif',
            size=17,
            color='#000'
        ),
        yaxis=YAxis(title='Minute'),
        xaxis=XAxis(title='Hour')
    )
    fig = Figure(data=data,layout=layout)
    plot = py.plot(fig)
#Determines the top tweeting states in the US
def location():
    """Tally tweets per US state by scanning the free-text user location
    field, print the top 10 states, and render a plotly bar chart."""
    count = defaultdict(int)
    for tweet in tweets:
        loc = tweet.usrloc
        if len(loc) != 0:
            for state in states:
                # state rows appear to hold a pair of name variants
                # (presumably full name and abbreviation) — TODO confirm
                # against states.csv.
                if loc.count(state[0]) != 0 or loc.count(state[1]) != 0:
                    count[state[0]] += 1
    times = sorted(count.items(),key=lambda x:x[1], reverse=True)
    #Prints results
    print("The top 10 tweeting US states were:")
    for i in range(10):
        print("\t" + str(i+1)+": "+times[i][0])
    #Graphs results
    x = [state[0] for state in times[:10]]
    y = [state[1] for state in times[:10]]
    text = [state[0] for state in times[:10]]
    data = Data([Bar(x=x,y=y,text=text,marker=Marker(color='#b09953'))])
    layout = Layout(
        title='Top Tweeting States',
        font=Font(
            family='"Open sans", verdana, arial, sans-serif',
            size=17,
            color='#000'
        ),
        yaxis=YAxis(title='Number of Tweets Sent')
    )
    fig = Figure(data=data,layout=layout)
    plot = py.plot(fig, filename='Top Tweeting States')
#### Additional Functions ####
#Returns information on the most retweeted Tweet of the night
def topRT():
    """Find the single most-retweeted tweet and print its text, retweet
    count, and URL."""
    toprt = 0
    topTweet = tweets[0]
    for tweet in tweets:
        trt = int(tweet.rt)
        if trt > toprt:
            toprt = trt
            topTweet = tweet
    #Prints results
    print("The top tweet was:")
    print("\n{:s}".format(topTweet.text))
    print("\nWith {:s} retweets".format(topTweet.rt))
    print("URL: http://twitter.com/{:s}/status/{:s}".format(topTweet.usrhandle, topTweet.tweetid))
| {
"repo_name": "mecolmg/OscarTweets",
"path": "files/Tweeter.py",
"copies": "1",
"size": "5427",
"license": "apache-2.0",
"hash": 4287709996741110300,
"line_mean": 31.497005988,
"line_max": 149,
"alpha_frac": 0.5621890547,
"autogenerated": false,
"ratio": 3.273220747889023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4335409802589023,
"avg_score": null,
"num_lines": null
} |
__author__ = 'col-n'
import timeit, random
'''
A python implementation of RSA (https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29).
Finds random prime numbers of specified length (up to 32 bits).
Demonstrates why larger numbers are better by factoring the resulting private key from the computed public key,
using a very unsophisticated factoring method.
Uses the built in python bitwise exclusive or operator
'''
#let's define some functions for generating random numbers of length n.
#we do this by generating many random numbers via the cryptographically acceptable random.SystemRandom.
#generating lots of numbers and taking the least significant bit ensures maximum entropy ... which is good
def pprime(length):
    """Return a random odd binary string to use as a prime candidate.

    The result is a string of length+1 bits: a forced leading '1',
    length-1 cryptographically random bits (least-significant bit of each
    SystemRandom draw), and a forced trailing '1' so the value is odd.
    NOTE(review): the candidate is one bit longer than `length`; kept
    as-is for compatibility with existing callers.

    :param length: requested size, 5..32 inclusive
    :return: binary string, or None when length is out of range
    """
    import random
    if length > 32:
        print('max length is 32!')
        return None
    if length < 5:
        print('min length is 5!')
        # Fix: previously fell through and built an undersized candidate.
        return None
    pos_prime = '1'
    for _ in range(1, length):
        word = str('{0:032b}'.format(random.SystemRandom().randint(1, 4294967295)))
        pos_prime = pos_prime + word[-1]  # keep only the least-significant bit
    pos_prime = pos_prime + '1'  # force oddness
    return pos_prime
'''
RSA depends on getting prime numbers and then doing things to them, like multiplying and taking exponents.
While we could use python's built in modular exponentiation algo, lets make our own
'''
def faste(a, x, n):
    """Square-and-multiply modular exponentiation: a**x mod n.

    Walks the binary expansion of x from the most significant bit,
    squaring every step and multiplying in `a` on set bits.
    """
    y = 1
    for bit in "{0:b}".format(x):
        y = (y * y) % n
        if bit == '1':
            y = (a * y) % n
    return y
'''
We need to test whether our random number is prime. We will use the Miller-Rabin Algo.
https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
we need to find a random 'a' and take its exponent to the power of our 'possibly prime' minus 1,
to the modulus of our 'possibly prime'. There are other tests we do that you can read about in the wiki article
'''
#we need to generate lots of 'a's ...
def randoma(n):
    """Pick a random Miller-Rabin witness in [1, n-1].

    :param n: candidate as an int, or as a binary string
    :return: random int in [1, n-1]
    """
    import random
    if type(n) is str:
        n = int(n, 2)
    upper = n - 1
    return random.SystemRandom().randint(1, upper)
def millerrab(a, n):
    """One Miller-Rabin-style round with witness `a` against candidate `n`.

    :return: 0 when n is definitely composite; 1 when n may be prime
    """
    # A non-trivial square root of 1 mod n proves n composite.
    # Fix: the original compared with `is`, which only works for small ints
    # because of CPython's int caching; `==` is the correct value comparison.
    if (a * a % n) == 1 and a != 1 and a != n - 1:
        return 0
    # Fermat test: a**(n-1) mod n must be 1 for a prime n.
    if faste(a, (n - 1), n) != 1:
        return 0
    # n is possibly prime as far as this witness can tell.
    return 1
#we'll test a certain number of 'a's to be 'sure' (probibalistcally speaking) we have a prime number
def isprime(num):
    """Probabilistic primality test: 100 Miller-Rabin rounds.

    :param num: candidate as an int, or as a binary string
    :return: 0 when definitely composite, 1 when (probably) prime
    """
    if type(num) is str:
        num = int(num, 2)
    # 100 independent witnesses (the count commonly used in practice).
    # Fixes vs original: `== 0` instead of identity `is 0`, removal of the
    # unreachable `break` after `return`, and no shadowing of this
    # function's own name by a local variable.
    for _ in range(100):
        a = randoma(num)
        if millerrab(a, num) == 0:
            return 0
    return 1
'''
Now we're ready to generate a list of 2 random prime numbers, which we will use to construct the private and public key
I add a while loop to check that the prime numbers are different, which is an issue for small bit lengths
'''
def getprimes(length):
    """Return a list of two distinct probable primes of the given size.

    Draws random candidates via pprime() until isprime() accepts one;
    duplicate draws (an issue at small bit lengths) are discarded and
    the loop retries.
    """
    listoprimes = []
    while len(listoprimes) < 2:
        pp = pprime(length)
        while not isprime(pp):
            pp = pprime(length)
        y = int(pp, 2)
        if y not in listoprimes:
            listoprimes.append(y)
        # Fix: on a duplicate the original recursed into getprimes() and
        # threw the result away; simply retrying the loop is equivalent
        # and avoids the wasted work.
    return listoprimes
'''
There are three people in our system: Alice, Bob, and Trent. Alice and Bob wish to communicate.
Trent is the 'trusted' authority who signs Alice's public key so that Bob can be sure he is communicating with Alice.
We have generated two primes, p and q. We get the phi-n of them by (p-1) * (q-1).
We then find "n" by multiplying p * q.
We then find "e", the public key, by find a small number that is relatively prime with phi-n.
We then find "d", the private key, by getting the modular inverse
(https://en.wikipedia.org/wiki/Modular_multiplicative_inverse) of 'e' to the moduluous of phi-n
'''
#extended euclidean algorithm on 'a' and 'b'. Set the constants and continue in a loop.
#return the greatest common denominator and the multiplicative inverse and modulo
def xgcd(a, b):
    """Extended Euclidean algorithm.

    :return: (g, x, y) with g = gcd(a, b) and a*x + b*y == g;
             x is the (unreduced) multiplicative inverse of a mod b.
    """
    x, y, u, v = 0, 1, 1, 0
    while a != 0:
        q, r = divmod(b, a)
        # Advance the Bezout coefficients alongside the remainder chain.
        x, y, u, v = u, v, x - u * q, y - v * q
        b, a = a, r
    return b, x, y
#getting phi_n, the product of our two primes minus 1
def getphi(n):
    """Euler's totient for n = [p, q] with p, q prime: (p-1)*(q-1)."""
    p, q = n[0], n[1]
    return (p - 1) * (q - 1)
#get n, the product of two primes, in the form of a list of exactly two primes generated by the 'get primes' function
def getn(lp):
    """RSA modulus: the product of the two primes in `lp`."""
    p, q = lp[0], lp[1]
    return p * q
#find a small number that is relatively prime with phi_n to be the public key e
#modern protocols all use the same exponent, which is 65537, which is not necessary, but useful as it is easy to use
def find_rprime(phi_n):
    """Return the smallest prime from a fixed table that is coprime with phi_n.

    Used as the RSA public exponent e. Returns None implicitly if every
    table entry shares a factor with phi_n (practically unreachable for
    the moduli this module produces).
    """
    # Precomputed table of small primes (3..3989), tried in ascending order.
    f100p = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107,
             109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
             229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
             353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
             479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613,
             617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751,
             757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887,
             907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
             1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153,
             1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283,
             1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427,
             1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531,
             1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637,
             1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783,
             1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913,
             1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053,
             2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179,
             2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311,
             2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437,
             2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593,
             2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711,
             2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837,
             2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971,
             2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137,
             3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299,
             3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413,
             3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547,
             3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677,
             3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823,
             3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947,
             3967, 3989]
    for item in f100p:
        # coprime check via the extended Euclid helper defined above
        gcd, x, y = xgcd(item,phi_n)
        if gcd != 1:
            continue
        else:
            return item
#find the modular inverse of 'e' above modulo phi_n, which is the private key d, first testing if one exists
def modinv(a, m):
    """Modular inverse of `a` mod `m`, or None when gcd(a, m) != 1."""
    g, inv, _ = xgcd(a, m)
    if g != 1:
        return None
    # Reduce the (possibly negative) Bezout coefficient into [0, m).
    return inv % m
#we will need to hash a 'challenge' that alice and bob will mutually agree on.
#I will use a trivial one by taking the exclusive or of byte strings, but this could easily be extended to use a real one
#PSA. Don't use MD5 or SHA-1 - they are broken. Use SHA2 or better.
#take a list of bits as a string and return a list of bytes as integers
def get_list_bytes(st):
    """Split a bit string into 8-bit chunks, returned as a list of ints.

    Bits past the last full byte are dropped (original behavior, kept).

    :param st: string of '0'/'1' characters
    :return: list of ints in [0, 255]
    """
    b_list = []
    n_chunks = int(len(st) / 8) + 1
    for i in range(1, n_chunks):
        start = (i - 1) * 8
        # Fix: the original sliced start..start+7, taking only 7 of the
        # 8 bits of each byte.
        b_list.append(int(st[start:start + 8], 2))
    return b_list
#take the exclusive or
def hash(mylist):
    """Fold a list of byte values into a single int by XOR.

    NOTE: intentionally keeps the original name, which shadows the
    builtin hash() at module scope.
    """
    acc = 0
    for byte in mylist:
        acc ^= byte
    return acc
#given a compound integer of length n, determine the length in bits of the challenge (k) and each parties portion (u)
#returns the length of k, u1, u2
def find_k_u(anyn):
    """Given a compound integer, size the shared challenge and each party's share.

    :param anyn: composite modulus (fits in 32 bits)
    :return: (len_k, u1, u2) — bit length of the challenge k, and the bit
             counts of each party's random contribution (u1 >= u2)
    """
    in_bits = '{0:032b}'.format(anyn)
    # Fix: strip('0') also removed *trailing* zero bits, corrupting the
    # length for even n; only the 32-bit zero padding should be removed.
    in_bits = in_bits.lstrip('0')
    len_k = len(in_bits) - 1
    each_u = (len_k - 1) / 2
    if ((len_k - 1) % 2 == 0):
        return len_k, int(each_u), int(each_u)
    else:
        # odd split: give the extra bit to the first party
        return len_k, int(each_u + .5), int(each_u - .5)
#find a random string of bits and return it as a string. this is bob or alice's portion.
def find_my_u(anyu):
    """Return '1' followed by `anyu` cryptographically random bits, as a string."""
    import random
    rng = random.SystemRandom()
    bits = ['1']
    for _ in range(anyu):
        # take only the least-significant bit of each 32-bit draw
        word = '{0:032b}'.format(rng.randint(1, 4294967295))
        bits.append(word[-1])
    return ''.join(bits)
#some of the less interesting and tedious portions are left out.
# --- Demo: key generation, certificate-style signing, and a challenge
# --- exchange between Alice, Bob, and the authority Trent.
#Alice
primelist1 = getprimes(13)
#Trent
primelist2 = getprimes(13)
# get phi_n for alice...
phin1 = getphi(primelist1)
# ...and trent
phin2 = getphi(primelist2)
# get n for alice ...
alicen = getn(primelist1)
# ... and trent
trentn = getn(primelist2)
#get e for alice ...
alicee = find_rprime(phin1)
#... and trent
trente = find_rprime(phin2)
print('alice e is:',alicee)
#get d for alice ...
aliced = modinv(alicee,phin1)
# ... and trent
trentd = modinv(trente,phin2)
print('alice d is:',aliced)
print('Alices p=',primelist1[0],'q=',primelist1[1],'n=',alicen,'e=',alicee,'d=',aliced)
#trent will sign 'r', a string with padding, alice's info, and alice's public key and n
padding = '00000000'
string_alice = '{0:08b}'.format(ord('a'))+'{0:08b}'.format(ord('l'))+'{0:08b}'.format(ord('i'))+'{0:08b}'.format(ord('c'))+'{0:08b}'.format(ord('e'))
r = padding+string_alice+'{0:064b}'.format(alicen)+'{0:064b}'.format(alicee)
# create a list of the bytes and convert to integers for XOR...
b_list_1 = get_list_bytes(r)
# NOTE: hash() here is this module's XOR fold, not the builtin.
trent_hash = hash(b_list_1)
#decrypt the hash with trent private key d:
s = faste(trent_hash, trentd, trentn)
#print r, h(r), s
print('r:',int(r,2))
print('h(r):',trent_hash)
print('s :',s)
#get the length of the challenge, k, and u bob and u alice, and concatenate them.
#in real life this would be a bunch of communication between them.
a_b_k = find_k_u(alicen)
alice_u = find_my_u(a_b_k[1])
bob_u = find_my_u(a_b_k[2])
the_u = alice_u+bob_u
print('k is:',a_b_k[0])
u = int(the_u,2)
print('u is:',int(the_u,2))
#hash u ...
hash_of_u = hash(get_list_bytes(the_u))
print('h(u):',hash_of_u)
#alice decrypts u with her private key, getting 'v'
alice_v = faste(hash_of_u, aliced, alicen)
#bob checks ...
# Encrypting v with Alice's public key must recover h(u) if she holds d.
bob_v_check = faste(alice_v,alicee,alicen)
print('v(d,h(u))=',alice_v)
print('e(e(v)=',bob_v_check)
if bob_v_check == hash_of_u:
    print('you are talking to someone who knows alices private key!')
else:
    print('something is wrong. it is either mallory, or your code ...')
#see how long it takes your computer generate p and q and then to factor the numbers back.
def getprimestime(length):
    """Timed variant of getprimes().

    :return: ([p, q], elapsed_seconds) for generating two distinct
             probable primes of the given size
    """
    start_time = timeit.default_timer()
    listoprimes = []
    while len(listoprimes) < 2:
        pp = pprime(length)
        while not isprime(pp):
            pp = pprime(length)
        y = int(pp, 2)
        if y not in listoprimes:
            listoprimes.append(y)
        # Fix: on a duplicate the original called getprimes(length) and
        # discarded the result (skewing the timing); retrying the loop
        # is equivalent.
    time1 = timeit.default_timer() - start_time
    return listoprimes, time1
def factorstime(n):
    """Trial-division factorization with a 2-3-5 wheel, timed.

    :param n: integer > 1 to factor
    :return: (factors, elapsed_seconds) with factors in nondecreasing order
    """
    start_time = timeit.default_timer()
    # Increment pattern that skips multiples of 2, 3 and 5 once past them.
    wheel = [1, 2, 2, 4, 2, 4, 2, 4, 6, 2, 6]
    w, f, fs = 0, 2, []
    while f * f <= n:
        while n % f == 0:
            fs.append(f)
            # Fix: the original used float division (n /= f), which loses
            # precision and gives wrong factors once n exceeds 2**53.
            n //= f
        f, w = f + wheel[w], w + 1
        if w == 11: w = 3  # keep cycling the repeating part of the wheel
    if n > 1: fs.append(int(n))
    time2 = timeit.default_timer() - start_time
    return fs, time2
#try creating and factoring random bit lengths from ... warning, this will take a long time with long bit lengths!
# Benchmark: for each bit length, time prime-pair creation vs factoring
# the product back. Long bit lengths take a very long time to factor!
mylist = [10,11]
clist = []  # creation times, seconds
flist = []  # factoring times, seconds
for i in mylist:
    x,y = getprimestime(i)
    n = (x[0]*x[1])
    #print(n)
    clist.append(y)
    a,b = factorstime(n)
    flist.append(b)
print(clist)
print(flist)
'''
I used the algo above on bit lengths of 10-32 bits. The results are below for a comp with ~3 GHz i7 and 16 GB ram.
in seconds:
bits create factor
10 0.005270632 0.000109706
11 0.005230701 0.000200344
12 0.005701843 0.000397583
13 0.006957714 0.001013058
14 0.004791609 0.001461723
15 0.004986819 0.002961732
16 0.007940306 0.005981596
17 0.006514073 0.018513429
18 0.005834183 0.025434709
19 0.013100112 0.049306284
20 0.005732156 0.092054049
21 0.006645892 0.198051598
22 0.004625693 0.344946078
23 0.007271184 0.804739178
24 0.010514663 1.766220151
25 0.007118291 4.565575443
26 0.008802504 8.07297894
27 0.00619971 16.92048314
28 0.013204597 23.02109739
29 0.007593304 65.92335069
30 0.011055429 108.7945463
31 0.009474718 336.7977034
32 0.021832555 598.2711962
As you can see, past trivial bit lengths (at about 24 bits) each additional bit about doubles the amount of time to
factor while having basically no effect on the time to create the factor.
'''
#for a great webcomic on passwords ...
#a good password: https://xkcd.com/936/
#https://drive.google.com/open?id=0B5eMNCdPTsppR1puTUEzeGg2dWs
| {
"repo_name": "col-n/col-n",
"path": "rsa_python_col-n.py",
"copies": "1",
"size": "14974",
"license": "apache-2.0",
"hash": 6973465575216016000,
"line_mean": 35.1690821256,
"line_max": 149,
"alpha_frac": 0.6270869507,
"autogenerated": false,
"ratio": 2.6825510569688285,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3809638007668828,
"avg_score": null,
"num_lines": null
} |
# import required dependencies
from bs4 import BeautifulSoup
import requests
import docx
from datetime import date
from operator import itemgetter, attrgetter, methodcaller
from Review import Review
import sys
# Python 2 script. NOTE(review): Python 2 input() eval()s what the user
# types -- the prompt expects an int, but arbitrary expressions execute.
reviewNum = input("How many reviews would you like?\nEnter a number from 0 to 25: \n")
# Open up TrustRadius page for Sitefinity
url = "https://www.trustradius.com/products/progress-sitefinity/reviews"
data = requests.get(url).text
soup = BeautifulSoup(data, "html.parser")
# Find all review links from main product page
query = soup.find_all('a', {
    'class' : 'link-to-review-btn'
})
# List that holds URLs of individual reviews
links = []
# Append all the URLs to the list we created
for link in query:
    links.append('https://www.trustradius.com' + link.get('href'))
# Print number of reviews to command line (if used for plural/singular case)
if reviewNum == 1:
    print "%d links found. Creating %d review." % (len(links), reviewNum)
else:
    print "%d links found. Creating %d reviews." % (len(links), reviewNum)
# Function for returning review sections from review page
# return (dictionary): a key-value list of the headings and review text
# parameters: link (string): the url for the review
def findMaterials(link):
    """Scrape one TrustRadius review page and return a Review object.

    :param link: URL of the individual review (the date is parsed out of
        fixed character positions in this URL -- fragile if the URL
        format changes).
    """
    # Parse the given link into some Beautiful Soup
    req = requests.get(link).text
    reviews = BeautifulSoup(req, 'html.parser')
    # Set up list string variables.
    reviewAuthor = []
    reviewPosition = []
    reviewCompany = []
    reviewRating = []
    sectionHeading = []
    sectionText = []
    sectionDate = ''
    # Find the authors name (if there is one)
    for review in reviews.find_all('span', {'itemprop': 'author'}):
        reviewAuthor.append(review.contents[0].text)
    # Find the author's position and company (if applicable)
    for review in reviews.find_all('span', {'class': 'user-info'}):
        reviewPosition.append(review.contents[0].text)
        reviewCompany.append(review.contents[1].text)
    # Find what the user rated Sitefinity
    reviewRating = reviews.find_all('span', class_='number')[0].text
    # Perform find.contents[] for all of the headings and text
    # and append them to our functions variables
    # NOTE(review): the deep .contents[...] chains encode the exact page
    # DOM layout and will break on any markup change.
    for review in reviews.find_all('div', {'class': 'description'}):
        # Receive review section headings
        for head in range(6):
            sectionHeading.append(review.contents[0].contents[0].contents[1].contents[head].contents[0].contents[0].contents[0].contents[0].text)
        # Receive review section bodies
        for body in range(6):
            sectionText.append(" %s" % review.contents[0].contents[0].contents[1].contents[body].contents[1].contents[0].contents[0].contents[0].text)
    # Wrap up the review information into a dictionary, this is for easy handling
    reviewDict = dict(zip(sectionHeading, sectionText))
    # Get's the date of the review from the review's URL
    sectionDate = link[56:-9]
    days = date(int(sectionDate[:-6]), int(sectionDate[5:-3]), int(sectionDate[8:]))
    # Create a new review using our Review class, and return that review
    rev = Review(reviewAuthor, reviewPosition, reviewCompany, reviewRating, reviewDict, days)
    print "Review created for %s..." % rev.name[0]
    sys.stdout.flush()
    return rev
# Create array of Review objects and populate with our reviews
# Scrape the first `reviewNum` review links into Review objects.
reviewGuide = []
for num in range(reviewNum):
    reviewGuide.append(findMaterials(links[num]))
# Sort our list based on date posted
#reviewGuideSorted = sorted(reviewGuide, key=attrgetter('day'), reverse=True)
# Create document and insert main heading
doc = docx.Document()
doc.add_heading('Trust Radius Weekly Report', 0)
# Func createPage: page (param): an instance of a Review object
# Func createPage: page (param): an instance of a Review object
# Appends one review's heading block and section paragraphs to the
# module-level `doc`, followed by a page break (unless only one review
# was requested).
def createPage(page):
    # Insert Review Info
    doc.add_heading(page.name, 1)
    doc.add_heading("%s at %s" % (page.position[0], page.company[0]), 3)
    doc.add_heading(page.day.strftime('%B %d, %Y'), 3)
    doc.add_heading(page.rating + ' out of 10 stars', 3)
    # Insert Review Text: goodies maps section heading -> section body
    for x, y in page.goodies.items():
        doc.add_heading(x, 4)
        doc.add_paragraph(y)
    # Create new page for next review
    if reviewNum != 1:
        doc.add_page_break()
# Iterate through all of our reviews to create docx
for review in reviewGuide:
createPage(review)
# Print success and save docx (if used for plural/singular case)
if reviewNum == 1:
print 'Successfully created a .docx with %d review. Check out TrustRadiusReport.docx...' % reviewNum
else:
print 'Successfully created a .docx with %d reviews. Check out TrustRadiusReport.docx...' % reviewNum
doc.save('TrustRadiusReport.docx')
| {
"repo_name": "pizdetz/trustradiusCrawler",
"path": "index.py",
"copies": "1",
"size": "5293",
"license": "mit",
"hash": 5816659876774853000,
"line_mean": 34.5234899329,
"line_max": 150,
"alpha_frac": 0.6894010958,
"autogenerated": false,
"ratio": 3.716994382022472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4906395477822472,
"avg_score": null,
"num_lines": null
} |
__author__ = 'commissar'
import json
import time
from alimq.http import util
import logging
import traceback
import requests
import arrow
log = logging.getLogger("alimq")
log.setLevel(logging.INFO)
"""
消费者
"""
class HttpConsumer(object):
    """HTTP consumer for the Aliyun ONS (MQ) REST endpoint.

    Polls messages for a topic with signed GET requests and acknowledges
    processed messages with signed DELETE requests.
    """
    def __init__(self,topic, consumer,access_key, access_secret,url='http://publictest-rest.ons.aliyun.com'):
        """Store the signing credentials and consumer identity."""
        """Consumer ID"""
        self.consumerid = consumer
        # topic to consume
        """消费主题"""
        self.topic = topic
        self.url = url
        # Aliyun access key/secret used to sign every request
        self.access_key = access_key
        self.access_secret = access_secret
    def __build_get_items_header__(self, cur_time_str):
        '''
        Build the HTTP headers required for the message-fetch request.
        :param cur_time_str: [str] millisecond timestamp string
        :return: [dict] HTTP request headers (signature, key, consumer id)
        '''
        newline = "\n"
        # signature input: topic \n consumer id \n timestamp
        signString = self.topic + newline + self.consumerid + newline + cur_time_str
        sign = util.calSignature(signString,self.access_secret)
        headers = {
            util.HK_SIGN : sign,
            util.HK_ACCESS_KEY : self.access_key,
            util.HK_CONS_ID : self.consumerid,
            "Content-Type": "application/json; charset=utf-8"#"text/plain"
        }
        return headers
    def get_items(self,count):
        '''
        Fetch up to `count` messages from the topic.
        :param count: maximum number of messages to request
        :return: (status, data); status is int, 1 means success.
                 data is a list, empty when there are no messages.
        '''
        status = 0
        data = []
        s = requests.Session()
        try:
            # millisecond timestamp (13 digits)
            """时间戳"""
            date = repr(int(time.time() * 1000))[0:13]
            headers = self.__build_get_items_header__(date)
            req_url = "%s/message/?topic=%s&time=%s&num=%d"%(self.url,self.topic, date,count)
            s.headers.update(headers)
            resp = s.get(req_url)#,headers=header)
            status_code = resp.status_code
            content = resp.content
            if status_code == 200:
                msg = content.decode(resp.apparent_encoding)
                data = json.loads(msg)
                status = 1
            else:
                log.error("%d,%s"%(status_code,resp.reason))
        except Exception:
            # best-effort: log and fall through with status == 0
            log.error(traceback.format_exc())
        s.close()
        return status,data
    def __build_ack_item_header__(self, cur_time_str,msgHandle):
        '''
        Build the HTTP headers required for the acknowledge (DELETE) request.
        :param cur_time_str: [str] millisecond timestamp string
        :param msgHandle: handle of the message being acknowledged
        :return: [dict] HTTP request headers
        '''
        newline = "\n"
        # signature input additionally includes the message handle
        signString = self.topic + newline + self.consumerid + newline + msgHandle + newline + cur_time_str
        sign = util.calSignature(signString,self.access_secret)
        headers = {
            util.HK_SIGN : sign,
            util.HK_ACCESS_KEY : self.access_key,
            util.HK_CONS_ID : self.consumerid,
            "Content-Type": "application/json; charset=utf-8"#"text/plain"
        }
        return headers
    def ack_item(self,msgHandle):
        '''
        Acknowledge that this message has been processed.
        :param msgHandle: handle returned with the fetched message
        :return: (status, data); status 1 on HTTP 204, else 0. data is
                 always an empty list here.
        '''
        status = 0
        data = []
        s = requests.Session()
        try:
            # millisecond timestamp (13 digits)
            """时间戳"""
            date = repr(int(time.time() * 1000))[0:13]
            delUrl = self.url + "/message/?msgHandle="+ msgHandle+ "&topic="+self.topic+"&time="+date
            headers = self.__build_ack_item_header__(date,msgHandle)
            s.headers.update(headers)
            resp = s.delete(delUrl)#,headers=header)
            status_code = resp.status_code
            content = resp.content
            if status_code == 204:
                status = 1
            else:
                log.error("%d,%s"%(status_code,resp.reason))
        except Exception:
            # best-effort: log and fall through with status == 0
            log.error(traceback.format_exc())
        s.close()
        return status,data
if __name__ == '__main__':
    # Smoke test: drain the topic, acknowledging each message, until a
    # poll returns no messages. Credentials are intentionally blank here.
    TOPIC_ID = "YJ_8215"
    CONSUMER_ID = "CID_PZ_DEBUG"
    ACC_KEY_ID = ""
    ACC_KEY_SECRET = ""
    cons = HttpConsumer(TOPIC_ID,CONSUMER_ID, ACC_KEY_ID, ACC_KEY_SECRET)
    print(arrow.now())
    cnt = 0
    pull_cnt = 0
    while(True):
        print("======== one get_items =====[pull_count:%d]"%pull_cnt)
        pull_cnt += 1
        status, msgs = cons.get_items(10)
        if msgs:
            for msg in msgs:
                cnt += 1
                print("GET %d[key:%s][msgId:%s][bornTime:%s][msgHandle:%s]"%(cnt,msg["key"],msg["msgId"],msg["bornTime"],msg["msgHandle"]))
                # acknowledge immediately after printing
                ack_status,ack_data = cons.ack_item(msg["msgHandle"])
                print("DELETE [status:%d][data:%s]"%(ack_status,ack_data))
        else:
            print("Exit!!")
            break
| {
"repo_name": "commissar/aliyun_mq",
"path": "alimq/http/consumer.py",
"copies": "1",
"size": "4873",
"license": "mit",
"hash": -5048887400564776000,
"line_mean": 24.6538461538,
"line_max": 140,
"alpha_frac": 0.5127436282,
"autogenerated": false,
"ratio": 3.4432153392330385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4455958967433038,
"avg_score": null,
"num_lines": null
} |
import MySQLdb
from database.DBHelper import DBHelper
from database.DBConnectManager import DBConnectManager
from resourcefactories.AnalysisInitDefaultValue import AnalysisInitDefaultValue
db_helper = DBHelper()
class DataUtils:
    """Python 2 query layer over the per-app MySQL analysis database.

    Each getter returns a list of rows (tuples) and swallows database
    errors after printing them, returning an empty list on failure.
    """
    def __init__(self):
        print "init DataUtils"
    def get_ActivitiesFromDB(self, db_connector):
        """Return all rows of the `activities` table (activities found by analysis)."""
        activities = []
        if db_connector is not None:
            try:
                query = "select * from activities"
                activities = db_helper.get_Data(db_connector, db_connector.cursor(), query);
            except Exception as e:
                print e
        return activities
    def get_ActivitiesFromXML(self, db_connector):
        """Return all rows of `activities_from_xml` (activities declared in the manifest)."""
        activities = []
        if db_connector is not None:
            try:
                query = "select * from activities_from_xml"
                activities = db_helper.get_Data(db_connector, db_connector.cursor(), query);
            except Exception as e:
                print e
        return activities
    def get_PermissionFromDB(self, db_connector):
        """Return all rows of the `permissions` table (permissions used in code)."""
        permissions = []
        if db_connector is not None:
            try:
                query = "select * from permissions"
                permissions = db_helper.get_Data(db_connector, db_connector.cursor(), query);
            except Exception as e:
                print e
        return permissions
    def get_PermissionFromXML(self, db_connector):
        """Return all rows of `permissions_from_xml` (permissions declared in the manifest)."""
        permissions = []
        if db_connector is not None:
            try:
                query = "select * from permissions_from_xml"
                permissions = db_helper.get_Data(db_connector, db_connector.cursor(), query);
            except Exception as e:
                print e
        return permissions
    def get_PermissionAnalysis(self, db_connector):
        """Return permission-to-call-site detail rows (join of permission_analysis and permissions)."""
        permission_detail = []
        if db_connector is not None:
            try:
                query = "select permission_name, srcClass, srcMethod, srcMethodDes, dstClass, dstMethod, dstMethodDes " \
                        "from permission_analysis P1 INNER JOIN permissions P2 ON P1.permission_id = P2.id;"
                permission_detail = db_helper.get_Data(db_connector, db_connector.cursor(), query);
            except Exception as e:
                print e
        return permission_detail
    def get_PackageFilter_Activity(self,db_connector, activities):
        """Return package_analysis rows whose srcClass matches any given activity name.

        Parameterized LIKE query; ac[1] is assumed to be the activity name
        column of the rows passed in.
        """
        packages = []
        if activities:
            for ac in activities:
                if db_connector is not None:
                    try:
                        select_stmt = "SELECT * FROM package_analysis WHERE srcClass like %(ac_name)s"
                        cursor = db_connector.cursor()
                        cursor.execute(select_stmt, { 'ac_name': "%" + ac[1]+ "%"})
                        rows = cursor.fetchall()
                        packages.extend(rows)
                    except Exception as e:
                        print e
        return packages
    def get_SensitiveAPIs(self, db_connector, table):
        """Return rows of `table` whose dstMethod matches any configured sensitive API.

        NOTE(review): `table` is interpolated into the SQL string — callers
        must only pass trusted table names.
        """
        packages = []
        if db_connector is not None:
            for sen_APIs in AnalysisInitDefaultValue.Sensitive_APIs:
                try:
                    select_stmt = "SELECT package_id, dstClass, dstMethod, dstMethodDes, srcClass, srcMethod, srcMethodDes FROM " + table + " WHERE dstMethod like %(sen_APIs)s"
                    cursor = db_connector.cursor()
                    cursor.execute(select_stmt, {'sen_APIs': "%" + sen_APIs + "%"})
                    rows = cursor.fetchall()
                    packages.extend(rows)
                except Exception as e:
                    print e
        return packages
def get_SensitiveAPIsFromDB(self, db_connector):
sensitive_apis = []
if db_connector is not None:
try:
query = "select * from sensitive_apis"
sensitive_apis = db_helper.get_Data(db_connector, db_connector.cursor(), query);
except Exception as e:
print e
return sensitive_apis | {
"repo_name": "congthuc/androguard-2.0-custom",
"path": "database/DataUtils.py",
"copies": "1",
"size": "4034",
"license": "apache-2.0",
"hash": 5702623493998318000,
"line_mean": 38.1747572816,
"line_max": 176,
"alpha_frac": 0.5654437283,
"autogenerated": false,
"ratio": 4.589306029579067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5654749757879067,
"avg_score": null,
"num_lines": null
} |
import MySQLdb
from database.DBHelper import DBHelper
from database.DBConnectManager import DBConnectManager
from database.DataUtils import DataUtils
from resourcefactories.AnalysisInitDefaultValue import AnalysisInitDefaultValue
db_helper = DBHelper()
dataUtils = DataUtils()
class FileUltils:
    """Python 2 report writer: dumps per-app analysis data into a text file.

    Each method opens its own DB connection (keyed by the upper-cased app
    name) and appends a section to the supplied writable file object.
    NOTE: the class name's spelling ("FileUltils") is kept — callers
    import it under this name.
    """
    def __init__(self):
        print "init FileUtils"
    def write_app_info_to_file(self, appName, file_obj):
        """Write the single app_info row (name, package, versions, target) as a section."""
        db_connector = DBConnectManager().get_connection(appName.upper());
        if db_connector is not None:
            try:
                select_stmt = "SELECT * FROM app_info"
                cursor = db_connector.cursor()
                cursor.execute(select_stmt)
                app_info = cursor.fetchone()
                print app_info
                file_obj.write("\t" + "App name: " + app_info[1] +"\n")
                file_obj.write("\t" + "Package: " + app_info[2] +"\n")
                file_obj.write("\t" + "App version code: " + app_info[3] +"\n")
                # NOTE(review): the label below contains a stray method name
                # ("Appwrite_app_activities_to_file") -- left unchanged
                # because it is emitted verbatim into existing reports.
                file_obj.write("\t" + "Appwrite_app_activities_to_file version name: " + app_info[4] +"\n")
                file_obj.write("\t" + "Android Target: " + app_info[5] +"\n")
            except Exception as e:
                print e
    def write_app_activities_to_file(self, appName, file_obj):
        """Write the manifest-declared activities table as a section."""
        db_connector = DBConnectManager().get_connection(appName.upper());
        if db_connector is not None:
            activities = dataUtils.get_ActivitiesFromXML(db_connector)
            file_obj.write("\t" + "---------------------------" +"\n")
            file_obj.write("\t" + "List Activities " +"\n")
            file_obj.write("\t" + "Activity Name " + "\t\t" + "Package Belong " + "\t\t" + "Main Activity" + "\t" + "Start Activity")
            for ac in activities:
                file_obj.write("\n\t" + ac[1] + "\t\t" + ac[2] + "\t\t" + str(ac[4]) + "\t\t\t\t" + str(ac[5]))
    def write_app_permissions_to_file(self, appName, file_obj):
        """Write the permissions declared in AndroidManifest.xml as a section."""
        db_connector = DBConnectManager().get_connection(appName.upper());
        if db_connector is not None:
            permissions = dataUtils.get_PermissionFromXML(db_connector)
            file_obj.write("\n\t" + "---------------------------")
            file_obj.write("\n\t" + "List Permisssions was declared in AndroidManifest.xml:" + "\n")
            for permission in permissions:
                file_obj.write("\t" + permission[1] + "\n")
    def write_all_permissions_to_file(self, appName, file_obj):
        """Write the permissions actually used by the app's code as a section."""
        db_connector = DBConnectManager().get_connection(appName.upper());
        if db_connector is not None:
            permissions = dataUtils.get_PermissionFromDB(db_connector)
            file_obj.write("\n\t" + "---------------------------")
            file_obj.write("\n\t" + "List Permisssions was used in app" + "\n")
            for permission in permissions:
                file_obj.write("\t" + permission[1] + "\n")
    def write_permissions_analysis_to_file(self, appName, file_obj):
        """Write the per-call-site permission analysis rows as a section."""
        db_connector = DBConnectManager().get_connection(appName.upper());
        if db_connector is not None:
            permissions_detail = dataUtils.get_PermissionAnalysis(db_connector)
            file_obj.write("\n\t" + "---------------------------")
            file_obj.write("\n\t" + "Permisssions Analysis Detail" + "\n")
            file_obj.write("\n\t" + "(Permission Name, Source Class, Source Method, Source Method Des, Destination Class, "
                                    "Destination Method, Destination Method Des" + "\n")
            for permission_detail in permissions_detail:
                file_obj.write("\t" + str(permission_detail) + "\n")
def write_sensitive_apis_to_file(self, appName, file_obj):
db_connector = DBConnectManager().get_connection(appName.upper());
if db_connector is not None:
permissions_detail = dataUtils.get_SensitiveAPIsFromDB(db_connector)
file_obj.write("\n\t" + "---------------------------")
file_obj.write("\n\t" + "Sensitive APIs" + "\n")
file_obj.write("\n\t" + "(Source Class, Source Method, Source Method Des, Destination Class, "
"Destination Method, Destination Method Des" + "\n")
for permission_detail in permissions_detail:
file_obj.write("\t" + str(permission_detail) + "\n") | {
"repo_name": "congthuc/androguard-2.0-custom",
"path": "fileUtils/FileUtils.py",
"copies": "1",
"size": "4383",
"license": "apache-2.0",
"hash": -3464572991835412000,
"line_mean": 53.1234567901,
"line_max": 133,
"alpha_frac": 0.5660506502,
"autogenerated": false,
"ratio": 3.752568493150685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9795961151230754,
"avg_score": 0.004531598423986207,
"num_lines": 81
} |
__author__ = 'congthuc'
class TablesFactory(object):
    '''
    Define Tables for saving app analysis result data
    '''
    def __init__(self):
        """Build the dict of CREATE TABLE statements.

        Keys are prefixed with two-digit numbers so that iterating the
        keys in sorted order creates referenced tables before the tables
        that declare foreign keys to them.
        """
        self.tables = dict()
        self.tables['01.APP_INFO'] = (
            "CREATE TABLE IF NOT EXISTS `APP_INFO` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `app_name` varchar(200) NOT NULL UNIQUE,"
            " `app_package` varchar(100),"
            " `app_version_code` varchar(10),"
            " `app_version_name` varchar(100),"
            " `android_target_version` varchar(100),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        # Permissions/activities as declared in AndroidManifest.xml
        # (the *_FROM_XML tables) vs. what analysis finds in code.
        self.tables['02.PERMISSIONS_FROM_XML'] = (
            "CREATE TABLE IF NOT EXISTS `PERMISSIONS_FROM_XML` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `permission_name` varchar(200) NOT NULL UNIQUE,"
            " `permission_des` varchar(200),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['03.ACTIVITIES_FROM_XML'] = (
            "CREATE TABLE IF NOT EXISTS `ACTIVITIES_FROM_XML` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `activity_name` varchar(200) NOT NULL UNIQUE,"
            " `package_belong` varchar(200) NOT NULL,"
            " `activity_des` varchar(200),"
            " `activity_main` TINYINT,"
            " `activity_start` TINYINT,"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['04.SERVICES'] = (
            "CREATE TABLE IF NOT EXISTS `SERVICES` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `service_name` varchar(200) NOT NULL UNIQUE,"
            " `package_belong` varchar(100) NOT NULL,"
            " `service_des` varchar(200),"
            " `service_action` varchar(200),"
            " `service_category` varchar(200),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['05.APP_FILES'] = (
            "CREATE TABLE IF NOT EXISTS `APP_FILES` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `fileName` varchar(200),"
            " `fileDes` varchar(300),"
            " `crc32` varchar(20),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['06.PERMISSIONS'] = (
            "CREATE TABLE IF NOT EXISTS `PERMISSIONS` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `permission_name` varchar(200) NOT NULL UNIQUE,"
            " `permission_des` varchar(200),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['07.PERMISSION_ANALYSIS'] = (
            "CREATE TABLE IF NOT EXISTS `PERMISSION_ANALYSIS` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `permission_id` int(11) NOT NULL,"
            " `srcClass` varchar(200) NOT NULL,"
            " `srcMethod` varchar(150),"
            " `srcMethodDes` varchar(300),"
            " `dstClass` varchar(200) NOT NULL,"
            " `dstMethod` varchar(150),"
            " `dstMethodDes` varchar(300),"
            " PRIMARY KEY (`id`),"
            " CONSTRAINT `permission_id_fk` FOREIGN KEY (`permission_id`) "
            "   REFERENCES `PERMISSIONS` (`id`)"
            ") ENGINE=InnoDB")
        self.tables['08.ACTIVITIES'] = (
            "CREATE TABLE IF NOT EXISTS `ACTIVITIES` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `activity_name` varchar(200) NOT NULL UNIQUE,"
            " `activity_des` varchar(200),"
            " `activity_start` TINYINT,"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['09.ACTIVITY_ANALYSIS'] = (
            "CREATE TABLE IF NOT EXISTS `ACTIVITY_ANALYSIS` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `activity_id` int(11) NOT NULL,"
            " `method` varchar(100) NOT NULL,"
            " `dstClass` varchar(200),"
            " `dstMethod` varchar(150),"
            " `dstMethodDes` varchar(300),"
            " PRIMARY KEY (`id`),"
            " CONSTRAINT `activity_id_fk` FOREIGN KEY (`activity_id`) "
            "   REFERENCES `ACTIVITIES` (`id`)"
            ") ENGINE=InnoDB")
        self.tables['10.PACKAGE_ANALYSIS'] = (
            "CREATE TABLE IF NOT EXISTS `PACKAGE_ANALYSIS` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `dstClass` varchar(200),"
            " `dstMethod` varchar(150),"
            " `dstMethodDes` varchar(300),"
            " `srcClass` varchar(200),"
            " `srcMethod` varchar(150),"
            " `srcMethodDes` varchar(300),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        self.tables['11.PACKAGE_FILTER_ACTIVITY'] = (
            "CREATE TABLE IF NOT EXISTS `PACKAGE_FILTER_ACTIVITY` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `package_id` int(11) NOT NULL,"
            " `dstClass` varchar(200),"
            " `dstMethod` varchar(150),"
            " `dstMethodDes` varchar(300),"
            " `srcClass` varchar(200),"
            " `srcMethod` varchar(150),"
            " `srcMethodDes` varchar(300),"
            " PRIMARY KEY (`id`),"
            " CONSTRAINT `package_id_fk` FOREIGN KEY (`package_id`) "
            "   REFERENCES `PACKAGE_ANALYSIS` (`id`)"
            ") ENGINE=InnoDB")
        self.tables['12.RELATIONSHIP_BETWEEN_ACTIVITY'] = (
            "CREATE TABLE IF NOT EXISTS `RELATIONSHIP_BETWEEN_ACTIVITY` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `currentActivity` varchar(300),"
            " `nextActivity` varchar(300),"
            " `viewName` varchar(300),"
            " PRIMARY KEY (`id`)"
            ") ENGINE=InnoDB")
        # NOTE(review): this FK references PACKAGE_FILTER_ACTIVITY(package_id),
        # which is not a declared key of that table -- confirm MySQL accepts
        # this on the deployed server configuration.
        self.tables['13.SENSITIVE_APIS'] = (
            "CREATE TABLE IF NOT EXISTS `SENSITIVE_APIS` ("
            " `id` int(11) NOT NULL AUTO_INCREMENT,"
            " `package_id` int(11) NOT NULL,"
            " `dstClass` varchar(200),"
            " `dstMethod` varchar(150),"
            " `dstMethodDes` varchar(300),"
            " `srcClass` varchar(200),"
            " `srcMethod` varchar(150),"
            " `srcMethodDes` varchar(300),"
            " PRIMARY KEY (`id`),"
            " CONSTRAINT `package_id_fk_SENSITIVE_APIS` FOREIGN KEY (`package_id`) "
            "   REFERENCES `PACKAGE_FILTER_ACTIVITY` (`package_id`)"
            ") ENGINE=InnoDB")
    def get_tables_define(self):
        """Return the table-name -> CREATE TABLE statement dict."""
        tables_define = self.tables
        return tables_define
| {
"repo_name": "congthuc/androguard-2.0-custom",
"path": "resourcefactories/TablesFactory.py",
"copies": "1",
"size": "6588",
"license": "apache-2.0",
"hash": 4257293397041635000,
"line_mean": 39.6666666667,
"line_max": 85,
"alpha_frac": 0.4998482089,
"autogenerated": false,
"ratio": 3.5136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45134482089,
"avg_score": null,
"num_lines": null
} |
__author__ = 'connor'
# Minimal Python 2 TCP server: accepts one client at a time, reads a
# serialized BoardModel protobuf, prints it, and replies with a greeting.
# first of all import the socket library
import socket
from protos import deck_pb2, hero_pb2, card_pb2, player_model_pb2, weapon_pb2, minion_pb2, update_pb2, board_model_pb2
# next create a socket object
s = socket.socket()
print "Socket successfully created"
# reserve a port on your computer in our
# case it is 12345 but it can be anything
port = 3332
# Next bind to the port
# we have not typed any ip in the ip field
# instead we have inputted an empty string
# this makes the server listen to requests
# coming from other computers on the network
s.bind(('', port))
print "socket binded to %s" %(port)
# put the socket into listening mode (backlog of 5 pending connections)
s.listen(5)
print "socket is listening"
# a forever loop until we interrupt it or
# an error occurs
while True:
    # Establish connection with client.
    c, addr = s.accept()
    print 'Got connection from', addr
    # NOTE(review): a single recv() may not return the whole message for
    # large boards -- assumes the serialized payload fits in 4096 bytes.
    data = c.recv(4096)
    board_model = board_model_pb2.BoardModel()
    board_model.ParseFromString(data)
    print(board_model)
    # send a thank you message to the client.
    c.send('Thank you for connecting')
# Close the connection with the client
c.close() | {
"repo_name": "cmm863/HearthAttack",
"path": "server.py",
"copies": "1",
"size": "1150",
"license": "mit",
"hash": 5590933145123294000,
"line_mean": 27.775,
"line_max": 118,
"alpha_frac": 0.7234782609,
"autogenerated": false,
"ratio": 3.44311377245509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9603188405303209,
"avg_score": 0.012680725610376304,
"num_lines": 40
} |
__author__ = 'connor'
from protos import deck_pb2, hero_pb2, card_pb2, player_model_pb2, weapon_pb2, minion_pb2, update_pb2, board_model_pb2
import json
from helpers import *
import socket
# Card expansion sets searched (in this order) when looking a card up in
# the AllSets.json card database.
expansion_sets = [
    "Basic",
    "Blackrock Mountain",
    "Classic",
    "Curse of Naxxramas",
    "Goblins vs Gnomes",
    "The Grand Tournament",
    "League of Explorers"
]
def update_board(l, home_m, enemy_m):
    """Apply one parsed Hearthstone log line to the two player models.

    l       -- a parsed log line; l[0] is the record type ("Zone" or
               "Power"), the meaning of l[1..3] depends on that type
               (see the comments inside each branch).
    home_m  -- player model protobuf for the local player (mutated).
    enemy_m -- player model protobuf for the opponent (mutated).

    Returns the (possibly updated) (home_m, enemy_m) pair.  When the
    local player's turn starts it also prints a list of suggested plays.
    """
    # NOTE(review): these two lists are never used anywhere in the function.
    opponent = []
    home_team = []
    if l[0] == "Zone":
        # l[0] = TYPE
        # l[1] = ZONE
        # l[2] = ID
        # l[3] = NAME
        if l[1] == "FRIENDLY HAND":
            # Draw Card
            card = get_card_from_deck(home_m, l[3])
            # Look the card's mana cost up in the card database.
            with open("AllSets.json") as json_file:
                data = json.load(json_file)
            cost = 0
            for expansion in expansion_sets:
                for card_json in data[expansion]:
                    try:
                        if card_json["name"] == card.name:
                            cost = card_json["cost"]
                    except:
                        pass
            if card is None:
                # Card was not tracked in the deck; create it from the log
                # line (when a name is present) and add it to the hand.
                if len(l[3]) > 0:
                    card = card_pb2.Card()
                    card.has_been_used = False
                    card.in_hand = False
                    card.name = l[3]
                    card.id = int(l[2])
                    card.mana = cost
                    home_m.hand.extend([card])
                return home_m, enemy_m
            # Move the tracked card from deck to hand.
            card.in_hand = True
            card.id = int(l[2])
            card.mana = cost
            home_m.hand.extend([card])
            home_m.deck.cards.remove(card)
            # When a turn has just started (submit flag set by the
            # CURRENT_PLAYER branch below), print play suggestions.
            if home_m.submit:
                print("----- Suggestions -----")
                print("\n")
                # Greedily suggest cards that exactly match remaining mana.
                while home_m.mana > 0:
                    for card in home_m.hand:
                        if card.mana == home_m.mana:
                            print("Play " + card.name)
                            home_m.mana = home_m.mana - card.mana
                            break
                    home_m.mana = home_m.mana - 1
                # Prefer attacking an enemy taunt minion if one exists.
                target = None
                for minion in enemy_m.minions:
                    try:
                        if minion.taunt is True:
                            target = minion
                    except:
                        pass
                for minion in home_m.minions:
                    try:
                        if minion.divine_shield and target is not None:
                            print(minion.card.name + " Attack " + target.card.name)
                        else:
                            print(minion.card.name + " Attack " + enemy_m.hero.minion.card.name)
                    except:
                        print(minion.card.name + " Attack " + enemy_m.hero.minion.card.name)
                # Weapon suggestion (only the two known weapons are handled).
                try:
                    if home_m.hero.weapon.name == "Light\'s Justice" or home_m.hero.weapon.name == "Coghammer":
                        done = False
                        for minion in enemy_m.minions:
                            if minion.health < home_m.hero.weapon.attack:
                                print(home_m.hero.weapon.name + " Attack " + minion.card.name)
                                done = True
                                break
                        if not done:
                            print(home_m.hero.weapon.name + " Attack " + enemy_m.hero.minion.card.name)
                except:
                    pass
                home_m.submit = False
        elif l[1] == "OPPOSING HAND":
            # Card returned to opp hand
            pass
        elif l[1] == "FRIENDLY PLAY (Hero)":
            # Your Hero: build a fresh 30-health hero minion.
            hero = hero_pb2.Hero()
            minion = minion_pb2.Minion()
            card = card_pb2.Card()
            card.name = l[3]
            card.id = int(l[2])
            minion.card.CopyFrom(card)
            minion.health = 30
            minion.max_health = 30
            minion.damage = 0
            minion.turn_played = 0
            hero.minion.CopyFrom(minion)
            home_m.hero.CopyFrom(hero)
        elif l[1] == "FRIENDLY PLAY (Hero Power)":
            # Your Hero Power
            pass
        elif l[1] == "OPPOSING PLAY (Hero)":
            # Opp Hero: same construction as the friendly hero above.
            hero = hero_pb2.Hero()
            minion = minion_pb2.Minion()
            card = card_pb2.Card()
            card.name = l[3]
            card.id = int(l[2])
            minion.card.CopyFrom(card)
            minion.health = 30
            minion.max_health = 30
            minion.damage = 0
            minion.turn_played = 0
            hero.minion.CopyFrom(minion)
            enemy_m.hero.CopyFrom(hero)
        elif l[1] == "OPPOSING PLAY (Hero Power)":
            # Opp Hero Power
            pass
        elif l[1] == "FRIENDLY DECK":
            # Returned card to deck
            card = get_card_from_hand(home_m, l[2])
            if card is None:
                return home_m, enemy_m
            card.in_hand = False
            home_m.hand.remove(card)
            home_m.deck.cards.extend([card])
        elif l[1] == "FRIENDLY PLAY":
            # Friendly Minion played from hand onto the board.
            card = get_card_from_hand(home_m, l[2])
            if card is None:
                # Not tracked in hand; create it from the log line.
                if len(l[3]) > 0:
                    card = card_pb2.Card()
                    card.has_been_used = False
                    card.in_hand = False
                    card.name = l[3]
                    card.id = int(l[2])
                    minion = return_minion(card)
                    home_m.minions.extend([minion])
                return home_m, enemy_m
            card.in_hand = False
            home_m.hand.remove(card)
            minion = return_minion(card)
            minion.turn_played = home_m.turn_number
            home_m.minions.extend([minion])
            #output_minions(home_m)
        elif l[1] == "OPPOSING PLAY":
            # Opp Minion: the opponent's hand is hidden, so always build
            # the card fresh from the log line.
            card = card_pb2.Card()
            card.has_been_used = False
            card.in_hand = False
            card.name = l[3]
            card.id = int(l[2])
            minion = return_minion(card)
            minion.turn_played = enemy_m.turn_number
            enemy_m.minions.extend([minion])
            #output_minions(enemy_m)
        elif l[1] == "FRIENDLY SECRET":
            # Secret on your side: remove the secret card from the hand.
            try:
                home_m.hand.remove(get_card_from_hand(home_m, int(l[2])))
            except:
                pass
            pass
        elif l[1] == "OPPOSING SECRET":
            # Secret on their side
            pass
        elif l[1] == "FRIENDLY GRAVEYARD":
            # Card is gone: remove it from the board, or failing that,
            # from the hand; also clear a destroyed weapon by name.
            minion = None
            for m in home_m.minions:
                if m.card.id == int(l[2]):
                    minion = m
                    break
            if minion is not None:
                home_m.minions.remove(minion)
            else:
                for c in home_m.hand:
                    if c.id == int(l[2]):
                        home_m.hand.remove(c)
                        break
            try:
                if l[3] == home_m.hero.weapon.name:
                    home_m.hero.weapon = None
            except:
                pass
            #output_minions(home_m)
        elif l[1] == "OPPOSING GRAVEYARD":
            # Opp card is gone
            minion = None
            for m in enemy_m.minions:
                if m.card.id == int(l[2]):
                    minion = m
                    break
            if minion is not None:
                enemy_m.minions.remove(minion)
            #output_minions(enemy_m)
        elif l[1] == "FRIENDLY PLAY (Weapon)":
            # Added weapon to your side; only two weapons have hard-coded
            # stats here.
            weapon = weapon_pb2.Weapon()
            weapon.name = l[3]
            if weapon.name == "Light\'s Justice":
                weapon.attack = 1
                weapon.durability = 4
            elif weapon.name == "Coghammer":
                home_m.hand.remove(get_card_from_hand(home_m, int(l[2])))
                weapon.attack = 2
                weapon.durability = 3
            home_m.hero.weapon.CopyFrom(weapon)
        elif l[1] == "OPPOSING PLAY (Weapon)":
            # Added weapon to opp side
            pass
        elif l[1] == "":
            # Spell Played
            pass
    elif l[0] == "Power":
        # l[0] = TYPE
        # l[1] = TAG
        # l[2] = VALUE
        # l[3] = ENTITY
        ## Occasionally there is no value but only at the beginning
        if l[1] == "PLAYER_ID":
            # Lets us know player ID and name of player Entity.
            # The practice opponent is named "The Innkeeper".
            if "The Innkeeper" in l[3]:
                enemy_m.player_id = int(l[2])
                enemy_m.name = l[3]
            else:
                home_m.player_id = int(l[2])
                home_m.name = l[3]
            pass
        elif l[1] == "TURN":
            # value = turn number
            print("TURN: " + l[2])
            enemy_m.turn_number = int(l[2])
            home_m.turn_number = int(l[2])
        elif l[1] == "RESOURCES":
            # Available mana crystals for whichever player the entity names.
            if l[3] == home_m.name:
                home_m.mana = int(l[2])
                home_m.max_mana = home_m.mana
            else:
                enemy_m.mana = int(l[2])
                enemy_m.max_mana = enemy_m.mana
        elif l[1] == "DAMAGE":
            # Apply damage either to a board minion or, if the entity is a
            # hero card, to the owning hero.
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                minion.damage = int(l[2])
                minion.health = minion.max_health - minion.damage
            elif parse_card_id(l[3])[:4] == "HERO":
                if home_m.player_id == int(parse_player_id(l[3])):
                    home_m.hero.minion.damage = int(l[2])
                    home_m.hero.minion.health = home_m.hero.minion.max_health - home_m.hero.minion.damage
                else:
                    enemy_m.hero.minion.damage = int(l[2])
                    enemy_m.hero.minion.health = enemy_m.hero.minion.max_health - enemy_m.hero.minion.damage
        elif l[1] == "ARMOR":
            if home_m.player_id == int(parse_player_id(l[3])):
                home_m.hero.armor = int(l[2])
            else:
                enemy_m.hero.armor = int(l[2])
        elif l[1] == "HEALTH":
            # Max-health change (buffs/debuffs); current health is then
            # recomputed from max_health and accumulated damage.
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if minion.max_health < int(l[2]):
                    minion.health = minion.health + int(l[2]) - minion.max_health
                minion.max_health = int(l[2])
                if minion.max_health < minion.health:
                    minion.health = minion.max_health
                minion.health = minion.max_health - minion.damage
        elif l[1] == "ATK":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                minion.attack = int(l[2])
        elif l[1] == "FROZEN":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if int(l[2]) == 1:
                    minion.frozen = True
                else:
                    minion.frozen = False
        elif l[1] == "SILENCED":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                minion.silenced = True
        elif l[1] == "TAUNT":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if int(l[2]) == 1:
                    minion.taunt = True
                else:
                    minion.taunt = False
        elif l[1] == "CHARGE":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if int(l[2]) == 1:
                    minion.charge = True
                else:
                    minion.charge = False
        elif l[1] == "WINDFURY":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if int(l[2]) == 1:
                    minion.windfury = True
                else:
                    minion.windfury = False
        elif l[1] == "DEATHRATTLE":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if int(l[2]) == 1:
                    minion.deathrattle = True
                else:
                    minion.deathrattle = False
        elif l[1] == "DIVINE_SHIELD":
            if home_m.player_id == int(parse_player_id(l[3])):
                minion = get_minion_from_field(home_m, parse_id(l[3]))
            else:
                minion = get_minion_from_field(enemy_m, parse_id(l[3]))
            if minion is not None:
                if int(l[2]) == 1:
                    minion.divine_shield = True
                else:
                    minion.divine_shield = False
        elif l[1] == "CURRENT_PLAYER":
            # Track whose turn it is; setting home_m.submit arms the
            # suggestion printout in the FRIENDLY HAND branch above.
            if l[3] == home_m.name:
                print "before if"
                if int(l[2]) == 1:
                    print "before set"
                    home_m.current_player = True
                    home_m.submit = True
                    print "after submit"
                else:
                    home_m.current_player = False
            else:
                if int(l[2]) == 1:
                    enemy_m.current_player = True
                else:
                    enemy_m.current_player = False
    print l
    return home_m, enemy_m
def output_hand(home_m):
    """Print the name of every card currently in the player's hand."""
    print("Hand:")
    for held_card in home_m.hand:
        print("\t" + held_card.name)
def output_deck(home_m):
    """Print the name of every card still in the player's deck."""
    print("Deck:")
    for remaining in home_m.deck.cards:
        print("\t" + remaining.name)
def output_minions(home_m):
    """Print each minion the player currently has on the board."""
    print("Minions:")
    for board_minion in home_m.minions:
        print(board_minion)
def get_card_from_hand(home_m, card_id):
    """Return the card in home_m's hand whose id equals card_id.

    card_id may be an int or a numeric string (log fields arrive as
    strings).  Returns None when no card in the hand has that id.
    """
    wanted = int(card_id)  # convert once instead of on every comparison
    for card in home_m.hand:
        if card.id == wanted:
            return card
    return None
def get_card_from_deck(home_m, card_name):
    """Return the first card in home_m's deck named card_name, or None."""
    matches = (card for card in home_m.deck.cards if card.name == card_name)
    return next(matches, None)
def get_minion_from_field(home_m, card_id):
    """Return the minion on home_m's board whose card id equals card_id.

    card_id may be an int or a numeric string.  Returns None when no
    minion on the board carries that id.
    """
    wanted = int(card_id)  # convert once, not per iteration
    for minion in home_m.minions:
        if minion.card.id == wanted:
            return minion
    return None
def return_minion(card):
    """Build a Minion protobuf for *card*, filling stats, mechanics and
    tribe from the AllSets.json card database.

    NOTE(review): the whole database scan runs inside the `with` block, so
    the file handle stays open for the scan; the bare excepts swallow
    missing 'attack'/'mechanics'/'race' keys for spell-like entries.
    """
    minion = minion_pb2.Minion()
    minion.card.CopyFrom(card)
    with open("AllSets.json") as json_file:
        data = json.load(json_file)
        for expansion in expansion_sets:
            for card_json in data[expansion]:
                if card_json["name"] == card.name:
                    minion.max_health = card_json["health"]
                    minion.health = minion.max_health
                    try:
                        minion.attack = card_json['attack']
                    except:
                        pass
                    minion.damage = 0
                    # Boolean combat mechanics listed on the card.
                    try:
                        for mechanic in card_json['mechanics']:
                            if mechanic == "Taunt":
                                minion.taunt = True
                            elif mechanic == "Divine Shield":
                                minion.divine_shield = True
                            elif mechanic == "Windfury":
                                minion.windfury = True
                            elif mechanic == "Charge":
                                minion.charge = True
                            elif mechanic == "Deathrattle":
                                minion.deathrattle = True
                    except:
                        pass
                    # Tribe/race mapping; cards without a race get NONE.
                    try:
                        race = card_json['race']
                        if race == "Beast":
                            minion.tribe = minion_pb2.Minion.BEAST
                        elif race == "Mech":
                            minion.tribe = minion_pb2.Minion.MECH
                        elif race == "Murloc":
                            minion.tribe = minion_pb2.Minion.MURLOC
                        elif race == "Pirate":
                            minion.tribe = minion_pb2.Minion.PIRATE
                        elif race == "Demon":
                            minion.tribe = minion_pb2.Minion.DEMON
                        elif race == "Dragon":
                            minion.tribe = minion_pb2.Minion.DRAGON
                        elif race == "Totem":
                            minion.tribe = minion_pb2.Minion.TOTEM
                    except:
                        minion.tribe = minion_pb2.Minion.NONE
    return minion
def return_weapon(name):
    """Build a Weapon protobuf for *name*, looking up durability and
    attack in the AllSets.json card database."""
    weapon = weapon_pb2.Weapon()
    weapon.name = name
    with open("AllSets.json") as json_file:
        data = json.load(json_file)
    for expansion in expansion_sets:
        for card_json in data[expansion]:
            if card_json['name'] == weapon.name:
                weapon.durability = card_json['durability']
                weapon.attack = card_json['attack']
            # NOTE(review): this return sits inside the inner loop, so only
            # the first database entry is ever inspected -- looks like an
            # indentation error; confirm against the original repository.
            return weapon | {
"repo_name": "cmm863/HearthAttack",
"path": "handler.py",
"copies": "1",
"size": "18229",
"license": "mit",
"hash": 2709114361891275000,
"line_mean": 37.0584551148,
"line_max": 118,
"alpha_frac": 0.4487903889,
"autogenerated": false,
"ratio": 3.610417904535552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45592082934355516,
"avg_score": null,
"num_lines": null
} |
__author__ = 'connor'
from subprocess import call
import os
import gnupg
import hashlib
import pickle
from identity.Identity import *
from node.Node import OBNode
##
# If program does not exist as a callable on the OS, return None
#
def which(program):
    """Locate *program* on disk, mimicking the shell `which` command.

    If *program* contains a directory component it is checked directly;
    otherwise each entry of $PATH is searched in order.  Returns the
    resolved path, or None when no executable file is found.
    """
    def _executable(candidate):
        # A hit must be a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        return program if _executable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _executable(candidate):
            return candidate
    return None
##
# This class holds the procedure to set up OpenBazaar on first run.
#
class BazaarInit(object):
    ##
    # Creates a new GUID from the signed pubkey.
    # The GUID is sha1(ripemd160(sha256(signed_pubkey)) hexdigest).
    # NOTE(review): 'ripemd160' is only available when the underlying
    # OpenSSL build provides it -- confirm on target platforms.
    # @param signed_pubkey: signed GPG public key
    # @return: new GUID for OpenBazaar
    @staticmethod
    def create_GUID(signed_pubkey):
        sha256 = hashlib.sha256()
        rip160 = hashlib.new('ripemd160')
        sha256.update(signed_pubkey)
        tfs_hash = sha256.digest()
        rip160.update(tfs_hash)
        guid = rip160.hexdigest()
        return hashlib.sha1(guid).digest()
    ##
    # Generates GPG keys non-interactively using the batch file
    # 'init/unattend_init', then opens the resulting keyring.
    # @param gpg_path: path to GPG on the local system
    # @return: gnupg.GPG object bound to the ./identity keyring
    @staticmethod
    def gen_keys(gpg_path):
        call([gpg_path, '--batch', '--gen-key', 'init/unattend_init'])
        return gnupg.GPG(homedir='./identity')
    ##
    # Initializes the OpenBazaar: generates keys, derives a GUID, then
    # builds and persists the node and identity objects.  Exits the
    # process when gpg is not installed.
    # @param port: port to open the node on
    @staticmethod
    def initialize_Bazaar(port):
        gpg_which = which('gpg')
        if gpg_which == None:
            print "You do not have gpg installed. Please install gpg using \'sudo apt-get install gpg\' or some alternative."
            exit()
        else:
            ##
            # Generate the gpg key in the identity directory,
            # export the armored key, create a GUID
            #
            gpg = BazaarInit.gen_keys(gpg_which)
            pub_key_armor = gpg.export_keys(gpg.list_keys()[0]['keyid'])
            priv_key_armor = gpg.export_keys(gpg.list_keys()[0]['keyid'], secret=True)
            guid = BazaarInit.create_GUID(str(gpg.sign(pub_key_armor, binary=True)))
            ##
            # Create Node object
            #
            node = OBNode(guid, port)
            ##
            # Create Identity module
            #
            id_mod = Identity(guid, pub_key_armor, priv_key_armor, gpg)
            ##
            # Dump state of id and node objects for future retrieval.
            #
            id_mod.save()
            node.saveState()
| {
"repo_name": "cgsheeh/SFWR3XA3_Redevelopment",
"path": "src/InitializationMod.py",
"copies": "1",
"size": "2744",
"license": "mit",
"hash": 7444027856174805000,
"line_mean": 29.4888888889,
"line_max": 125,
"alpha_frac": 0.5754373178,
"autogenerated": false,
"ratio": 3.7384196185286105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48138569363286104,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Connor'
__author__ = 'Alex'
import webbrowser
import subprocess
from tkinter import * #Importing Tkinter package
class spiderGUI:
    """Small Tkinter launcher window for PySpider.

    Shows a "Start Here!" button that opens the local WebUI (and starts
    run.bat), plus a "Tutorial!" button that opens the online tutorial.
    """
    def __init__(self, master):
        # All widgets are built immediately; the nested functions are the
        # button callbacks (closures over `master` and the two URLs).
        url = "http://localhost:5000" #WebUI Local Host Address
        url2 = "http://pyspider.readthedocs.org/en/master/tutorial/" #Tutorial Address
        def OpenUrl():
            try:
                webbrowser.open_new(url) #Open WebUI URL
                master.destroy() #Close window after click
            except:
                pass
            # Launch the PySpider server; run.bat must be in the CWD.
            subprocess.Popen("run.bat")
        def OpenUrl2():
            try:
                webbrowser.open_new(url2) #Open Tutorial URL
            except:
                pass
        def window(): #Window Creation Function
            labelfont = ('hel', 12, 'bold') #Label font variable that has preferences for the PySpider label
            label = Label(master, text = 'PySpider Start', bg ='light green') #Create a header label
            label.config(font=labelfont) #Adding fonts
            label.pack() #Placing the label into the Tkinter window
            button = Button(master, fg='red', text = 'Start Here!', command = OpenUrl, font = 'hel') #Create a button that opens localhost
            button.pack(pady=10) #Space in between buttons and placing button in window
            tutButton = Button(master, text = 'Tutorial!', command = OpenUrl2) #Open tutorial URL when button clicked
            tutButton.pack() #Placing the button into the Tkinter window
        window() #Call the window function when SpiderGUI class is called
# Build the root Tk window, apply its look, and attach the spiderGUI.
main = Tk() #TK initialize: create the root window
main.geometry("350x150") #Initial size; the window remains resizable
main.title("Welcome to PySpider!!!") #Window title bar text
main.configure(bg='light green') #Background colour matching the header label
gui = spiderGUI(main) #Build all widgets inside the root window
main.mainloop() #Call main method that opens up the window with the preferences | {
"repo_name": "ConnorDFlynn/Group1PySpider",
"path": "spiderGUI.py",
"copies": "1",
"size": "1979",
"license": "apache-2.0",
"hash": -1685443280024740000,
"line_mean": 40.1489361702,
"line_max": 138,
"alpha_frac": 0.6250631632,
"autogenerated": false,
"ratio": 4.228632478632479,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5353695641832479,
"avg_score": null,
"num_lines": null
} |
import os
import subprocess
import random
import time
def main():
    """Change the desktop background to a random photo every 2 seconds.

    Loops forever (until interrupted).  The search root is hard-coded to
    the author's Google Photos library.
    """
    while True:
        set_background_to_random_picture("/Users/cokelly/Google_Drive/Google Photos/Sorted Photos")
        time.sleep(2)
def set_background_to_random_picture(searchDirectroy):
    """Pick a random picture under *searchDirectroy* and set it as the
    desktop background.

    Directories in the banned list are skipped.  When no picture is
    found (or only banned directories keep coming up) the background is
    left unchanged.
    """
    # Directories that must never supply a wallpaper.
    banned_list = ["/Users/cokelly/Google_Drive/Google Photos/Sorted Photos/100 sleeps of Eduardo", "/Users/cokelly/Google_Drive/Google Photos/Sorted Photos/Snapchat"]
    # BUG FIX: the old code re-rolled only once, so a second unlucky draw
    # could still land on a banned directory.  Retry, bounded so we cannot
    # spin forever if every candidate happens to be banned.
    pictureDirectroy = get_random_directory_of_mainly_pictures(searchDirectroy)
    attempts = 0
    while pictureDirectroy in banned_list and attempts < 10:
        pictureDirectroy = get_random_directory_of_mainly_pictures(searchDirectroy)
        attempts += 1
    if pictureDirectroy in banned_list:
        return  # no allowed directory found; leave the background as-is
    # Get random picture (None when the directory holds no jpg files).
    picture_path = get_random_picture_link(pictureDirectroy)
    if picture_path is not None:
        change_background(picture_path)
def get_random_directory_of_mainly_pictures(searchDirectory):
    """Starting at *searchDirectory*, repeatedly step into a randomly
    chosen subdirectory until reaching a directory whose sampled contents
    are mostly files (presumably photos), and return its path.

    BUG FIX: the original immediately overwrote *searchDirectory* with a
    hard-coded path, silently ignoring the caller's argument.
    """
    # NOTE: despite its name, has_mainly_files_in_directory() returns True
    # when the directory holds mostly *sub-directories* (see its body), so
    # this loop descends while we are still above the photo level.
    while has_mainly_files_in_directory(searchDirectory):
        subdirectories = [entry for entry in os.listdir(searchDirectory)
                          if os.path.isdir(searchDirectory + "/" + entry)]
        if not subdirectories:
            break  # defensive: nothing left to descend into
        searchDirectory = searchDirectory + "/" + random.choice(subdirectories)
    return searchDirectory
def change_background(file_location):
    """Set the macOS desktop picture to *file_location* via AppleScript.

    BUG FIX: the original interpolated the path into an os.system shell
    string, which broke (and was shell-injectable) for paths containing
    quote characters -- e.g. a folder name with an apostrophe.  Passing
    the script to osascript as an argument list avoids the shell
    entirely.  NOTE(review): a double quote inside the path would still
    confuse the AppleScript literal itself.
    """
    script = 'tell application "Finder" to set desktop picture to POSIX file "{}"'.format(file_location)
    subprocess.run(["osascript", "-e", script])
def get_random_picture_link(search_directory):
    """Return the path of a randomly chosen jpg file in *search_directory*,
    or None when the directory contains no (non-hidden) jpg files.

    Cleanup: the original listed the directory twice and left the first
    listing unused; it also hand-rolled random selection via
    int(len * random()).
    """
    print(search_directory)
    # Non-hidden entries whose names end in "jpg" (case-insensitive).
    jpg_files = [entry for entry in os.listdir(search_directory)
                 if not entry.startswith(".") and entry.lower().endswith("jpg")]
    if not jpg_files:
        return None
    return search_directory + "/" + random.choice(jpg_files)
def has_mainly_files_in_directory(search_directory):
    """Return True when the sampled contents of *search_directory* are
    mostly sub-directories.

    NOTE(review): the name is misleading -- the function returns True for
    "mainly directories", and its caller
    (get_random_directory_of_mainly_pictures) relies on exactly that, so
    the behaviour is kept as-is.  Only the first 10 entries are sampled.

    BUG FIX: an empty directory used to raise ZeroDivisionError; it now
    returns False.
    """
    entries = os.listdir(search_directory)
    sample_size = min(len(entries), 10)
    if sample_size == 0:
        return False  # empty directory: nothing is "mainly" anything
    directory_count = sum(
        1 for entry in entries[:sample_size]
        if os.path.isdir(search_directory + "/" + entry)
    )
    return (directory_count / sample_size) > 0.5
if __name__=='__main__':
main()
| {
"repo_name": "c-okelly/small_jobs",
"path": "background_changer.py",
"copies": "1",
"size": "3598",
"license": "mit",
"hash": 8009602477727489000,
"line_mean": 29.2352941176,
"line_max": 167,
"alpha_frac": 0.689827682,
"autogenerated": false,
"ratio": 3.74012474012474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.492995242212474,
"avg_score": null,
"num_lines": null
} |
import os
class DirectoryWalker:
    """Recursively lists files under a directory, optionally filtered by
    a file-name suffix."""

    def fileList(self, target, extension=""):
        """Walk *target* and return {"files", "directores", "otherFiles"}.

        "files" contains matching files from *target* and every subtree;
        "directores" and "otherFiles" cover only *target* itself.  Hidden
        entries (leading dot) are skipped.  NOTE: the key "directores"
        keeps the original (misspelled) public name for compatibility.
        """
        directores = []
        files = []
        otherFiles = []
        for entry in os.listdir(target):
            full_path = target + '/' + entry
            if entry[0] == ".":
                continue  # skip hidden files and directories
            if os.path.isdir(full_path):
                directores.append(full_path)
            elif len(extension) == 0 or entry[-len(extension):] == extension:
                files.append(full_path)
            else:
                otherFiles.append(full_path)
        # Recurse into each immediate subdirectory; only the files of the
        # subtree are merged in (nested directories are not reported).
        for sub_dir in directores:
            files.extend(self.fileList(sub_dir, extension).get("files"))
        return {"files": files, "directores": directores, "otherFiles": otherFiles}
if (__name__ == '__main__'):
    # Ad-hoc smoke test: walks a developer-specific path, so it only
    # works on the original author's machine.
    newOb = DirectoryWalker()
    results = newOb.fileList("/Users/cokelly/Desktop/Reference cards")
    print(results)
| {
"repo_name": "c-okelly/small_jobs",
"path": "directory_lister.py",
"copies": "1",
"size": "1191",
"license": "mit",
"hash": -7051181514750045000,
"line_mean": 23.306122449,
"line_max": 78,
"alpha_frac": 0.5465994962,
"autogenerated": false,
"ratio": 4.299638989169675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5346238485369675,
"avg_score": null,
"num_lines": null
} |
"""
This file will contain all the functions that are required to fetch the extra data stored on external websites.
"""
import json
import re
import urllib.request as request
from info_requests.meta_critic_web_scraper import retieve_person_score
# Will also have all data required from rotton tomamtoes
# The imdb database api. Take name of movie and return info from imdb
def _parse_award_counts(awards):
    """Pull award figures out of OMDb's free-text "Awards" field.

    Returns (oscars, nom_oscars, wins, nominations); each element is the
    matched number (as the string found in the text), or 0 when that
    figure is not mentioned.
    """
    if not awards:
        return 0, 0, 0, 0

    def _matched_number(pattern, index):
        # The regexes match phrases like "Won 2 Oscars"; the number sits
        # at a fixed word position inside the matched phrase.
        match = re.search(pattern, awards)
        return match.group().split(" ")[index] if match else 0

    oscars = _matched_number(r"Won\s\d{1,2}\sOscar", 1)
    nom_oscars = _matched_number(r"Nominated for\s\d{1,2}\sOscar", 2)
    wins = _matched_number(r"\s\d{1,4}\swin", 1)
    nominations = _matched_number(r"\s\d{1,4}\snomination", 1)
    return oscars, nom_oscars, wins, nominations


def _split_genres(genre_string):
    """Split OMDb's "Genre" field (e.g. "Action, Comedy") into three slots.

    BUG FIX: the original assigned the whole genre *list* to genre_1 when
    there were fewer than 2 or more than 3 genres, and left the variables
    undefined when "Genre" was missing; now the first three genre names
    are returned individually, padded with None.
    """
    if not genre_string:
        return None, None, None
    genres = genre_string.replace(",", "").split(" ")
    first_three = (genres + [None, None, None])[:3]
    return first_three[0], first_three[1], first_three[2]


def imdb_data_call(movie):
    """Fetch data for *movie* from the OMDb API (including Rotten
    Tomatoes fields) and return it as a flat dict, or None when OMDb
    reports no match (a message is printed in that case).

    Network errors from urlopen propagate to the caller, as before.

    BUG FIX: when the "Runtime" field could not be parsed, the fallback
    was assigned to a misspelled variable ("runtime" instead of
    "run_time"), so building the result dict crashed with NameError; the
    fallback now lands in run_time as intended.
    """
    # OMDb expects '+'-separated words; ':' must be percent-encoded.
    search_movie = movie.replace(" ", "+").replace("-", "+").replace(":", "%3A")
    request_url = "http://www.omdbapi.com/?t=" + search_movie + "=&plot=short&r=json&tomatoes=true"
    move_data = json.loads(request.urlopen(request_url).read().decode("UTF-8"))
    if move_data.get("Response") != "True":
        print("No response found for movie => " + movie)
        return None
    oscars, nom_oscars, wins, nominations = _parse_award_counts(move_data.get("Awards"))
    genre_1, genre_2, genre_3 = _split_genres(move_data.get("Genre"))
    # Metacritic score of the director, scraped separately; None on failure.
    try:
        director_score = retieve_person_score(move_data.get("Director"))
    except:
        print("Failed to find meta critic score for director " + move_data.get("Director"))
        director_score = None
    # "Released" arrives as "DD Mon YYYY": word 1 is the month name,
    # word 2 the year.  Either stays None when the field is missing/short.
    release_year = None
    release_month = None
    try:
        released = move_data.get("Released").split()
        release_month = released[1]
        release_year = released[2]
    except (AttributeError, IndexError):
        pass
    # "Runtime" arrives as e.g. "123 min" -> 123; fall back to the raw
    # field value when it cannot be parsed.
    try:
        run_time = int(move_data.get("Runtime").split(" ")[0].strip())
    except (AttributeError, ValueError, IndexError):
        run_time = move_data.get("Runtime")
    return {"Title": move_data.get("Title"),
            "imdbID": move_data.get("imdbID"),
            "Director": move_data.get("Director"),
            "imdbRating": move_data.get("imdbRating"),
            "Metascore": move_data.get("Metascore"),
            "Rated": move_data.get("Rated"),
            # Run time in minutes (or the raw string when unparseable)
            "Runtime": run_time,
            # Release information
            "release_year": release_year,
            "release_month": release_month,
            # Up to three genre attributes
            "Genre1": genre_1,
            "Genre2": genre_2,
            "Genre3": genre_3,
            # Formatted awards
            "oscars": oscars,
            "nom_oscars": nom_oscars,
            "wins": wins,
            "nominations": nominations,
            # Rotten Tomatoes ratings.  NOTE(review): the critic/user
            # meters look swapped (tCriticScore <- tomatoUserMeter), but
            # downstream code may rely on this, so the original mapping
            # is preserved.
            "tCriticScore": move_data.get("tomatoUserMeter"),
            "tUserScore": move_data.get("tomatoMeter"),
            # Rotten Tomatoes review counts
            "tNoUsers": move_data.get("tomatoUserReviews"),
            "tNoCritics": move_data.get("tomatoReviews"),
            # Rotten Tomatoes average review scores
            "tUserAverage": move_data.get("tomatoUserRating"),
            "tCriticAverage": move_data.get("tomatoRating"),
            # Director meta critic rating
            "directorMetaRating": director_score
            }
if __name__ == '__main__':
    # Manual smoke test: fetch one movie and print the resulting dict.
    x =imdb_data_call("Candle-to-Water")
    # y =imdb_data_call("Jungle book")
    print(x)
| {
"repo_name": "c-okelly/movie_script_analytics",
"path": "info_requests/information_apis.py",
"copies": "1",
"size": "5789",
"license": "mit",
"hash": -935794344571036000,
"line_mean": 34.2987804878,
"line_max": 140,
"alpha_frac": 0.5030229746,
"autogenerated": false,
"ratio": 4.014563106796117,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5017586081396117,
"avg_score": null,
"num_lines": null
} |
"""
This file will contain the script object. It will take the input of a string object and the name of the script.
Each object will the run functions of the script to generate data from it.
This results can then be called by attributes to be put into a csv document
"""
from text_objects import Speech,Scene_change,Discription,TextWorker
from information_apis import imdb_data_call
import re
import operator
import numpy as np
import nltk
import extra_character_info_for_movie_dict
class MoveDataNotFound(Exception):
    """Raised when the OMDb lookup for a movie fails or returns nothing
    usable."""

    def __init__(self, movie_name):
        # Keep the historically misspelled attribute name for callers.
        self.move_name = movie_name
        # BUG FIX: pass the message to Exception so str(e)/logging show it
        # (and fix the "date" -> "data" typo in the message).
        super().__init__("Failed to find OMDBAPI data for the movie " + movie_name)

    def __repr__(self):
        return "Failed to find OMDBAPI data for the movie " + self.move_name
class Script:
    def __init__(self,script_string,movie_file_name):
        """Build a Script object from the raw screenplay text and file name.

        script_string   -- full text of the screenplay.
        movie_file_name -- file name like "Matrix, The.txt"; used to derive
                           the display title and to look the movie up on OMDb.

        Raises MoveDataNotFound when the OMDb lookup fails or returns
        nothing usable.
        """
        # Set attributes from inputs: one script string and its file name
        self.script = script_string
        # Reformat movie file name into a displayable title
        self.file_name = movie_file_name
        self.movie_title = self.__return_cleaned_name(movie_file_name)
        # Attempt to fetch omdbapi data using the info api
        try:
            self.imdb_dict = imdb_data_call(self.movie_title)
        except:
            self.imdb_dict = None
            raise MoveDataNotFound(movie_file_name)
        # Containers for data derived from the script text
        self.script_info_dict = {}
        self.scene_dict = {}
        self.character_dict = {}
        # Arrays holding the speech/description/scene text objects
        self.__speech_object_array = []
        self.__description_object_array = []
        self.__scene_object_array = []
        if self.imdb_dict != None:
            # Call array builder function
            self.__create_object_arrays_from_script()
            # Finish building objects
            self.__finish_building_objects()
            # Add data to script_info_dict if imdb data exists
            self.__extract_data_from_movie()
        else:
            raise MoveDataNotFound(movie_file_name)
        print("\nScript object",movie_file_name,"has successfully finished (printing from script init)")
        # Testing
        # print(len(self.__speech_object_array),len(self.__description_object_array),len(self.__scene_object_array))
        # for i in self.__speech_object_array:
        #     print(i.speech_count)
    def __repr__(self):
        # Debug-friendly description: movie title plus the source file name.
        return "Moive script object of => " + self.movie_title + " file name => " + self.file_name
def __return_cleaned_name(self,dirty_file_name):
# Clean file name
cleand_name = dirty_file_name.replace("-", " ").replace(".txt", "")
# If last word in file is 'the' move it to front
test_name = cleand_name.split()[-1].lower()
if (test_name == "the"):
cleand_name = cleand_name.replace(", The","").replace(", the","")
cleaned_file_name = "The " + cleand_name
else:
cleaned_file_name = cleand_name
return cleaned_file_name
# Main algorithm: sort through the script and create the correct objects from text sections.
def __create_object_arrays_from_script(self):
    """Split the raw script into blank-line-separated sections and classify
    each one as a scene change, a speech block or a description block.

    Classification is heuristic (upper-case first lines, INT./EXT.
    markers, OMIT filters); misclassifications are expected on unusual
    script formats.
    """
    text_string = self.script
    total_words = len(re.findall("\w+",text_string)) # Create count of total words
    current_word_count = 0 # Running word count, used for % progress through the script
    # Generate string list
    string_list = self.__return_text_list_in_sections(text_string)
    # Cycle through string list and sort. Call function to create object and add to correct array.
    # Fine tuning of the sorting belongs here and in the splitter function.
    for text_section in string_list:
        # Percentage of the way through the script at this section.
        current_word_count += (len(re.findall("\w+",text_section)))
        percentage_count = current_word_count / total_words
        ### Scene Change
        # First line all upper case + EXT. marker => scene change to outside.
        if text_section.split("\n")[0].isupper() and ((re.search('\s{0,3}EXT\.', text_section)) or (re.search('\s{0,3}EXTERIOR\s', text_section))):
            self.__add_scene_change_ob_to_array(text_section,percentage_count,change_to_outside=1)
        # First line all upper case + INT. marker => scene change to inside.
        elif (text_section.split("\n")[0].isupper() and (re.search('\s{0,3}INT\.', text_section) or re.search('\s{0,3}INTERIOR\s', text_section))):
            self.__add_scene_change_ob_to_array(text_section,percentage_count,change_to_outside=0)
        ### Speech
        # First line all upper case, more than one line, fewer than 5 words in
        # the first line (longer lines are treated as description), the first
        # line is not an OMIT/OMITTED marker, and it does not look like a
        # "-month, 1927-" style date stamp.
        elif text_section.split("\n")[0].isupper() and text_section.count("\n") > 1 and len(re.findall("\w+",text_section.split("\n")[0]))< 5 and \
                not re.search("(\s+OMIT\s*)|(\s+OMITTED\s*)",text_section.split("\n")[0]) \
                and not re.match("^\s+-(\w+\s{0,3},?/?){0,4}(\s\d{0,5})?-",text_section.split("\n")[0]):
            self.__add_speech_ob_to_array(text_section,percentage_count)
        # Orphaned speech sections: a bare character name, optionally followed
        # by a parenthesised direction, as the whole first line (and no OMIT).
        elif re.search("\A(\s*[A-Z]+?){1,2}\s*(\(.*\))?\s*\Z",text_section.split("\n")[0]) and not re.search("(\s+OMIT\s*)|(\s+OMITTED\s*)",text_section.split("\n")[0]):
            self.__add_speech_ob_to_array(text_section,percentage_count)
        ### Description / Other
        else:
            # If section is more than 70% whitespace discard it.
            if (text_section.count(" ") > (len(text_section) * 0.7)):
                pass
            else:
                # Normal description section.
                self.__add_discription_ob_to_array(text_section,percentage_count)
# Finish building text objects using information collected in the first pass.
def __finish_building_objects(self):
    """Second pass over the parsed objects.

    Adds a speech-progress counter to every speech object, closes each
    scene's range using the next scene's start (last scene ends at 1),
    attaches the contained speech/description objects to each scene, and
    builds ``self.scene_dict`` keyed "scene_0", "scene_1", ...
    """
    # Total speech words, needed to express progress through dialogue.
    no_speech_words = 0
    for i in self.__speech_object_array:
        no_speech_words += i.no_words
    current_speech_count = 0
    for speech_ob in self.__speech_object_array:
        # Fraction of all speech words spoken before this object.
        percent_through_speech = current_speech_count / no_speech_words
        speech_ob.add_speech_count(percent_through_speech)
        current_speech_count += speech_ob.no_words # Increase word count
    ### Scene objects ###
    # Each scene's finish point is the next scene's start point.
    for no_i in range(len(self.__scene_object_array)):
        current_scene_ob = self.__scene_object_array[no_i]
        try:
            next_scene_ob = self.__scene_object_array[no_i+1]
            current_finish = next_scene_ob.start_count
            current_scene_ob.add_scene_finish_point(current_finish)
        except IndexError:
            # No next scene: this is the last one, it ends at 1.
            current_scene_ob.add_scene_finish_point(1)
    # Attach the speech/description objects falling inside each scene's range.
    for scene_ob in self.__scene_object_array:
        scene_start = scene_ob.start_count
        scene_finish = scene_ob.finish_count
        speech_objects = self.return_object_of_type_in_range(scene_start, scene_finish, speech_normal_count=1)
        description_objects = self.return_object_of_type_in_range(scene_start, scene_finish, discription=1)
        scene_ob.add_object_array(speech_objects,description_objects)
    # Let each scene aggregate its own statistics.
    for scene_ob in self.__scene_object_array:
        scene_ob.build_data_dict()
    # Expose per-scene dicts keyed "scene_<n>" in script order.
    count = 0
    for scene_ob in self.__scene_object_array:
        scene_dict = scene_ob.scene_info_dict
        scene_no = "scene_" + str(count)
        count += 1
        self.scene_dict[scene_no] = scene_dict
# Extract aggregate data for the movie into self.info_dict.
def __extract_data_from_movie(self):
    """Aggregate all statistics for the movie into ``self.info_dict``.

    Covers word counts, words-per-minute (needs imdb_dict), gender and
    speaking-part breakdowns, actor Metacritic averages, sentiment plots,
    scene summaries and language analysis. Also sets
    ``self.character_dict``.

    NOTE(review): the Runtime value from imdb_dict is divided directly —
    assumes imdb_data_call already converted OMDb's "120 min" string to a
    number; confirm upstream.
    """
    ### Word counts
    total_words = len(re.findall("\w+",self.script))
    # Done with a loop so character names are excluded from the speech total.
    no_speech_words = 0
    for i in self.__speech_object_array:
        no_speech_words += i.no_words
    no_descritption_words = len(re.findall("\w+",self.return_string_all_discription()))
    no_scene_change_words = len(re.findall("\w+",self.return_string_all_scene_changes()))
    # Total words captured by the sorting algorithm (names / omitted sections excluded).
    total_captured_words = no_speech_words + no_descritption_words + no_scene_change_words
    ### Percentages
    percent_of_speech = no_speech_words / total_captured_words
    percent_of_description = no_descritption_words / total_captured_words
    ### Words per minute
    gen_words_per_min = total_words / self.imdb_dict.get("Runtime")
    speech_words_per_min = no_speech_words / self.imdb_dict.get("Runtime")
    description_words_per_min = no_descritption_words / self.imdb_dict.get("Runtime")
    ### Generate character list
    imdb_movie_code = self.imdb_dict.get("imdbID")
    character_dict = self.__generate_dict_of_characters(imdb_movie_code)
    no_significant_char_speech_words = character_dict.get("no_cleaned_speech_words")
    # Remove the bookkeeping entry so the dict only holds character sub-dicts.
    del character_dict["no_cleaned_speech_words"]
    # Character info (characters must speak twice to avoid noise).
    no_characters = len(character_dict)
    ### Character gender
    no_female_characters = 0
    female_words_spoken = 0
    no_male_characters = 0
    male_words_spoken = 0
    no_unknown_genders = 0
    unknown_gender_words = 0
    # Count genders; skip entries that are not sub-dicts.
    for character in character_dict:
        try:
            gender = character_dict.get(character).get("gender")
            no_words = character_dict.get(character).get("no_words")
        except AttributeError:
            gender = "list_array"
            no_words = 0
        if gender == "M":
            no_male_characters += 1
            male_words_spoken += no_words
        elif gender == "F":
            no_female_characters += 1
            female_words_spoken += no_words
        elif gender is None:
            no_unknown_genders += 1
            unknown_gender_words += no_words
    ## Percentage of characters in each gender bucket (0 when no characters).
    try:
        percent_male_chars = no_male_characters / no_characters
    except ZeroDivisionError:
        percent_male_chars = 0
    try:
        percent_female_chars = no_female_characters / no_characters
    except ZeroDivisionError:
        percent_female_chars = 0
    try:
        percent_unknown_chars = no_unknown_genders / no_characters
    except ZeroDivisionError:
        percent_unknown_chars = 0
    # Percentage of significant speech words per gender bucket.
    try:
        percent_male_words = male_words_spoken / no_significant_char_speech_words
    except ZeroDivisionError:
        percent_male_words = 0
    try:
        percent_female_words = female_words_spoken / no_significant_char_speech_words
    except ZeroDivisionError:
        percent_female_words = 0
    try:
        percent_unknown_words = unknown_gender_words / no_significant_char_speech_words
    except ZeroDivisionError:
        percent_unknown_words = 0
    # Speaking parts above fixed share-of-dialogue thresholds.
    no_chars_speak_more_5_perent = 0
    no_chars_speak_more_10_perent = 0
    no_chars_speak_more_20_percent = 0
    for character_1 in character_dict:
        try:
            current_char_percentage = character_dict.get(character_1).get("percent_clean_speech")
        except AttributeError: # Current entry is not a sub-dict
            current_char_percentage = 0
        if current_char_percentage is None:
            current_char_percentage = 0
        if current_char_percentage >= 0.05:
            no_chars_speak_more_5_perent += 1
        if current_char_percentage >= 0.1:
            no_chars_speak_more_10_perent += 1
        if current_char_percentage >= 0.2:
            no_chars_speak_more_20_percent += 1
    # Average Metacritic score over actors that have a non-zero score.
    total_score = 0
    count = 0
    for char in character_dict:
        score = (character_dict.get(char).get("meta_critic_score"))
        if score != 0 and score is not None:
            total_score += int(score)
            count += 1
    try:
        actor_average_meta_score = total_score / count
    except:
        actor_average_meta_score = 0
    ### Sentiment ###
    # Character sentiment sign counts.
    no_characters_overall_positive = 0
    no_characters_overall_negative = 0
    no_characters_overall_neutral = 0
    for char_1 in character_dict:
        sentiment = character_dict.get(char_1).get("overall_sentiment")
        if sentiment > 0:
            no_characters_overall_positive += 1
        elif sentiment < 0:
            no_characters_overall_negative += 1
        elif sentiment == 0:
            no_characters_overall_neutral += 1
    # Percent of characters that were not mapped to an actor/gender.
    no_chars_not_mapped = no_unknown_genders
    try:
        percent_chars_not_mapped = no_chars_not_mapped / (len(character_dict))
    except ZeroDivisionError:
        percent_chars_not_mapped = 1
    # Sentiment plots over the whole movie in 5% buckets.
    sentiment_plot_of_speech = self.__return_sentiment_of_object_array(0.05,speech_array=1)
    sentiment_plot_of_description = self.__return_sentiment_of_object_array(0.05,description_array=1)
    overall_sentiment_plot = self.__return_sentiment_of_object_array(0.05,overall_plot=1)
    average_speech_sentiment = self.__average_of_non_zeros_in_array(sentiment_plot_of_speech)
    average_description_sentiment = self.__average_of_non_zeros_in_array(sentiment_plot_of_description)
    average_overall_sentiment = self.__average_of_non_zeros_in_array(overall_sentiment_plot)
    ### Top 5 characters by share of dialogue
    speech_percent_array = []
    for char in character_dict:
        speech_percent_array.append([character_dict.get(char).get("character_name"),character_dict.get(char).get("percent_clean_speech")])
    # Sort descending by speech share.
    speech_array_sorted = sorted(speech_percent_array, key=lambda x: x[1], reverse=True)
    stop_point = 5
    if len(speech_array_sorted)< 5:
        stop_point = len(speech_array_sorted)
    top_5_characters_dict = {}
    for i in range(0,stop_point):
        top_5_characters_dict[speech_array_sorted[i][0]] = character_dict.get(speech_array_sorted[i][0])
    # Average Metacritic score of the top-5 cast.
    running_score = 0
    non_zero_scores = 0
    for char in top_5_characters_dict:
        score = top_5_characters_dict.get(char).get("meta_critic_score")
        if score != 0 and score is not None:
            running_score += int(score)
            non_zero_scores += 1
    try:
        average_meta_critic_for_top_5 = running_score/non_zero_scores
    except ZeroDivisionError:
        average_meta_critic_for_top_5 = 0
    # Scene summaries.
    ## NOTE(review): known to not return logically correct results —
    ## no_scenes counts the DESCRIPTION array, not the scene array, and
    ## no_mixed_scenes is computed before the loop below updates
    ## no_description_only_scene, so it always equals no_scenes.
    no_scenes = len(self.__description_object_array)
    no_description_only_scene = 0
    no_mixed_scenes = no_scenes - no_description_only_scene
    no_scene_only_1_main_char = 0
    no_scene_only_2_main_char = 0
    # Cycle through scenes and tally their types.
    for scene in self.__scene_object_array:
        scene_info_dict = scene.scene_info_dict
        # Scene type
        scene_only_description = scene_info_dict.get("scene_only_description")
        if scene_only_description == 1:
            no_description_only_scene += 1
        # Scene interaction type
        scene_1_main = scene_info_dict.get("scene_interaction_dict").get("one_main_character")
        scene_2_main = scene_info_dict.get("scene_interaction_dict").get("two_main_character")
        if scene_1_main == 1:
            no_scene_only_1_main_char += 1
        elif scene_2_main == 1:
            no_scene_only_2_main_char += 1
    # Categories of language used => adverbs / adjectives.
    language_analysis = TextWorker()
    speech_language_dict = language_analysis.return_language_analysis_dict(self.return_string_of_all_speech())
    description_language_dict = language_analysis.return_language_analysis_dict(self.return_string_all_discription())
    ### Publish results on the instance.
    self.character_dict = character_dict
    self.info_dict = {"total_words":total_words,
                      "total_caputred_words":total_captured_words,
                      "no_speech_words":no_speech_words,
                      "no_description_words":no_descritption_words,
                      "no_scene_change_words":no_scene_change_words,
                      "no_significant_char_words_per_mins":no_significant_char_speech_words,
                      # Percentages
                      "percent_speech":percent_of_speech,
                      "percent_description":percent_of_description,
                      # WPM
                      "words_per_min":gen_words_per_min,
                      "speech_wpm":speech_words_per_min,
                      "description_wpm":description_words_per_min,
                      # IMDB code
                      "imdb_code":imdb_movie_code,
                      # Gender general
                      "no_characters":no_characters,
                      "no_female_chars":no_female_characters,
                      "no_male_chars":no_male_characters,
                      "no_unknown_char_gender":no_unknown_genders,
                      "no_female_words":female_words_spoken,
                      "no_male_words":male_words_spoken,
                      "no_unknown_gender_words":unknown_gender_words,
                      # Gender analysis
                      "percent_male_chars":percent_male_chars,
                      "percent_female_chars":percent_female_chars,
                      "percent_unknown_chars":percent_unknown_chars,
                      "percent_male_words":percent_male_words,
                      "percent_female_words":percent_female_words,
                      "percent_unknown_words":percent_unknown_words,
                      # Speaking
                      "chars_speach_no_then_5_percent":no_chars_speak_more_5_perent,
                      "chars_speach_no_then_10_percent":no_chars_speak_more_10_perent,
                      "chars_speach_no_then_20_percent":no_chars_speak_more_20_percent,
                      # Average score
                      "average_meta_critic_score":actor_average_meta_score,
                      # Average top 5 score
                      "top_5_meta_critic_score":average_meta_critic_for_top_5,
                      # Character sentiment
                      "no_positive_chars":no_characters_overall_positive,
                      "no_negative_chars":no_characters_overall_negative,
                      "no_neutral_chars":no_characters_overall_neutral,
                      # Mapping levels
                      "no_characters_not_mapped":no_chars_not_mapped,
                      "percent_chars_not_mapped":percent_chars_not_mapped,
                      # Sentiment plots
                      "speech_sent_plot":sentiment_plot_of_speech,
                      "description_sent_plot":sentiment_plot_of_description,
                      "overall_sentiment_plot":overall_sentiment_plot,
                      "average_speech_sent":average_speech_sentiment,
                      "average_description_sent":average_description_sentiment,
                      "average_overall_sent":average_overall_sentiment,
                      # Scenes
                      "no_scenes":no_scenes,
                      "no_description_only_scenes":no_description_only_scene,
                      "no_mixed_scenes":no_mixed_scenes,
                      "no_scenes_1_main_char":no_scene_only_1_main_char,
                      "no_scenes_2_main_chars":no_scene_only_2_main_char,
                      # Language analysis
                      "speech_language_dict":speech_language_dict,
                      "description_language_dict":description_language_dict
                      }
def __add_scene_change_ob_to_array(self,text,count,change_to_outside):
    """Wrap one scene-change section in a Scene_change object and store it."""
    new_scene = Scene_change(text,count,change_to_outside)
    self.__scene_object_array.append(new_scene)
def __add_speech_ob_to_array(self,text,count):
    """Wrap one speech section in a Speech object and store it."""
    new_speech = Speech(text,count)
    self.__speech_object_array.append(new_speech)
def __add_discription_ob_to_array(self,text,count):
    """Wrap one description section in a Discription object and store it."""
    new_description = Discription(text,count)
    self.__description_object_array.append(new_description)
def __return_text_list_in_sections(self,text_file):
# Split text on lines break
split_on_empty_line = re.split("\n", text_file)
split_on_empty_line.append("") # Add empty item to end of list
# Cycle through item in list. If line has content add to new string.
# If line empty append new string to list and reset new string.
# Will break section up on empty line breaks. This can def be done better but I care not.
new_sections_list = []
temp_section_stirng = ""
### Algo to recombine all the line together
## => potentially usefull regex to search for sections that is marked after page break informally and continue => ^\s*(\w)?(\d{1,4}){1,5}\sCONTINUED:\s(\(\d{0,4}\))?(\s\d{1,4})?
#
# Check the line is not either empty or all white space
for line in split_on_empty_line:
if (re.search("^\s*$",line)): # if line is only white space
new_sections_list.append(temp_section_stirng)
temp_section_stirng = ""
elif len(line) > 0:
temp_section_stirng += line + "\n"
else:
new_sections_list.append(temp_section_stirng)
temp_section_stirng = ""
# Remove items that have a length of 0
no_empty_list_item = []
for section in new_sections_list:
if (len(section) != 0):
no_empty_list_item.append(section)
return no_empty_list_item
# Fetch objects in specified range. It will return object pointers in a specified range.
# Four main serach types. To search that array type set one of them equal to 1
def return_object_of_type_in_range(self, start, finish, speech_normal_count=0, speech_speech_count=0, discription=0, scene=0): # Untested!!
object_array = []
selection_total = speech_normal_count + speech_speech_count + discription + scene
# Check that only one option has been selected
if selection_total > 1:
print("More then one type was selected")
object_array = None
elif selection_total == 0:
print("Type was not set")
object_array = None
# Search array
elif speech_normal_count == 1: # Measured by general count
search_array = self.__speech_object_array
# Return correct objects
for ob in search_array:
if ob.count > start and ob.count < finish:
object_array.append(ob)
elif speech_speech_count == 1: # Measured by speech count
search_array = self.__speech_object_array
# Return correct objects
for ob in search_array:
if ob.speech_count > start and ob.speech_count < finish:
object_array.append(ob)
elif discription == 1:
search_array = self.__description_object_array
# Return correct objects
for ob in search_array:
if ob.count > start and ob.count < finish:
object_array.append(ob)
# Scene object are returned depending on their start location
elif scene:
search_array = self.__scene_object_array
# Return correct objects
for ob in search_array:
if ob.start_count > start and ob.start_count < finish:
object_array.append(ob)
else:
print("Type was not set")
object_array = None
return object_array
def update_imdb_dict(self,new_search_name):
    """Re-run the OMDb lookup under a different title and rebuild info_dict.

    Returns 1 on success.

    Raises:
        MoveDataNotFound: on any failure.
        NOTE(review): the bare except also converts errors raised while
        re-extracting data into "movie not found" — confirm intended.
    """
    try:
        self.imdb_dict = imdb_data_call(new_search_name)
        self.__extract_data_from_movie()
    except:
        raise MoveDataNotFound(new_search_name)
    return 1
def return_string_of_all_speech(self):
speech_string = ""
for speech_ob in self.__speech_object_array:
speech_string += speech_ob.text
return speech_string
def return_string_all_discription(self):
discription_string = ""
for discrip_ob in self.__description_object_array:
discription_string += discrip_ob.text
return discription_string
def return_string_all_scene_changes(self):
scene_string = ""
for scene_ob in self.__scene_object_array:
scene_string += scene_ob.text
return scene_string
## Used to generate the dict of characters and related information.
def __generate_dict_of_characters(self,imdb_movie_code):
    """Build the per-character info dict for this movie.

    Counts appearances and words per character, drops noise characters
    (< 30 words, numeric names, fewer than 2 appearances), attaches
    sentiment plots and language analysis to the survivors, then passes
    the dict to the external actor-mapping step. The returned dict also
    carries one bookkeeping key, "no_cleaned_speech_words" (removed by
    the caller).
    """
    characters_dict = {}
    # For the external info function there are formatting requirements:
    # the dict must hold each character as a sub-dict keyed by name, and
    # each sub-dict must contain at least:
    # 1. "character_name": name  2. "no_appearances": times appeared
    for speech_ob in self.__speech_object_array:
        character_name = speech_ob.character
        if characters_dict.get(character_name):
            currenct_dict = characters_dict.get(character_name)
            currenct_dict["no_appearances"] += 1
        else:
            characters_dict[character_name] = {"character_name":character_name,"no_appearances":1}
    ### Add the word count for each character.
    for character in characters_dict:
        current_char_name = characters_dict.get(character).get("character_name")
        character_string = self.__get_string_character_speech(current_char_name)
        no_words_for_char = len(re.findall("\w+",character_string))
        characters_dict.get(character)["no_words"] = no_words_for_char
    ### Remove noise characters and total the remaining speech words.
    cleaned_characters_dict = {}
    total_speech_cleaned = 0
    for character_1 in characters_dict:
        currenct_dict_1 = characters_dict.get(character_1)
        current_name_1 = currenct_dict_1.get("character_name")
        # Keep only characters with > 30 words, a non-numeric name and at
        # least 2 appearances.
        if currenct_dict_1.get("no_words") > 30 and not re.match("^\w?\d{1,4}\w?$",current_name_1) and currenct_dict_1.get("no_appearances") >= 2:
            cleaned_characters_dict[current_name_1] = currenct_dict_1
            total_speech_cleaned += currenct_dict_1.get("no_words")
        else:
            pass
    ### Add each character's share of the cleaned speech total.
    # NOTE(review): this iterates the FULL dict, so noise characters also
    # receive a percentage (their sub-dicts are shared with the cleaned dict
    # where kept) — confirm intended.
    for character_2 in characters_dict:
        current_dict_2 = characters_dict.get(character_2)
        no_words_for_char_2 = current_dict_2.get("no_words")
        try:
            percentage_of_speech = no_words_for_char_2 / total_speech_cleaned
        except ZeroDivisionError:
            percentage_of_speech = 0
        current_dict_2["percent_clean_speech"] = percentage_of_speech
    ### Sentiment plot and overall sentiment for each kept character.
    for character_3 in cleaned_characters_dict:
        current_dict_3 = cleaned_characters_dict.get(character_3)
        char_name_3 = current_dict_3.get("character_name")
        # Bucket width for the sentiment plot. ##special_key_value##
        range_selection = 0.05
        sent = self.__get_char_sentiment_plot_overall_sentiment(char_name_3, range_selection)
        # sent is a list of [key, value] pairs; merge them into the dict.
        for array_item in sent:
            current_dict_3[array_item[0]] = array_item[1]
    ### Text analysis of each character: language dict + word frequencies.
    analysis_object = TextWorker()
    for character_4 in cleaned_characters_dict:
        current_dict_4 = cleaned_characters_dict.get(character_4)
        character_name_4 = current_dict_4.get("character_name")
        character_string_4 = self.__get_string_character_speech(character_name_4)
        language_analysis_dict = analysis_object.return_language_analysis_dict(character_string_4)
        frequency_of_non_stop_words = self.__word_count_analysis(character_string_4)
        current_dict_4["language_analysis_dict"] = language_analysis_dict
        # Not useful at this point in time.
        current_dict_4["freq_of_non_stop_words"] = frequency_of_non_stop_words
    ### External step: map characters to actors and add Metacritic rating,
    # gender and the imdb character name (creates many HTML requests).
    updated_dict = extra_character_info_for_movie_dict.add_extra_info_to_current_dict(cleaned_characters_dict,imdb_movie_code)
    ### Add the cleaned speech-word total as a bookkeeping entry.
    updated_dict["no_cleaned_speech_words"] = total_speech_cleaned
    return updated_dict
## Not used
def __get_chracter_info_by_name(self,seach_name):
# Usd as builder for __add_extra_info_to_characters_dict
sentiment = 0
count = 0
word_count = 0
for object in self.__speech_object_array:
if object.character == seach_name.upper():
count += 1
sentiment += object.sentiment
word_count += object.no_words
average_sentiment = sentiment / count
# Greate return dict
info_dict = {"word_count": word_count, "sentiment": average_sentiment}
return info_dict
def __get_string_character_speech(self,search_name):
return_string = ""
for object in self.__speech_object_array:
# print(object.text)
if object.character == search_name.upper():
return_string += object.cleaned_text
# print(return_string)
return return_string
def __get_character_object_by_name_and_range(self,name,start_range,finish_range):
obj_return_array = []
rn_of_objects = self.return_object_of_type_in_range(start=start_range,finish=finish_range,speech_normal_count=1)
for obj in rn_of_objects:
if obj.character == name:
obj_return_array.append(obj)
return obj_return_array
def __return_sentiment_summary_of_array(self,sentiment_array):
no_non_zero_values = 0
running_total_sentiment = 0
# print(sentiment_array)
if len(sentiment_array) > 0:
for text_ob in sentiment_array:
current_sentiment = text_ob.sentiment
if abs(current_sentiment) > 0:
no_non_zero_values += 1
running_total_sentiment += current_sentiment
else:
average_sentiment = 0
try:
average_sentiment = running_total_sentiment / no_non_zero_values
except ZeroDivisionError:
average_sentiment = 0
return average_sentiment
def __return_sentiment_of_object_array(self,range,speech_array=0,description_array=0,overall_plot=0):
    """Average sentiment per progress bucket of width ``range`` across the movie.

    Exactly one of the three flags should be 1 (otherwise every bucket is
    0). Returns one average per bucket; buckets with no non-zero
    sentiments contribute 0.

    NOTE(review): the parameter ``range`` shadows the builtin; kept
    because callers pass it positionally and the builtin is unused here.
    """
    return_array = []
    search_array = []  # NOTE(review): unused, kept for byte-compatibility.
    for i in np.arange(0,1,range):
        start = i
        finish = i + range
        # Pick the objects falling inside this bucket for the chosen type.
        if speech_array == 1:
            current_object_ran = self.return_object_of_type_in_range(start,finish,speech_normal_count=1)
        elif description_array == 1:
            current_object_ran = self.return_object_of_type_in_range(start,finish,discription=1)
        elif overall_plot == 1:
            current_object_ran = self.return_object_of_type_in_range(start,finish,speech_normal_count=1) + self.return_object_of_type_in_range(start,finish,discription=1)
        else:
            current_object_ran = []
        # Average only the non-zero sentiments in this bucket.
        non_zero_count = 0
        running_total = 0
        for ob in current_object_ran:
            sent = ob.sentiment
            if sent != 0:
                non_zero_count += 1
                running_total += sent
        try:
            section_sentiment = running_total / non_zero_count
        except ZeroDivisionError:
            section_sentiment = 0
        return_array.append(section_sentiment)
    return return_array
## To be used for refactoring of the character analysis.
def __get_char_sentiment_plot_overall_sentiment(self, char_name_3, range_selection):
    """Build one character's sentiment plot plus an overall average.

    Returns a list of [key, value] pairs (merged into the character's
    dict by the caller): "sentiment_plot" — one average per
    range_selection-wide progress bucket; "sentiment_plot_range" — the
    bucket width; "overall_sentiment" — mean of the non-zero plot points
    (0 when there are none).
    """
    return_array = []
    text_object_in_selected_ranges = []
    # Collect this character's speech objects bucket by bucket.
    for i in np.arange(0,1,range_selection):
        start = i
        finish = i + range_selection
        object_range = self.__get_character_object_by_name_and_range(char_name_3,start,finish)
        text_object_in_selected_ranges.append(object_range)
    # Convert each bucket's objects into one averaged plot point.
    sentiment_plot_array = []
    for sentiment_array in text_object_in_selected_ranges:
        avg_sent_for_section = self.__return_sentiment_summary_of_array(sentiment_array)
        sentiment_plot_array.append(avg_sent_for_section)
    # Overall sentiment = sum of plot / number of non-zero points.
    non_zero_items = 0
    for i in sentiment_plot_array:
        if abs(i) > 0:
            non_zero_items += 1
    try:
        over_all_sentiment = sum(sentiment_plot_array) / non_zero_items
    except ZeroDivisionError:
        over_all_sentiment = 0
    # Package the results as key/value pairs.
    return_array.append(["sentiment_plot",sentiment_plot_array])
    return_array.append(["sentiment_plot_range",range_selection])
    return_array.append(["overall_sentiment",over_all_sentiment])
    return return_array
def __word_count_analysis(self,string_to_be_analysed,length_frequency_array=10):
    """Return the most frequent non-stop words as [word, frequency] pairs.

    length_frequency_array caps how many pairs are returned (default 10).
    """
    default_stopwords = set(nltk.corpus.stopwords.words('english'))
    # Tokenize all words.
    token_words = nltk.word_tokenize(string_to_be_analysed)
    # Keep only words longer than 2 characters (the previous comment
    # claiming "shorter than 1" was wrong).
    token_words = [word for word in token_words if len(word) > 2]
    # Remove numbers.
    token_words = [word for word in token_words if not word.isnumeric()]
    # Lower case all words.
    token_words = [word.lower() for word in token_words]
    # Remove stop words.
    non_stop_words = [word for word in token_words if word not in default_stopwords]
    word_f_dist = nltk.FreqDist(non_stop_words)
    # Collect the top `length_frequency_array` words.
    most_frequent_words = []
    for word, frequency in word_f_dist.most_common(length_frequency_array):
        most_frequent_words.append([word,frequency])
    return most_frequent_words
def __average_of_non_zeros_in_array(self,array):
total = 0
non_zero_count = 0
for i in array:
if i != 0:
total += i
non_zero_count += 1
try:
average = total / non_zero_count
except ZeroDivisionError:
average = 0
return average
# This will attempt to capture the level of error that has occoured
def generate_error_report(self):
# Checks that at least 95 % of words make it into the object arrays.
total_words = len(re.findall("\w+",self.script))
no_speech_words = len(re.findall("\w+",self.return_string_of_all_speech()))
no_discritption_words = len(re.findall("\w+",self.return_string_all_discription()))
no_scene_words = len(re.findall("\w+",self.return_string_all_scene_changes()))
words_captured = (no_speech_words+ no_discritption_words+ no_scene_words) / total_words
# No Characters with less then 1 speaking part as percentage of whole => possible mis name if high
# Amount of script capture by main characters
return words_captured
if __name__ == '__main__':
    # Manual smoke test: parse one bundled script and dump its scene dict.
    # Requires the ../Data/scripts_text fixture and a working OMDb lookup.
    with open("../Data/scripts_text/Alien-Nation.txt") as file:
        text_file = file.read()
    test_script = Script(text_file,"Alien-Nation.txt")
    print("Done!")
    print(test_script.scene_dict)
| {
"repo_name": "c-okelly/movie_script_analytics",
"path": "objects/script_object.py",
"copies": "1",
"size": "42380",
"license": "mit",
"hash": 6595503693308392000,
"line_mean": 41.0854021847,
"line_max": 185,
"alpha_frac": 0.5920245399,
"autogenerated": false,
"ratio": 3.953358208955224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006259396460508986,
"num_lines": 1007
} |
"""
This package will attempted to take the name of a director from the IMDB api.
This will the return the average career score of that director.
"""
from bs4 import BeautifulSoup
import urllib.request as request
import urllib
import re
import unicodedata
class MetaCriticRequestFailed(Exception):
    """Raised when a Metacritic lookup for a person cannot be completed."""

    def __init__(self,actor_name):
        # Fix: forward the message to Exception so str(e) is no longer
        # empty when the exception is printed or logged.
        super().__init__("Failed meta critic request for actor name " + actor_name)
        # Kept so callers can still inspect which lookup failed.
        self.actor_name = actor_name

    def __repr__(self):
        return "Failed meta critic request for actor name " + self.actor_name
def retieve_person_score(search_name):
    """Scrape metacritic.com for a person's average career review score.

    Returns the score as a string of digits, None when the page has no
    parsable score, or 0 when the request or parse fails entirely.
    """
    # Build the slug metacritic uses: lower-case, hyphenated, ASCII only.
    search_name_formated = search_name.lower().replace(" ", "-").replace(".", "")
    normalise_name = str(unicodedata.normalize('NFKD', search_name_formated).encode('ascii', 'ignore'))
    # str(bytes) yields "b'...'"; strip the leading b and the quote characters.
    strip_encoding = normalise_name[1:].replace("'", "")
    # Create request url
    request_url = "http://www.metacritic.com/person/" + strip_encoding
    # Spoof a browser User-Agent; the site rejects the default urllib agent.
    req = request.Request(
        request_url,
        data=None,
        headers={
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
        }
    )
    # Do request and catch any errors
    try:
        html_page = request.urlopen(req)
        cleaned_html = BeautifulSoup(html_page, "html.parser")
        # Find the review-average table row and keep only its text.
        average_reviews_section = cleaned_html.find("tr", {"class": "review_average"}).getText()
        try:
            score = re.search(r"\d{1,2}", average_reviews_section).group()
        except AttributeError:
            # re.search returned None: no digits in the review-average row.
            score = None
        return score
    except urllib.error.HTTPError as err:
        if err.code != 404:
            # BUG FIX: err.code is an int; the original `err.code + "..."`
            # string concatenation raised TypeError instead of reporting.
            print("{} error for search term => {}".format(err.code, search_name))
        # 404s (unknown person) are silently treated as "no score".
        return 0
    except Exception:
        # Any other failure (network, missing page element) => "no score".
        return 0
if __name__ == '__main__':
    # Manual smoke test, deliberately using a name with a non-ASCII character
    # to exercise the slug normalisation.
    print("Start")
    x = retieve_person_score("Stellan Skarsgård")
    # y = retieve_person_score('Scarlett Johansson')
print(x) | {
"repo_name": "c-okelly/movie_script_analytics",
"path": "info_requests/meta_critic_web_scraper.py",
"copies": "1",
"size": "2250",
"license": "mit",
"hash": -3513287055765272600,
"line_mean": 28.2207792208,
"line_max": 143,
"alpha_frac": 0.6322810138,
"autogenerated": false,
"ratio": 3.6808510638297873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48131320776297876,
"avg_score": null,
"num_lines": null
} |
"""
This package will create have a main function that takes the argument of an imdb code and a character dict
It will match the character in the movie to an actor using imdb. Then add gender and meta critic rating of actor
This will then read in a csv file containing data for characters
There are two main variables on this page that may affect data output.
First is the row count no to break on set in scrape_and_format_page_info. This limits the no of cast members in the dict generated
Currently set to 25 or first section of credited members
Second is the error margin on the regex fuzzy search. This currently allows for up to 15 insertions and 2 substitutions but tries without subs first
"""
from bs4 import BeautifulSoup
import urllib.request as request
import urllib
import re
import regex
import meta_critic_web_scraper
class UrlRequestFailed(Exception):
    """Raised when an IMDb page for a movie/actor code cannot be fetched."""

    def __init__(self, imdb_code):
        # BUG FIX: pass a readable message to Exception.__init__ so that
        # str(e) and logging output are useful (previously str(e) was empty).
        super().__init__("Failed to find OMDBAPI page for the movie / actor code " + imdb_code)
        self.imdb_code = imdb_code

    def __repr__(self):
        return "Failed to find OMDBAPI page for the movie / actor code " + self.imdb_code
def scrape_and_format_page_info(imdb_code):
    """Scrape the IMDb full-credits page for a title into a character dict.

    Returns a dict keyed by upper-cased character name; each value holds the
    actor's name and nm-link code plus numbered character_name_N /
    character_link_N entries (one per "A / B" alias).  A 'list_of_characters'
    key carries every credited character name.  Raises UrlRequestFailed when
    the page cannot be fetched.
    """
    # Create request url
    request_url = "http://www.imdb.com/title/" + imdb_code + "/fullcredits"
    # Execute request; any failure is surfaced as UrlRequestFailed.
    try:
        html_page = request.urlopen(request_url).read()
    except:
        raise UrlRequestFailed(imdb_code)
    # print("Page recieved")
    cleaned_html = BeautifulSoup(html_page,"html.parser")
    cast_section = cleaned_html.find("table", {"class": "cast_list"}) # Find the cast table (includes html)
    only_rows = cast_section.findAll('tr') # Divide table up into rows
    character_dict = {}
    list_of_characters = []
    row_count = 0
    # Cycle through each row, extract data and add a dict per character.
    # Row 0 is the table header, so start at 1.
    for i in range(1,len(only_rows)):
        row = only_rows[i]
        row_count += 1
        # Single-cell rows are section separators (e.g. before uncredited
        # cast); re-enable the row_count check to cap at 25 cast members.
        if len(row) == 1:# or row_count > 25:
            pass
        else:
            # Find all individual cells in current row
            cells = row.findAll("td")
            # Cell 0 is only an image; cell 1 has the actor link and name.
            actor_link_code = re.search("(nm\d+)",str(cells[1])).group().replace("/","")
            actor_name = cells[1].getText().strip().replace("\n","")
            # print(actor_link_code,actor_name)
            # Cell 2 is only "..."; cell 3 has the character info (possibly
            # several aliases separated by "/").
            character_link = re.findall("(ch\d+)",str(cells[3]))
            character_name = cells[3].getText().replace("\n","").split("/") # Remove html, strip, split on "/"
            # Create new character dict
            single_character = {"actor_name":actor_name.strip(),"actor_link_code":actor_link_code}
            # Add a name and link entry for each alias of this character.
            # NOTE(review): the inner loop reuses `i`, shadowing the row
            # index; harmless here since `for` reassigns it each iteration.
            count = 0
            for i in character_name:
                char_name = "character_name_" + str(count).strip()
                name_to_inset = character_name[count].strip().upper()
                # print(name_to_inset)
                name_to_inset = re.sub("\((\s*\w+)*\)","",name_to_inset)# Remove bracketed info, e.g. "(voice)"
                # print(name_to_inset)
                single_character[char_name] = name_to_inset # Name without voice information
                char_link = "character_link_" + str(count)
                # Some characters may not have an IMDb character link.
                try:
                    single_character[char_link] = character_link[count]
                except:
                    single_character[char_link] = ""
                # Add character name to list of character names
                list_of_characters.append(i.replace("(voice)","").strip().upper())
                count += 1
            for i in range(len(character_name)):
                # Index the same character dict under every alias.
                name = single_character.get("character_name_" + str(i)).strip()
                character_dict[name] = single_character # Whole name
                # print(single_character.get("actor_name"),single_character.get("character_name_0"))
    character_dict['list_of_characters'] = list_of_characters
    # print(character_dict)
    return character_dict
def add_gender_and_meta_critic_info(character_dict): # Add extra info to the dicts. Using sqllite db to minimise number of calls
    """Annotate every character entry with the actor's gender and Metacritic score.

    Mutates and returns character_dict.  Entries without an actor link or
    name get None for the corresponding new field, and no network call is
    made for them.
    """
    # To be reformatted in the future to use a database that is built with each new call.
    # Cut short as ran out of time for this part of the project. Will revisit later.
    # Cycle through each actor in dict
    for character in character_dict:
        current_dict = character_dict.get(character)
        current_actor_link_code = current_dict.get("actor_link_code")
        current_actor_name = current_dict.get("actor_name")
        # Idiom fix: compare against None with `is not None`, not `!=`.
        if current_actor_link_code is not None:
            gender = get_gender_from_actor_id(current_actor_link_code)
        else:
            gender = None
        if current_actor_name is not None:
            # print(current_actor_name)
            meta_critic_score = meta_critic_web_scraper.retieve_person_score(current_actor_name)
        else:
            meta_critic_score = None
        current_dict["gender"] = gender
        current_dict["meta_critic_score"] = meta_critic_score
    return character_dict
def get_gender_from_actor_id(actor_id):
    """Infer gender ('M'/'F'/None) from the job title on an IMDb name page.

    Raises UrlRequestFailed when the page cannot be fetched.
    """
    page_url = "http://www.imdb.com/name/" + actor_id + "/"
    try:
        raw_html = request.urlopen(page_url).read()
    except:
        raise UrlRequestFailed(actor_id)
    soup = BeautifulSoup(raw_html, "html.parser")
    # The info bar under the name lists the person's jobs ("Actor", ...).
    info_text = soup.find("div", {"class": "infobar"}).getText().lower()
    # "actor" is tested first, matching the original lookup order.
    if "actor" in info_text:
        return "M"
    if "actress" in info_text:
        return "F"
    return None
def combine_dicts_together(basic_dict,imdb_actor_info_dict):
    """Fuzzy-match script character names against IMDb cast names and merge.

    basic_dict maps script character names to dicts with at least
    'character_name'; imdb_actor_info_dict is the output of
    scrape_and_format_page_info.  Matching uses the `regex` module's fuzzy
    syntax against a "!NAME!NAME!..." haystack string.  Unmatched characters
    get None for the actor fields.  Returns a new dict keyed by the script
    character name.
    """
    # Turn the character list into a single string with ! before and after
    # every name (a "," delimiter risked clashing with names).
    character_list = imdb_actor_info_dict.get("list_of_characters")
    cleaned_character_list = []
    for char in character_list:
        cleaned_char = re.sub("\((\s*\w+.)*\){1,5}","",char).strip() # Remove information in brackets
        cleaned_character_list.append(cleaned_char)
    character_string = "!" + "!".join(cleaned_character_list) + "!"
    # print(cleaned_character_list)
    finished_combined_dict = {}
    for char_dict in basic_dict:
        # Fuzzy-search the haystack for the current script character name.
        script_character_name = basic_dict.get(char_dict).get("character_name")
        if not re.search("^\w?\d{1,4}",script_character_name.strip()): # Skip mostly-numeric names => normally a sorting error
            # First pass: allow up to 15 insertions only.
            search_object = regex.search(r"!("+script_character_name+"){i<=15}!",character_string , regex.BESTMATCH) # Only insertions
            # print(script_character_name)
            #### Commented out as was adding noise only ####
            # if search_object == None: # If nothing found allows substitutions
            #     search_object = regex.search(r"!("+script_character_name+"){i<=10,s<=2}!",character_string , regex.BESTMATCH)
            # Second pass: retry with just the first word of the name.
            if search_object == None and len(script_character_name.split(" ")) >= 2: # Search using first word only
                script_character_name = script_character_name.split(" ")[0]
                search_object = regex.search(r"!("+script_character_name+"){i<=1}!",character_string, regex.BESTMATCH)
                # print(search_object, script_character_name)
            # Third pass: allow up to two deletions.
            if search_object == None: # Allow search for substring with deletions
                search_object = regex.search(r"!("+script_character_name+"){d<=2}!",character_string , regex.BESTMATCH)
                # print(search_object, script_character_name, "3")
        else:
            search_object = None
        # print(script_character_name,search_object)
        # Take the best match, if any; reject matches that span two names.
        try:
            closest_character_match = search_object.group()
            # A single name is wrapped in exactly two "!" delimiters.
            if closest_character_match.count("!")<= 2:
                closest_character_match = closest_character_match.replace("!","")
            else:
                # print("Double match. Error from extra_character_info_file combine function",closest_character_match,script_character_name)
                closest_character_match = None
        except:
            # search_object was None (no match at all).
            closest_character_match = None
        # print(script_character_name," matched to ",closest_character_match)
        # print(search_object, "\n")
        basic_dict_copy = basic_dict.get(char_dict)
        correct_imdb_dict = imdb_actor_info_dict.get(closest_character_match)
        # print(imdb_actor_info_dict)
        ### Add to this section to change what information is combined into the final dict
        ###
        if correct_imdb_dict != None:
            # Merge imdb into basic
            actor_name = correct_imdb_dict.get("actor_name")
            actor_link_code = correct_imdb_dict.get("actor_link_code")
            basic_dict_copy["actor_name"] = actor_name
            basic_dict_copy["actor_link_code"] = actor_link_code
            # Each character contributes a name + link pair; the other two
            # keys are actor_name / actor_link_code, hence (len - 2) / 2.
            no_chars_in_dict = (len(correct_imdb_dict)-2)/2
            # Find which numbered alias matched and copy its name/link.
            for i in range(0,int(no_chars_in_dict)):
                if closest_character_match == correct_imdb_dict.get("character_name_" + str(i)):
                    full_character_name = correct_imdb_dict.get("character_name_" + str(i))
                    basic_dict_copy["full_character_name"] = full_character_name
                    character_link_no = correct_imdb_dict.get("character_link_" + str(i))
                    basic_dict_copy["character_link_no"] = character_link_no
        else:
            basic_dict_copy["actor_name"] = None
            basic_dict_copy["actor_link_code"] = None
            basic_dict_copy["character_link_no"] = None
            basic_dict_copy["full_character_name"] = None
        # Add results to return dict
        finished_combined_dict[basic_dict_copy.get("character_name")] = basic_dict_copy
    return finished_combined_dict
def add_extra_info_to_current_dict(basic_character_dict,imdb_movie_code):
    """Enrich a basic character dict with IMDb cast, gender and score info.

    Scrapes the IMDb full-credits page for imdb_movie_code, fuzzy-merges it
    into basic_character_dict, then adds gender and Metacritic data.
    """
    scraped_cast = scrape_and_format_page_info(imdb_movie_code)
    merged_dict = combine_dicts_together(basic_character_dict, scraped_cast)
    # Second pass annotates every matched actor with gender + score.
    return add_gender_and_meta_critic_info(merged_dict)
if __name__ == '__main__':
    # Manual smoke test using the cast of The Avengers (tt0848228).
    character_list = [['TONY', 134], ['NICK FURY', 118], ['BANNER', 80], ['STEVE', 77], ['NATASHA', 74], ['LOKI', 74], ['THOR', 50], ['CONTEXT NAME', 50], ['AGENT PHIL', 46], ['CAPTAIN AMERICA', 43], ['PEPPER', 27], ['IRON MAN', 27], ['CLINT BARTON', 23], ['AGENT MARIA', 22], ['BLACK WIDOW', 20], ['WORLD SECURITY', 18], ['SELVIG', 15], ['JARVIS', 11], ['HAWKEYE', 11], ['SECURITY GUARD', 8], ['THE OTHER', 8], ['LUCHKOV', 6], ['OUTSIDE THE', 3], ['POLICE SERGEANT', 3], ['LITTLE GIRL', 3], ['BARTON', 3]]
    # Build the minimal per-character dicts the merge function expects.
    basic_character_dict = {}
    for name, appearances in character_list:
        basic_character_dict[name] = {
            "character_name": name,
            "no_appearances": appearances,
        }
    try:
        finished_dict = add_extra_info_to_current_dict(basic_character_dict, 'tt0848228')
        print(finished_dict)
    except Exception as e:
        print(e)
| {
"repo_name": "c-okelly/movie_script_analytics",
"path": "info_requests/extra_character_info_for_movie_dict.py",
"copies": "1",
"size": "12268",
"license": "mit",
"hash": -664205220848119900,
"line_mean": 41.0136986301,
"line_max": 506,
"alpha_frac": 0.6226768829,
"autogenerated": false,
"ratio": 3.840951784596118,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9912894006246946,
"avg_score": 0.01014693224983423,
"num_lines": 292
} |
from bs4 import BeautifulSoup
import os
import sys
def clean_html_and_save_to_file(file_directory,file_name,save_directory):
    """Strip the HTML from one scraped script page and save it as plain text.

    Keeps the text between the "ALL SCRIPTS" marker and the "Back to IMSDb"
    footer; the output file reuses the input name with a .txt extension.
    A missing input file is reported and skipped.
    """
    try:
        #File location
        file_path = file_directory + file_name
        # Open file and turn into beautiful soup object
        with open(file_path, "r",encoding="utf-8",errors="ignore") as file:
            soup_object = BeautifulSoup(file,"html.parser")
        # Extract only text from document
        text_section = soup_object.getText()
        str_text_section = str(text_section)
        # Trim to the script body: "ALL SCRIPTS" marks the start and
        # "Back to IMSDb" the end of the script.
        start = str_text_section.find("ALL SCRIPTS")
        finish = str_text_section.find("Back to IMSDb")
        cleaned_text = str_text_section[start:finish]
        # Save text to file
        new_file_name = file_name.replace(".html",".txt") # change file extension
        save_path = save_directory + new_file_name
        # BUG FIX: the output file was opened without ever being closed;
        # the context manager guarantees it is flushed and released.
        with open(save_path,"w") as new_file:
            new_file.write(cleaned_text)
    except FileNotFoundError:
        # BUG FIX: opening a missing input raises FileNotFoundError, not
        # FileExistsError as the original caught (which never fired).
        print("File not found")
def run_text_cleaner_for_directory(file_directory, save_directory):
    """Convert every scraped HTML script in file_directory to plain text.

    PDFs and hidden files are skipped.  A failure on an individual file is
    reported and does not stop the batch.
    """
    list_all_files = os.listdir(file_directory)
    # Keep only visible, non-PDF entries.
    list_files_to_convert = [
        item for item in list_all_files
        if item[-3:] != "pdf" and item[0:1] != "."
    ]
    print(list_files_to_convert)
    # Run all items through the single-file cleaner, best-effort.
    for file in list_files_to_convert:
        try:
            clean_html_and_save_to_file(file_directory, file, save_directory)
            # print(file)
        except Exception as exc:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit still stop the batch; report and move on.
            print("The file %s has failed to convert" % file)
            print(type(exc))
if __name__ == '__main__':
    # Batch-convert every scraped HTML script into plain text files.
    print("Start")
    file_directory = "Data/scripts_html/"
    save_directory = "Data/scripts_text/"
    run_text_cleaner_for_directory(file_directory,save_directory)
# clean_html_and_save_to_file(file_directory,"12.html",save_directory) | {
"repo_name": "c-okelly/movie_script_analytics",
"path": "Data_phraser.py",
"copies": "1",
"size": "2398",
"license": "mit",
"hash": -2816304925529633000,
"line_mean": 34.8059701493,
"line_max": 98,
"alpha_frac": 0.6267723103,
"autogenerated": false,
"ratio": 3.6779141104294477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48046864207294476,
"avg_score": null,
"num_lines": null
} |
__author__="cooke"
__date__ ="$02-Feb-2012 15:26:23$"
import logging
import os
from subprocess import Popen
import tempfile
from experimentcontrol.core.InertiaTechnologySocketDriver import InertiaTechnologySocketDriver
from sofiehdfformat.core.SofieFileUtils import importdata
NETKEY = '\xB9\xA5\x21\xFB\xBD\x72\xC3\x45'
TMPLOGFILE='tmp-output.csv';
IMURUNEXTENSION='/imu'
IMUPORT=1234
IMUHOST='127.0.0.1'
class IntertiaTechnologyListener(object):
    """
    Listener for the Inertia Technology IMU. One needs to install the PromoveGUI to be able to get this
    working.

    Lifecycle: open() launches the ProMoveGUI process and starts recording to
    a temp CSV; close() stops recording, imports the CSV into the Sofie HDF
    output file and removes the temp files.
    """
    def __init__(self,outfile,runName,serial,port=IMUPORT,host=IMUHOST):
        # Command line used to launch the vendor GUI that exposes the socket.
        self.processString = ['/usr/bin/ProMoveGUI','-p 1234'];
        self.driver = InertiaTechnologySocketDriver(
            host=host,port=port,device=serial,
            mode='w')
        self.outfile = outfile;
        # Runs for this device are namespaced under the '/imu' suffix.
        self.runName = runName+IMURUNEXTENSION;
        # Recording goes to a private temp dir, imported on close().
        self.tmpdir = tempfile.mkdtemp()
        self.tmplogfile = os.path.join(self.tmpdir, TMPLOGFILE)
    def __del__(self):
        # Best-effort cleanup of the socket and the GUI child process.
        self.driver.close();
        self.process.terminate()
    def open(self):
        """Launch ProMoveGUI, connect the socket driver and start recording."""
        logging.debug('Executing command: '+str(self.processString))
        self.process = Popen(self.processString)
        #Setup the logger:
        logging.debug('Opening Socket')
        self.driver.open();
        self.driver.startRecording(self.tmplogfile);
    def sync(self):
        """Emit an RTC trigger so runs can be time-aligned across devices."""
        self.driver.rtcTrigger();
    def close(self):
        """Stop recording, import the temp CSV into outfile, then clean up."""
        self.driver.stopRecording();
        if os.path.isfile(self.tmplogfile):
            importdata(self.tmplogfile,
                self.outfile,
                self.runName,
                'description',
                True,
                False)
        # Remove the temp log and its directory if they still exist.
        if os.path.isfile(self.tmplogfile):
            os.remove(self.tmplogfile)
        if os.path.isdir(self.tmpdir):
            os.rmdir(self.tmpdir)
self.process.terminate() | {
"repo_name": "agcooke/ExperimentControl",
"path": "experimentcontrol/core/InertiaTechnologyListener.py",
"copies": "1",
"size": "1962",
"license": "bsd-3-clause",
"hash": 7588407092035645000,
"line_mean": 31.1803278689,
"line_max": 103,
"alpha_frac": 0.6345565749,
"autogenerated": false,
"ratio": 3.66044776119403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.479500433609403,
"avg_score": null,
"num_lines": null
} |
__author__="cooke"
__date__ ="$02-Feb-2012 15:26:23$"
"""
Initialize a basic broadcast slave channel for listening to
an ANT+ Bicycle cadence and speed senser, using raw messages
and event handlers.
"""
import logging
import time
from time import sleep
from ant.core import driver
from ant.core import event
from ant.core.message import ChannelAssignMessage
from ant.core.message import ChannelFrequencyMessage
from ant.core.message import ChannelIDMessage
from ant.core.message import ChannelOpenMessage
from ant.core.message import ChannelPeriodMessage
from ant.core.message import ChannelSearchTimeoutMessage
from ant.core.message import NetworkKeyMessage
from ant.core.message import SystemResetMessage
from experimentcontrol.core.SofieHdfFormatLogWriter import LogWriter
NETKEY = '\xB9\xA5\x21\xFB\xBD\x72\xC3\x45'
# Event callback
class MyCallback(event.EventCallback):
    """ANT event callback that intentionally discards every message.

    Raw messages are captured elsewhere (via the driver's log writer), so
    nothing needs to happen per-event here.
    """
    def process(self, msg):
        pass
class AntPlusListener(object):
    """
    Used to start and stop the ant plus listener.

    open() configures a broadcast slave channel for an ANT+ bicycle
    cadence/speed sensor using raw messages; close() resets the stick.
    NOTE(review): this module uses Python 2 print statements (see sync()).
    """
    def __init__(self,outfile,runName,serial):
        self.outfile = outfile;
        self.runName = runName;
        self.serial = serial;
        self.logger = None;
    def open(self):
        """Initialise the USB stick and configure the receive channel.

        Each configuration message is followed by a 1 s sleep; the original
        waitForAck checks are left commented out below.
        """
        #Setup the logger:
        self.logger = LogWriter(filename=self.outfile,runName=self.runName)
        # Initialize driver
        self.stick = driver.USB1Driver(self.serial, log=self.logger,debug=True,
            baud_rate=4800)
        self.stick.open()
        # Initialize event machine
        self.evm = event.EventMachine(self.stick)
        self.evm.registerCallback(MyCallback())
        self.evm.start()
        # Reset
        logging.debug( "\n\n-------------------------------\n:")
        logging.debug( "Setting UP")
        logging.debug( "\n\n-------------------------------\n")
        msg = SystemResetMessage()
        self.stick.write(msg.encode())
        time.sleep(1)
        # Set network key
        msg = NetworkKeyMessage(key=NETKEY)
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UP: SETTING NETWORK KEY')
        #     sys.exit()
        # Initialize it as a receiving channel using our network key
        msg = ChannelAssignMessage()
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UP: INITIALISING AS RECEIVING CHANNEL')
        #     sys.exit()
        # Now set the channel id for pairing with an ANT+ bike cadence/speed sensor
        msg = ChannelIDMessage(device_type=121)
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UP: SETTING CHANNEL ID ')
        #     sys.exit()
        # Listen forever and ever (not really, but for a long time)
        msg = ChannelSearchTimeoutMessage(timeout=255)
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UPL LISTENING TIMEOUT')
        #     sys.exit()
        # We want a ~4.05 Hz transmission period
        msg = ChannelPeriodMessage(period=8085)
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UP: TRANSMISSION FREQUENCY')
        #     sys.exit()
        # And ANT frequency 57, of course
        msg = ChannelFrequencyMessage(frequency=57)
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UP: SETTING FREQUENCY')
        #     sys.exit()
        # Time to go live
        msg = ChannelOpenMessage()
        self.stick.write(msg.encode())
        sleep(1);
        # if self.evm.waitForAck(msg) != RESPONSE_NO_ERROR:
        #     logging.debug( 'ERROR SETTING UP: GOING LIVE')
        #     sys.exit()
        logging.debug( "\n\n-------------------------------\n:")
        logging.debug( "Listening for ANT events: Press CTRL+C to Exit.")
        logging.debug( "\n\n-------------------------------\n")
    def close(self):
        """Reset the ANT stick; the event machine/stick teardown is disabled."""
        logging.debug( "\n\n-------------------------------\nShutting down:")
        logging.debug( "\n\n-------------------------------\n")
        msg = SystemResetMessage()
        self.stick.write(msg.encode())
        time.sleep(1)
        #self.evm.stop()
        #self.stick.close()
    def sync(self):
        # Marks a synchronisation point in the log (Python 2 print statement).
        print 'ANT SYNCING'
self.logger.sync() | {
"repo_name": "agcooke/ExperimentControl",
"path": "experimentcontrol/core/AntPlusListener.py",
"copies": "1",
"size": "4660",
"license": "bsd-3-clause",
"hash": -1018453320749772500,
"line_mean": 33.7835820896,
"line_max": 83,
"alpha_frac": 0.6030042918,
"autogenerated": false,
"ratio": 3.6264591439688716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9668540742202295,
"avg_score": 0.012184538713315318,
"num_lines": 134
} |
__author__ = 'cook'
from sensor import *
import RPIO as GPIO
# sensor is a string with the file name for the sensor from modprobe
# zones_dict has sensor as a key; the value is a list with a zone description string and the GPIO pin the zone is
# connected to.
class Zone:
    def __init__(self, sensor, zones_dict):
        """Bind a heating zone to its 1-Wire sensor and GPIO pin.

        sensor: 1-Wire slave id used to read the temperature.
        zones_dict: maps sensor id -> [zone description, GPIO pin number].
        Raises TemperatureReadError when the sensor id is not in zones_dict
        (surfaced from the KeyError on the dict lookup).
        """
        try:
            self.sensor = sensor
            self.zone_name = zones_dict[sensor][0]
            self.GPIO = zones_dict[self.sensor][1]
            self.switch = None
            # Constructing Temperature performs an initial sensor read.
            self.temp = Temperature(sensor)
            self.goal = self.read_goal()
            GPIO.setup(self.GPIO, GPIO.OUT)
        except KeyError:
            raise TemperatureReadError
def __str__(self):
return self.zone_name + ' was ' + str(self.temp) + ' at ' + str(self.temp.time)
    def get_current_temp(self):
        # Refresh the cached Temperature object from the 1-Wire sensor.
        self.temp.read_temp()
def thermostat(self):
self.get_current_temp()
try:
self.goal = self.read_goal()
except TemperatureReadError:
raise
if self.temp > self.goal: # this could be more complicated
self.set_zone('off')
else:
self.set_zone('on')
def set_zone(self, onoff):
if onoff == 'on':
GPIO.output(self.GPIO,1)
else:
GPIO.output(self.GPIO,0)
def read_goal(self):
with open(self.sensor+'.txt', 'r', encoding='utf-8') as f:
try:
temp = f.readline()
temp = temp.rstrip('\r\n')
return float(temp)
except ValueError:
raise TemperatureReadError
def write_temp_db(self, db):
## tdate - datetime
## ttime - datetime
## sensor - which is a string
## zone - string
## celcius - Number
## fahrenheit - Number
cursor = db.cnx.cursor()
add_temp = ("INSERT INTO temps " "(tdate, ttime, sensor, zone, celcius, fahrenheit)"
"VALUES(%s, %s, $s, %s, %s, %s)")
temp_data = (self.temp.time.date, self.temp.time.time, self.sensor, self.sensor_location,
self.temp.celcius, self.temp.fahrenheit)
cursor.execute(add_temp, temp_data) | {
"repo_name": "darwyncook/thermostat",
"path": "zones.py",
"copies": "1",
"size": "2234",
"license": "mit",
"hash": 765768136045542300,
"line_mean": 33.3846153846,
"line_max": 112,
"alpha_frac": 0.5447627574,
"autogenerated": false,
"ratio": 3.6622950819672133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47070578393672136,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cook'
import os
import datetime
class TemperatureReadError(ValueError):
    """Raised when a 1-Wire sensor reading fails or cannot be parsed."""
    # NOTE(review): callers stash the failing sensor on this *class*
    # attribute rather than on the raised instance, so concurrent readers
    # would clobber each other; consider an instance attribute instead.
    sensor = None
class Temperature:
    """One reading from a DS18B20-style 1-Wire sensor via the w1 sysfs file.

    Attributes: celcius / fahrenheit (initialised to -90 sentinel values
    until a successful read), time (datetime of the last good read) and
    sensor (the w1 slave id).
    """
    def __init__(self, temp_sensor): # performs an initial read on construction
        # Ensure the 1-Wire kernel modules are loaded.
        os.system('modprobe w1-gpio')
        os.system('modprobe w1-therm')
        self.celcius = -90
        self.fahrenheit = -90
        self.time = ''
        self.sensor = temp_sensor
        try:
            self.read_temp()
        except KeyError:
            # NOTE(review): read_temp raises TemperatureReadError, never
            # KeyError, so this handler appears unreachable — confirm intent.
            TemperatureReadError.zone = temp_sensor
            raise TemperatureReadError
    def read_temp(self):
        """Read the sensor's w1_slave file and update celcius/fahrenheit/time.

        Raises TemperatureReadError on a failed CRC ('YES' marker missing),
        a missing 't=' field, or any file/parse problem.
        """
        try:
            sensor_file = '/sys/bus/w1/devices/' + self.sensor + '/w1_slave'
            with open(sensor_file, 'r+', encoding='utf-8') as temp_sensor:
                temps = temp_sensor.readlines()
                print('In the read_temp procedure\n', temps)
                temps[0] = temps[0].rstrip('\r\n')
                temps[1] = temps[1].rstrip('\r\n')
                # Line 1 ends in 'YES' when the sensor's CRC check passed.
                if 'YES' not in temps[0]:
                    # NOTE(review): `temp_sensor` here is the open file
                    # object, not the sensor id — probably meant self.sensor.
                    TemperatureReadError.sensor = temp_sensor
                    raise TemperatureReadError
                temps[1] = temps[1].strip()
                # Line 2 carries the raw reading after 't=' in milli-degrees C.
                temp_position = temps[1].find('t=')
                if temp_position != -1: # 't=' found
                    self.celcius = float(temps[1][temp_position+2:])/1000
                    self.fahrenheit = self.celcius*9/5+32.0
                    self.time = datetime.datetime.now()
                else:
                    TemperatureReadError.sensor = temp_sensor
                    raise TemperatureReadError
        # ValueError if the temp can't be converted to float, IndexError if
        # the file had too few lines, IOError for file-system problems
        # (FileNotFoundError is a subclass of IOError/OSError on Python 3).
        except (ValueError, IndexError, IOError):
            print('There was a problem with the file\n')
            TemperatureReadError.sensor = temp_sensor
            raise TemperatureReadError
    def __str__(self):
        # Readings are reported in Fahrenheit.
        return str(self.fahrenheit)
"repo_name": "darwyncook/thermostat",
"path": "sensor.py",
"copies": "1",
"size": "2114",
"license": "mit",
"hash": -723725678291327000,
"line_mean": 38.9056603774,
"line_max": 100,
"alpha_frac": 0.5520340587,
"autogenerated": false,
"ratio": 4.096899224806202,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014568215152025467,
"num_lines": 53
} |
__author__ = 'coolspeed'
# 3999 / 3999 test cases passed.
# Status: Accepted
# Runtime: 108 ms
class Solution:
    """LeetCode 12 — convert an integer in [1, 3999] to a Roman numeral."""
    # @param {integer} num
    # @return {string}
    def intToRoman(self, num):
        ret = ""
        # For each decimal weight: the "one" and "five" glyphs at that weight.
        # (No glyph above M is needed because num <= 3999.)
        dic = {
            1: ('I', 'V'),
            10: ('X', 'L'),
            100: ('C', 'D'),
            1000: ('M', '_')
        }
        for weight in [1000, 100, 10, 1]:
            # BUG FIX (portability): `/` is float division on Python 3, which
            # broke the digit extraction; `//` behaves identically on Python 2.
            curr_digit = (num // weight) % 10
            one, five = dic[weight]
            if curr_digit < 4:
                # 0..3 -> repeat the "one" glyph (e.g. 3 -> "III").
                ret += curr_digit * one
            elif curr_digit == 4:
                # 4 -> subtractive form (e.g. "IV").
                ret += one + five
            elif 5 <= curr_digit <= 8:
                # 5..8 -> five glyph plus (digit - 5) ones (e.g. 7 -> "VII").
                # (String multiplication replaces the Python 2-only xrange loop.)
                ret += five + one * (curr_digit - 5)
            elif curr_digit == 9:
                # 9 -> one before the next weight's "one" glyph (e.g. "IX").
                ten = dic[weight * 10][0]
                ret += one + ten
        return ret
# Manual smoke test.
mysolution = Solution()
input_num = 3333
ret = mysolution.intToRoman(input_num)
# %-formatting in function-call form prints identically on Python 2 and 3
# (the original `print "ret =", ret` statement is a SyntaxError on Python 3).
print("ret = %s" % ret)
| {
"repo_name": "coolspeed/leetcode",
"path": "src/12_interger_to_roman.py",
"copies": "1",
"size": "1115",
"license": "cc0-1.0",
"hash": 9150950993990674000,
"line_mean": 23.2391304348,
"line_max": 48,
"alpha_frac": 0.4322869955,
"autogenerated": false,
"ratio": 3.8054607508532423,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9629052094179329,
"avg_score": 0.021739130434782608,
"num_lines": 46
} |
__author__ = 'coolspeed'
# 3999 / 3999 test cases passed.
# Status: Accepted
# Runtime: 168 ms
# Each Roman glyph decomposes as digit * weight:
#   I = 1*1, V = 5*1, X = 1*10, L = 5*10, C = 1*100, D = 5*100, M = 1*1000.
# char_to_digit holds the 1-or-5 "digit" part of each glyph.
char_to_digit = {
    'I': 1,
    'V': 5,
    'X': 1,
    'L': 5,
    'C': 1,
    'D': 5,
    'M': 1
}
# char_to_weight holds the power-of-ten "weight" part of each glyph.
char_to_weight = {
    'I': 1,
    'V': 1,
    'X': 10,
    'L': 10,
    'C': 100,
    'D': 100,
    'M': 1000
}
# dic maps a weight to its (one, five) glyph pair; '_' marks glyphs that
# do not exist (the 10000 entry only supports lookahead for M).
dic = {
    1: ('I', 'V'),
    10: ('X', 'L'),
    100: ('C', 'D'),
    1000: ('M', '_'),
    10000: ('_', '_')
}
class Solution:
    """LeetCode 13 — convert a Roman numeral string to an integer.

    Uses the module-level char_to_digit / char_to_weight / dic tables.
    """
    # @param {string} s
    # @return {integer}
    def romanToInt(self, s):
        ret = 0
        # BUG FIX (portability): `xrange` does not exist on Python 3;
        # enumerate() is equivalent here and works on both versions.
        for i, curr_char in enumerate(s):
            curr_digit = char_to_digit[curr_char]
            curr_weight = char_to_weight[curr_char]
            if curr_digit == 1:
                # A "one" glyph is subtractive when followed by its own five
                # (IV/XL/CD) or by the next weight's one (IX/XC/CM).
                curr_five = dic[curr_weight][1]
                curr_ten = dic[curr_weight * 10][0]
                if i < len(s) - 1 and s[i + 1] in [curr_five, curr_ten]:
                    ret -= curr_digit * curr_weight
                else:
                    ret += curr_digit * curr_weight
            else:  # "five" glyphs (V, L, D) are always additive
                ret += curr_digit * curr_weight
        return ret
# Manual smoke test: "XL" should decode to 40.
mysolution = Solution()
input_str = "XL"
ret = mysolution.romanToInt(input_str)
# %-formatting in function-call form prints identically on Python 2 and 3
# (the original `print "ret =", ret` statement is a SyntaxError on Python 3).
print("ret = %s" % ret)
| {
"repo_name": "coolspeed/leetcode",
"path": "src/13_roman_to_integer.py",
"copies": "1",
"size": "1231",
"license": "cc0-1.0",
"hash": -6746029730335679000,
"line_mean": 17.9384615385,
"line_max": 72,
"alpha_frac": 0.4346060114,
"autogenerated": false,
"ratio": 2.987864077669903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8845547012146826,
"avg_score": 0.015384615384615385,
"num_lines": 65
} |
__author__ = 'coolspeed'
import bisect
class Solution:
    """LeetCode 4 (own attempt) — median of two sorted arrays.

    Strategy: repeatedly discard equal-length prefixes/suffixes that cannot
    contain the combined median, merging directly once either array shrinks
    to <= 2 elements.  NOTE(review): this is an experimental solution with
    debug prints left in; it uses Python 2 print statements throughout.
    """
    # Debug counter shared across recursive calls (class-level state).
    recursion_depth = 0
    # @param {integer[]} nums1
    # @param {integer[]} nums2
    # @return {float}
    def findMedianSortedArrays(self, nums1, nums2):
        print "recursion_depth =", self.recursion_depth
        print "nums1 =", str(nums1), "nums2 =", str(nums2)
        len1 = len(nums1)
        len2 = len(nums2)
        # Base cases: one array empty, or both single-element.
        if len1 == 0:
            return self.median(nums2)
        if len2 == 0:
            return self.median(nums1)
        if len1 == len2 == 1:
            return 0.5 * (nums1[0] + nums2[0])
        # Small case: merge the shorter array into the longer via insort.
        if min(len1, len2) <= 2:
            if len1 > 2:
                nums1, nums2 = nums2, nums1
            for elem in nums1:
                bisect.insort_left(nums2, elem)
            return self.median(nums2)
        median1 = self.median(nums1)
        median2 = self.median(nums2)
        if median1 == median2:
            return median1
        # Normalise so nums1 holds the smaller median.
        if median1 > median2:
            nums1, nums2 = nums2, nums1
            print "nums1 =", str(nums1), "nums2 =", str(nums2)
            len1, len2 = len2, len1
            median1, median2 = median2, median1
        assert median1 < median2
        # Discard the same count from nums1's low end and nums2's high end;
        # this preserves the combined median.
        remove_len = self.removeLenOf2(nums1, nums2)
        print "remove_len =", remove_len
        # if remove_len <= 1 and min(len1, len2) <= 2:
        #     medians1 = self.getMedians(nums1)
        #     medians2 = self.getMedians(nums2)
        #     mediansSorted = sorted(medians1 + medians2)
        #     return self.median(mediansSorted)
        #
        # else:
        new_nums1 = nums1[remove_len : ] # remove left part
        new_nums2 = nums2[ : len2 - remove_len] # remove right part
        print
        print "----------------"
        self.recursion_depth += 1
        return self.findMedianSortedArrays(new_nums1, new_nums2)
    def median(self, a_list):
        """Median of a non-empty sorted list, always returned as a float."""
        list_len = len(a_list)
        assert list_len > 0
        if list_len % 2 == 0:
            ret = 0.5 * (a_list[list_len / 2 - 1] + a_list[list_len / 2]) # e.g. len 4 -> indices 1, 2
        else:
            ret = a_list[list_len / 2] # e.g. len 3 -> index 1
        return float(ret)
    def removeLenOf2(self, list_a, list_b): # judged by the shorter list
        """How many elements can safely be removed from each side."""
        if len(list_a) >= len(list_b):
            list_a, list_b = list_b, list_a
        if len(list_a) % 2 == 0:
            return len(list_a) / 2 - 1
        else:
            return len(list_a) / 2
    def getMedians(self, aList):
        """Slice of the middle element(s); only used by the disabled branch."""
        listLen = len(aList)
        if listLen % 2 == 0:
            return aList[listLen / 2 - 1 : listLen / 2 + 1]
        else:
            return aList[listLen / 2 - 1 : listLen / 2 + 2]
# Manual smoke test; expected median of [1..8] is 4.5.
mysolution = Solution()
nums1 = [1, 4, 5]
nums2 = [2, 3, 6, 7, 8]
myresult = mysolution.findMedianSortedArrays(nums1, nums2)
# Function-call print form keeps output identical on Python 2 and 3
# (the bare `print` statements here were Python 2-only syntax).
print("")
print("====================================")
print(myresult)
| {
"repo_name": "coolspeed/leetcode",
"path": "src/04_median_of_two_sorted_arrays_myown.py",
"copies": "1",
"size": "2858",
"license": "cc0-1.0",
"hash": -1902099702517600800,
"line_mean": 25.2201834862,
"line_max": 86,
"alpha_frac": 0.5150454864,
"autogenerated": false,
"ratio": 3.2737686139747995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4288814100374799,
"avg_score": null,
"num_lines": null
} |
__author__ = 'corbinq27'
from twython import Twython
import json
from main import CreateData
from product_extractor import ProductExtractor
from pyshorteners import Shortener
import time
import requests
from s3_file_download import FileDownload
from s3_file_upload import FileUpload
# NOTE(review): this module-level dict is shadowed by the local assignment
# inside lambda_handler and is never actually populated.
tweeter_keys = {}
def lambda_handler(event, context):
    """AWS Lambda entry point: refresh price data and tweet any changes.

    Pipeline: load Twitter credentials, pull state from S3, scrape product
    prices, compute the comparison file, push state back to S3, then tweet
    one status per changed/new/discontinued product.
    NOTE(review): uses dict.iteritems(), so this module is Python 2-only.
    """
    with open("twitter_keys.json", "rb") as fp:
        tweeter_keys = json.load(fp)
    twitter = Twython(tweeter_keys["APP_KEY"], tweeter_keys["APP_SECRET"], tweeter_keys["OAUTH_TOKEN"],
                      tweeter_keys["OAUTH_TOKEN_SECRET"])
    # Pull the previous run's data down from S3.
    fdownload = FileDownload()
    fdownload.file_download()
    # Scrape current product data and build the comparison file.
    pe = ProductExtractor()
    pe.product_extractor()
    cd = CreateData()
    new_data_filename = cd.create_data()
    # Push updated state back to S3 (including the new data file if any).
    fupload = FileUpload()
    if new_data_filename:
        fupload.file_upload([new_data_filename])
    else:
        fupload.file_upload()
    price_comparison = {}
    with open("/tmp/price-comparison-recent.json", "rb") as fp:
        price_comparison = json.load(fp)
    # If any price comparisons have changed (as noted in their data) then
    # we'll tweet the product and how it changed.
    if price_comparison:
        for product, info in price_comparison.iteritems():
            # Shorten the product URL via Google; retry once on a network
            # hiccup, otherwise fall back to the hardcoded site link.
            short_url_obtained = False
            try:
                shortener = Shortener('Google', api_key = tweeter_keys["GOOGLE_URL_SHORTENER_KEY"])
                short_url = shortener.short(info["product_url"])
                short_url_obtained = True
            except(requests.exceptions.Timeout, requests.exceptions.ConnectionError):
                time.sleep(2)
            if not short_url_obtained:
                short_url = "http://goo.gl/p7KqbO" #hardcoded to wholesalegaming.biz
            # NOTE(review): `old_price not in "N/A"` is a substring test, not
            # equality — confirm the intended semantics.
            if info["is_discontinued_product"] and (info["old_price"] not in "N/A"):
                # Only tweet a discontinuation when old_price is not N/A;
                # deals with the issue of a discontinued product not getting
                # removed from the scraped site.
                twitter.update_status(status="%s no longer for sale. %s" % (product, short_url))
            elif info["is_new_product"]:
                twitter.update_status(status="%s now available! $%s. %s" % (product, int(info["new_price"]), short_url))
            elif info["is_difference"]:
                twitter.update_status(status='New price for %s: $%s. Was $%s. %s' % (product, int(info["new_price"]),
                                                                                     int(info["old_price"]), short_url))
            else:
                print("no change for %s" % product)
                try:
                    print('test printout for %s: $%s. Was $%s. %s' % (product, int(info["new_price"]),
                                                                      int(info["old_price"]), short_url))
                except(ValueError):
                    # Deal with a non-numeric "N/A" price.
                    print("test printout for %s: %s. Was %s. %s" % (product, info["new_price"],
                                                                    info["old_price"], short_url))
| {
"repo_name": "corbinq27/priceTweeter",
"path": "tweeter.py",
"copies": "1",
"size": "3188",
"license": "mit",
"hash": 4678440448300259000,
"line_mean": 39.3544303797,
"line_max": 120,
"alpha_frac": 0.5526976161,
"autogenerated": false,
"ratio": 3.975062344139651,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502775996023965,
"avg_score": null,
"num_lines": null
} |
__author__ = 'corbinq27'
import json
from time import gmtime, strftime
from scrapely import Scraper
class CreateData():
    """Scrape current product prices, diff them against the previous run, and
    write the price / comparison JSON files under /tmp."""

    def __init__(self):
        pass

    def create_data(self):
        """Scrape every configured URL, diff against prices-recent.json, and
        refresh the "recent" files.

        Returns the timestamped comparison filename when any price changed,
        otherwise None.
        """
        training_url = "http://www.wholesalegaming.biz/startrek/trekalphastarterbox/"
        data_training = {"product": "Star Trek Alpha Unlimited Starter Box", "price": "$15.00"}
        # Train scrapely on one known page so it can extract the same fields
        # from every other product page.
        scraper = Scraper()
        scraper.train(training_url, data_training)
        # Get the URLs to check.
        # Format (all strings in unicode): {"urls": [<url1>, <url2>, ...]}
        # FIX: use open() in a with-block instead of the Python-2-only file()
        # constructor, closing the handle deterministically.
        with open("pages_to_check.json") as fp:
            page_json = fp.read()
        urls_to_check = json.loads(page_json)
        # Scraped data keyed by product name: [price (float or "N/A"), url].
        price_list = {}
        for each_url in urls_to_check["urls"]:
            scraped_data = scraper.scrape(each_url)
            # Example: [{u'price': [u' $15.00 '], u'product': [u'Star Trek Alpha Unlimited Starter Box']}]
            dollar_string = scraped_data[0]["price"][0].replace(" ","")
            removed_dollar_sign = dollar_string.replace("$", "")
            try:
                price_as_float = float(removed_dollar_sign)
            except ValueError:
                # Most likely "Product Unavailable"; "N/A" is the sentinel the
                # comparison logic below keys off.
                price_as_float = "N/A"
            product_name = scraped_data[0]["product"][0]
            price_list[product_name] = [price_as_float, each_url]
        # Snapshot this run's prices.
        timestamp = strftime("%Y-%m-%d-%H:%M:%S", gmtime())
        with open("/tmp/prices-%s.json" % timestamp, "w") as fp:
            json.dump(price_list, fp, sort_keys=True, indent=4)
        # Compare against the previous run.
        recent_price_list = {}
        with open('/tmp/prices-recent.json', 'r') as fp:
            recent_price_list = json.load(fp)
        # Output format, per product:
        #   {"old_price": float, "new_price": float, "new_difference": float,
        #    "is_difference": bool, "is_new_product": bool,
        #    "is_discontinued_product": bool, "product_url": str}
        comparison_data = {}
        for old_product, old_price in recent_price_list.iteritems():
            new_difference = 0.0
            is_difference = False
            is_new_product = False
            is_discontinued_product = False
            try:
                new_price = price_list[old_product]
                new_difference = new_price[0] - old_price[0]
            except(KeyError, TypeError):
                # KeyError: the product vanished from this scrape.
                # TypeError: the old price was the "N/A" string sentinel.
                new_price = [0.0]
                is_discontinued_product = True
            if new_difference != 0.0:
                is_difference = True
            comparison_data[old_product] = {
                "old_price": old_price[0],
                "new_price": new_price[0],
                "new_difference": new_difference,
                "is_difference": is_difference,
                "is_new_product": False,
                "is_discontinued_product": is_discontinued_product,
                "product_url": old_price[1]
            }
        # Anything scraped now that wasn't known before is a new product.
        new_inventory_set = set(price_list.keys()) - set(recent_price_list.keys())
        new_inventory_list = list(new_inventory_set)
        for each_product in new_inventory_list:
            comparison_data[each_product] = { "old_price": 0.0,
                                              "new_price": price_list[each_product][0],
                                              "new_difference": price_list[each_product][0],
                                              "is_difference": True,
                                              "is_new_product": True,
                                              "is_discontinued_product": False,
                                              "product_url": price_list[each_product][1]
            }
        # Always refresh the "most recent" comparison and price files.
        with open("/tmp/price-comparison-recent.json", "w") as fp:
            json.dump(comparison_data, fp, sort_keys=True, indent=4)
        with open("/tmp/prices-recent.json", "w") as fp:
            json.dump(price_list, fp, sort_keys=True, indent=4)
        # Write a timestamped comparison file only when something changed.
        timestamp = strftime("%Y-%m-%d-%H:%M:%S", gmtime())
        # FIX: the original tested `"True" in comparison_data`, which looks
        # for a product literally named "True"; check the per-product
        # is_difference flags instead.
        if any(entry["is_difference"] for entry in comparison_data.values()):
            # FIX: the original forgot `% timestamp`, producing a literal
            # "%s" in the filename (and overwriting the same file each run).
            filename = "/tmp/price-comparison-%s.json" % timestamp
            with open(filename, "w") as fp:
                json.dump(comparison_data, fp, sort_keys=True, indent=4)
            return filename
        return None
| {
"repo_name": "corbinq27/priceTweeter",
"path": "main.py",
"copies": "1",
"size": "5940",
"license": "mit",
"hash": -345329366784635300,
"line_mean": 42.0434782609,
"line_max": 133,
"alpha_frac": 0.497979798,
"autogenerated": false,
"ratio": 4.233784746970777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004886836814285905,
"num_lines": 138
} |
__author__ = 'corey'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re,json
from bs4 import BeautifulSoup
import requests
import MySQLdb
def changekey(key):
    """Trim surrounding whitespace and swap single quotes for double quotes
    so scraped text can be embedded in the hand-built SQL strings."""
    trimmed = key.strip()
    return trimmed.replace("'", '"')
def hangkey(key):
    """Collapse multi-line text onto one line by replacing every newline
    with an exclamation mark."""
    return "!".join(key.split("\n"))
def get(url):
    """Fetch *url* with a 20s timeout and return the response body.

    Returns "" on any failure -- errors are deliberately swallowed (bare
    except) so the crawler keeps going over flaky pages."""
    try:
        req = urllib2.Request(url)
        resp = urllib2.urlopen(req, timeout=20)
        return resp.read()
    except:
        return ""
class Spider:
    """Crawler for nowcoder.com: downloads discussion posts, their comments
    (two levels deep), user profiles and interview Q&A pages, and stores
    everything in the local MySQL database 'newcoder'.

    NOTE(review): all SQL statements are assembled by string concatenation
    from scraped content -- open to SQL injection and broken quoting;
    parameterised queries should be used instead.
    """
    def __init__(self):
        self.siteURL = 'https://www.nowcoder.com/discuss/'
    def getPage(self,pageIndex):
        """Fetch the discussion page for post id *pageIndex*; the id is also
        remembered in self.Pindex for the later comment requests."""
        self.Pindex = pageIndex
        url = self.siteURL + str(pageIndex)+"?type=0&order=0&page=1"
        return get(url)
    def getContents(self,pageIndex):
        """Parse title/author/time/body/comment-count of one post into
        attributes; return True on success, False on any parse failure."""
        page = self.getPage(pageIndex)
        soup = BeautifulSoup(page,"lxml")
        try:
            # title
            self.title = soup.find("h1",{"class":"discuss-title"})
            self.title = soup.h1.get_text().strip()
            # author
            louzhu =soup.find("span",{"class":"post-name"})
            louzhu = louzhu.find("a")
            self.louzhu = louzhu.attrs['title']
            self.louzhu = changekey(self.louzhu)
            # post time
            lztime =soup.find("span",{"class":"post-time"})
            lztime = lztime.get_text()
            lztime = lztime.split()
            self.lztime = lztime[1]+" "+lztime[2]
            self.lztime = changekey(self.lztime)
            # post body (all whitespace-separated chunks except the last two)
            context =soup.find("div",{"class":"post-topic-des"})
            context = context.get_text()
            context = context.split()
            self.context = context[0]
            for i in range(1,len(context)-2):
                self.context = self.context+context[i]
            self.context = changekey(self.context)
            #pattern = re.compile('h1 class="discuss-title".*')
            #items = re.findall(pattern,page)
            #print items
            # comment count (first number inside the third <h1>)
            num = soup.findAll("h1")
            num = num[2].get_text()
            num = num.encode("utf-8")
            num = re.findall(r'(\w*[0-9]+)\w*',num)
            self.num = int(num[0])
            return True
        except:
            return False
    def getComPage(self,page):
        """Fetch one JSON page of top-level comments for the current post."""
        # pageSize is capped at 100 by the API
        #https://www.nowcoder.com/comment/list-by-page-v2?pageSize=200000&page=1&order=1&entityId=62719&entityType=8
        url = "https://www.nowcoder.com/comment/list-by-page-v2?pageSize=100&page="+str(page)+"&order=1&entityId="+str(self.Pindex)+"&entityType="+str(8);
        return get(url)
    def getComments(self,pageid):
        """Load up to 100 top-level comments of page *pageid* plus their
        sub-replies into self.compage / self.ids / self.subcom."""
        ids = []
        subcom= []
        compage = self.getComPage(pageid)
        # top-level replies returned for this page
        self.compage =json.loads(compage)
        # sub-replies of each reply; nesting depth is at most 1
        if self.num>100:
            num = 100
        else:
            num = self.num
        for i in range(0,num):
            ids.append(self.compage['data']['comments'][i]['id'])
        self.ids = ids
        for idtemp in ids:
            subcom.append(json.loads(self.getID(idtemp)))
        self.subcom = subcom
    def getID(self,idindex):
        """Fetch the sub-comment JSON for comment id *idindex*."""
        #https://www.nowcoder.com/comment/list-by-page-v2?token=&pageSize=10&page=1&order=1&entityId=1091685&entityType=2&_=1511231131699
        url = "https://www.nowcoder.com/comment/list-by-page-v2?token=&pageSize=100&page=1&order=1&entityId="+str(idindex)+"&entityType=2"
        return get(url)
    def save(self,sqlstr):
        """Open a fresh MySQL connection, execute *sqlstr*, commit and close.
        Execution failures are only printed, never raised."""
        self.db_conn= MySQLdb.connect(
            host='localhost',
            port = 3306,
            user='liu',
            passwd='123456',
            db ='newcoder',
            charset='utf8',
        )
        self.db_cur = self.db_conn.cursor()
        try:
            self.db_cur.execute(sqlstr)
        except:
            print "error"
        self.db_cur.close()
        self.db_conn.commit()
        self.db_conn.close()
    def saveall(self):
        """Persist the post row plus its first- and second-level comments."""
        # save the post row (title etc.)
        sqlstr = "insert into discuss values('"+str(self.Pindex)+"','"+self.title+"','"+self.louzhu+"','"+self.lztime+"','"+self.context+"','"+str(self.num)+"')"
        self.save(sqlstr)
        # save first-level comments (at most 100 per page)
        if self.num >100:
            num = 100
        else:
            num = self.num
        for i in range (0,num):
            sqlstr = "insert into comment values('"+str(self.Pindex)+"','0','"+str(self.compage['data']['comments'][i]['id'])+"','"+self.compage['data']['comments'][i]['authorName']+"','"+str(self.compage['data']['comments'][i]['authorId'])+"','"+self.compage['data']['comments'][i]['content']+"')"
            self.save(sqlstr)
        # save second-level comments
        for i in range (0,len(self.subcom)):
            for j in range (0,int(self.subcom[i]["data"]["totalCnt"])):
                print i,j
                sqlstr = "insert into comment values('"+str(self.Pindex)+"','"+str(self.subcom[i]['data']['comments'][j]['toCommentId'])+"','"+str(self.subcom[i]['data']['comments'][j]['id'])+"','"+self.subcom[i]['data']['comments'][j]['authorName']+"','"+str(self.subcom[i]['data']['comments'][j]['authorId'])+"','"+self.subcom[i]['data']['comments'][j]['content']+"')"
                self.save(sqlstr)
    # The profile page requires login; diffing cookies showed the variable 't'
    # carries the session info -- verified successfully with postman.
    def getUser(self,userindex):
        """Scrape the basic profile of user id *userindex* into table NCuser.
        Relies on self.db_cur already being opened by the caller (getAllUser)."""
        url = "https://www.nowcoder.com/profile/"+str(userindex)+"/basicinfo"
        try:
            opener = urllib2.build_opener()
            opener.addheaders.append(('Cookie', 't=C139A5266EFAE233DD9C9FC10C0A1B5C'))
            page = opener.open(url,timeout=20)
            soup = BeautifulSoup(page,"lxml")
            # the "null-tip" div is present only when the profile doesn't exist
            istrue = soup.find("div",{"class":"null-tip"})
        except:
            istrue = ""
        if istrue == None:
            try:
                name = soup.find("dd",{"id":"nicknamePart"})
                name = name.get_text().strip()
            except:
                name = ""
            try:
                city = soup.find("li",{"class":"profile-city"})
                city = city.get_text().strip()
            except:
                city = ""
            try:
                edu = soup.find("dd",{"id":"schoolInfoPart"})
                edu = edu.get_text().strip()
            except:
                edu = ""
            try:
                intr = soup.find("dd",{"id":"introductionPart"})
                intr = intr.get_text().strip()
            except:
                intr = ""
            try:
                liv = soup.find("dd",{"id":"livePlacePart"})
                liv = liv.get_text().strip()
            except:
                liv = ""
            try:
                cur = soup.find("dd",{"id":"curIdentityPart"})
                cur = cur.get_text().strip()
            except:
                cur = ""
            try:
                wor = soup.find("dd",{"id":"workTimePart"})
                wor = wor.get_text().strip()
            except:
                wor = ""
            try:
                el = soup.find("dd",{"id":"eduLevelPart"})
                el = el.get_text().strip()
            except:
                el = ""
            try:
                com = soup.find("dd",{"id":"companyInfoPart"})
                com = com.get_text().strip()
            except:
                com = ""
            try:
                job = soup.find("dd",{"id":"jobInfoPart"})
                job = job.get_text().strip()
            except:
                job = ""
            # write to the database
            sqlstr = "insert into NCuser values('"+str(userindex)+"','"+name+"','"+city+"','"+edu+"','"+intr+"','"+liv+"','"+cur+"','"+wor+"','"+el+"','"+com+"','"+job+"')"
            sqlstr = sqlstr.encode("utf-8")
            try:
                self.db_cur.execute(sqlstr)
                print name,city,edu,intr,liv,cur,wor,el,com,job
            except:
                print "error"
        else:
            print "Ìø¹ý",userindex  # garbled GBK for "skip"
    def getQA(self,pageid,QAtype):
        """Scrape one interview question + reference answer into table question."""
        #https://www.nowcoder.com/ta/nine-chapter/review?page=2
        url = "https://www.nowcoder.com/ta/"+QAtype+"/review?tpId=1&query=&asc=true&order=&page="+str(pageid)
        page = get(url)
        soup = BeautifulSoup(page,"lxml")
        try:
            que = soup.find("div",{"class":"final-question"})
            que = que.get_text().strip()
            que = changekey(que)
        except:
            que = ""
        try:
            asw = soup.find("div",{"class":"design-answer-box"})
            asw = asw.get_text().strip()
            asw = changekey(asw)
        except:
            asw = ""
        sqlstr = "insert into question values('"+QAtype+"','"+que+"','"+asw+"','0')"
        self.save(sqlstr)
    def getcountQA(self,QAtype):
        """Read the total page count for a Q&A topic into self.count (0 on failure)."""
        url = "https://www.nowcoder.com/ta/"+QAtype+"/review?tpId=1&query=&asc=true&order=&page=1"
        page = get(url)
        soup = BeautifulSoup(page,"lxml")
        try:
            count = soup.find("button",{"class":"btn btn-primary js-pagination-jump"})
            count = count['data-total-page']
            self.count = int(count)
        except:
            self.count = 0
    def getAllQA(self):
        """Crawl every page of every configured Q&A topic."""
        QAlist = ["review-frontend","acm-solutions","review-c","review-test","review-java","review-network","front-end-interview","nine-chapter"]
        for j in range(0,len(QAlist)):
            self.getcountQA(QAtype=QAlist[j])
            for i in range(1,self.count+1):
                print j,i
                self.getQA(i,QAtype=QAlist[j])
    def getAllUser(self):
        """Crawl user profiles for a hard-coded uid range, one DB connection per user."""
        for i in range(176046,900000):
            self.db_conn= MySQLdb.connect(
                host='localhost',
                port = 3306,
                user='liu',
                passwd='123456',
                db ='newcoder',
                charset='utf8',
            )
            self.db_cur = self.db_conn.cursor()
            self.getUser(i)
            self.db_cur.close()
            self.db_conn.commit()
            self.db_conn.close()
    def getAllContents(self):
        """Crawl a hard-coded range of post ids, paging comment fetches per 100."""
        for i in range(63516,66174):
            try:
                istrue = self.getContents(i)
                # posts with more than 100 comments must be fetched page by page
                count = 1;
                if istrue:
                    print i
                    while(self.num>100):
                        self.getComments(count)
                        self.saveall()
                        count = count+1
                        self.num -=100
                    self.getComments(count)
                    self.saveall()
                else:
                    print "Ìø¹ý",i  # garbled GBK for "skip"
            except:
                pass
import sys
# Keep a reference to the real stdout: Python 2's reload(sys) resets
# sys.stdout, so it is restored immediately afterwards.
stdout = sys.stdout
reload(sys)
sys.stdout = stdout
# Entry point: crawl the configured range of discussion posts.
spider = Spider()
#spider.getAllQA()
spider.getAllContents()
'''
spider.getContents(62186)
spider.getComments()
#spider.printall()
'''
| {
"repo_name": "liucode/liucode.github.io",
"path": "bgtool/ques.py",
"copies": "1",
"size": "10989",
"license": "mit",
"hash": -2176716842617156000,
"line_mean": 33.4482758621,
"line_max": 370,
"alpha_frac": 0.4875784876,
"autogenerated": false,
"ratio": 3.3009912886752777,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4288569776275278,
"avg_score": null,
"num_lines": null
} |
__author__ = 'corey'
# -*- coding:utf-8 -*-
import urllib
import urllib2
import re,json
from bs4 import BeautifulSoup
import requests
import MySQLdb
def get(url):
    """Download *url* (with a no-cache Pragma header, 20s timeout) and return
    the body; "" on any failure. Bare except keeps the crawler running."""
    try:
        req = urllib2.Request(url)
        req.add_header('Pragma', 'no-cache')
        return urllib2.urlopen(req, timeout=20).read()
    except:
        return ""
class Spider:
    """Earlier (v1.0) nowcoder.com crawler: discussion posts, two-level
    comments and user profiles into the local MySQL database 'newcoder'.

    NOTE(review): SQL is built by string concatenation from scraped content
    -- SQL-injection prone; parameterised queries should be used instead.
    """
    def __init__(self):
        self.siteURL = 'https://www.nowcoder.com/discuss/'
    def getPage(self,pageIndex):
        """Fetch the discussion page for post id *pageIndex* (saved in self.Pindex)."""
        self.Pindex = pageIndex
        url = self.siteURL + str(pageIndex)+"?type=0&order=0&page=1"
        return get(url)
    def getContents(self,pageIndex):
        """Parse title/author/time/body/comment-count of one post into attributes.
        No error handling here -- any missing element raises."""
        page = self.getPage(pageIndex)
        soup = BeautifulSoup(page,"lxml")
        # title
        self.title = soup.find("h1",{"class":"discuss-title"})
        self.title = soup.h1.get_text()
        # author
        louzhu =soup.find("span",{"class":"post-name"})
        louzhu = louzhu.find("a")
        self.louzhu = louzhu.attrs['title']
        # post time
        lztime =soup.find("span",{"class":"post-time"})
        lztime = lztime.get_text()
        lztime = lztime.split()
        self.lztime = lztime[1]+" "+lztime[2]
        # post body
        context =soup.find("div",{"class":"post-topic-des"})
        context = context.get_text()
        context = context.split()
        self.context = context[0]
        for i in range(1,len(context)-2):
            self.context = self.context+context[i]
        #pattern = re.compile('h1 class="discuss-title".*')
        #items = re.findall(pattern,page)
        #print items
        # comment count (kept as a string here, unlike the later version)
        num = soup.findAll("h1")
        num = num[2].get_text()
        num = num.encode("utf-8")
        num = re.findall(r'(\w*[0-9]+)\w*',num)
        self.num = num[0]
    def getComPage(self):
        """Fetch the first JSON page of top-level comments for the current post."""
        #https://www.nowcoder.com/comment/list-by-page-v2?pageSize=20&page=1&order=1&entityId=62719&entityType=8
        url = "https://www.nowcoder.com/comment/list-by-page-v2?page=1&order=1&entityId="+str(self.Pindex)+"&entityType="+str(8);
        return get(url)
    def getComments(self):
        """Load top-level comments plus their sub-replies into
        self.compage / self.ids / self.subcom."""
        num = int(self.num)
        ids = []
        subcom= []
        compage = self.getComPage()
        # top-level replies for this post
        self.compage =json.loads(compage)
        # sub-replies of each reply; nesting depth is at most 1
        for i in range(0,num):
            ids.append(self.compage['data']['comments'][i]['id'])
        self.ids = ids
        for idtemp in ids:
            subcom.append(json.loads(self.getID(idtemp)))
        self.subcom = subcom
    def getID(self,idindex):
        """Fetch the sub-comment JSON for comment id *idindex*."""
        #https://www.nowcoder.com/comment/list-by-page-v2?token=&pageSize=10&page=1&order=1&entityId=1091685&entityType=2&_=1511231131699
        url = "https://www.nowcoder.com/comment/list-by-page-v2?token=&pageSize=10&page=1&order=1&entityId="+str(idindex)+"&entityType=2"
        return get(url)
    def printall(self):
        """Debug dump of everything parsed so far (Python 2 print statement)."""
        print self.title,self.louzhu,self.lztime,self.context,self.num,self.compage,self.ids,self.subcom
    # The profile page requires login; diffing cookies showed the variable 't'
    # carries the session info -- verified successfully with postman.
    def getUser(self,userindex):
        """Scrape the basic profile of user id *userindex* into table NCuser.
        Relies on self.db_cur already being opened by the caller (getAllUser)."""
        url = "https://www.nowcoder.com/profile/"+str(userindex)+"/basicinfo"
        try:
            opener = urllib2.build_opener()
            opener.addheaders.append(('Cookie', 't=C139A5266EFAE233DD9C9FC10C0A1B5C'))
            page = opener.open(url,timeout=20)
            soup = BeautifulSoup(page,"lxml")
            # the "null-tip" div is present only when the profile doesn't exist
            istrue = soup.find("div",{"class":"null-tip"})
        except:
            istrue = ""
        if istrue == None:
            try:
                name = soup.find("dd",{"id":"nicknamePart"})
                name = name.get_text().strip()
            except:
                name = ""
            try:
                city = soup.find("li",{"class":"profile-city"})
                city = city.get_text().strip()
            except:
                city = ""
            try:
                edu = soup.find("dd",{"id":"schoolInfoPart"})
                edu = edu.get_text().strip()
            except:
                edu = ""
            try:
                intr = soup.find("dd",{"id":"introductionPart"})
                intr = intr.get_text().strip()
            except:
                intr = ""
            try:
                liv = soup.find("dd",{"id":"livePlacePart"})
                liv = liv.get_text().strip()
            except:
                liv = ""
            try:
                cur = soup.find("dd",{"id":"curIdentityPart"})
                cur = cur.get_text().strip()
            except:
                cur = ""
            try:
                wor = soup.find("dd",{"id":"workTimePart"})
                wor = wor.get_text().strip()
            except:
                wor = ""
            try:
                el = soup.find("dd",{"id":"eduLevelPart"})
                el = el.get_text().strip()
            except:
                el = ""
            try:
                com = soup.find("dd",{"id":"companyInfoPart"})
                com = com.get_text().strip()
            except:
                com = ""
            try:
                job = soup.find("dd",{"id":"jobInfoPart"})
                job = job.get_text().strip()
            except:
                job = ""
            # write to the database
            sqlstr = "insert into NCuser values('"+str(userindex)+"','"+name+"','"+city+"','"+edu+"','"+intr+"','"+liv+"','"+cur+"','"+wor+"','"+el+"','"+com+"','"+job+"')"
            sqlstr = sqlstr.encode("utf-8")
            try:
                self.db_cur.execute(sqlstr)
                print name,city,edu,intr,liv,cur,wor,el,com,job
            except:
                print "error"
        else:
            print "Ìø¹ý",userindex  # garbled GBK for "skip"
    def getAllUser(self):
        """Crawl user profiles for a hard-coded uid range, one DB connection per user."""
        for i in range(2000000,3000000):
            self.db_conn= MySQLdb.connect(
                host='localhost',
                port = 3306,
                user='liu',
                passwd='123456',
                db ='newcoder',
                charset='utf8',
            )
            self.db_cur = self.db_conn.cursor()
            print i
            self.getUser(i)
            self.db_cur.close()
            self.db_conn.commit()
            self.db_conn.close()
# Entry point: crawl user profiles for the hard-coded uid range above.
spider = Spider()
spider.getAllUser()
# Commented-out single-post debugging calls, kept for reference:
'''
spider.getContents(62186)
spider.getComments()
#spider.printall()
'''
| {
"repo_name": "liucode/liucode.github.io",
"path": "bgtool/main1.0.py",
"copies": "1",
"size": "6422",
"license": "mit",
"hash": -2526746466690457000,
"line_mean": 30.9502487562,
"line_max": 172,
"alpha_frac": 0.4962628465,
"autogenerated": false,
"ratio": 3.154223968565815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8994114401431196,
"avg_score": 0.03127448272692388,
"num_lines": 201
} |
__author__ = 'Corrosion X'
__name__ = 'TimePlayed'
__version__ = '0.2'
import clr
import sys
clr.AddReferenceByPartialName("Pluton", "UnityEngine")
import UnityEngine
import Pluton
from System import String
import datetime
class TimePlayed:
    """Pluton plugin: track per-player total play time and expose /timeonline."""

    def On_PluginInit(self):
        """Register the /timeonline chat command when the plugin loads."""
        Commands.Register("timeonline")\
            .setCallback("timeonline")\
            .setDescription("Gives your time online stats.")\
            .setUsage("/timeonline")

    def On_PlayerDisconnected(self, player):
        """Record the disconnect timestamp and accumulate the session length."""
        playerid = player.GameID
        getstamp = Plugin.GetTimestamp()
        # NOTE(review): DataStore.Add is used as an upsert here -- presumably
        # it overwrites an existing key; confirm against the Pluton API.
        DataStore.Add("LastConnected", playerid, getstamp)
        timedsession = player.TimeOnline
        totaltime = DataStore.Get("TotalTimePlayed", playerid)
        if totaltime is None:
            totaltime = 0
        totaltime += timedsession
        DataStore.Add("TotalTimePlayed", playerid, totaltime)

    def timeonline(self, unused, player):
        """Handle /timeonline: show last-seen time and total play time."""
        playerid = player.GameID
        timeplayed = DataStore.Get("LastConnected", playerid)
        totaltime = DataStore.Get("TotalTimePlayed", playerid)
        if timeplayed is not None and totaltime is not None:
            # FIX: format the timestamp only after the None check; the
            # original called fromtimestamp(timeplayed) unconditionally,
            # which raised for players with no recorded data yet.
            timeplayed = datetime.datetime.fromtimestamp(timeplayed).strftime('%Y-%m-%d %H:%M:%S')
            player.Message("Last Played:" + str(timeplayed))
            player.Message("Total Time Played:" + str(totaltime))
        else:
            player.Message("Your time hasn't been recorded yet.")
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "TimePlayed.py",
"copies": "1",
"size": "1516",
"license": "mit",
"hash": 1188755501984198700,
"line_mean": 35.119047619,
"line_max": 94,
"alpha_frac": 0.6517150396,
"autogenerated": false,
"ratio": 3.877237851662404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5028952891262404,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '0.1'
__name__ = 'Item Modifier'
import clr
clr.AddReferenceByPartialName("Pluton")
clr.AddReferenceByPartialName("Assembly-CSharp")
import ItemDefinition
import ItemManager
import Pluton
# Module-level alias: shorter name for the item-definition lookup function.
itemkey = ItemManager.FindItemDefinition
class ItemModifier:
    """Pluton plugin: dump all item definitions to Items.ini on first run,
    then apply the ini-configured stack sizes, max condition and flags each
    time the server initialises."""
    global itemkey

    def On_ServerInit(self):
        """Create the Items.ini template if missing, then push its values
        onto the live item definitions."""
        # First run: write every item's current values so server owners have
        # a template to edit.
        if not Plugin.IniExists("Items"):
            Plugin.CreateIni("Items")
            ini = Plugin.GetIni("Items")
            itemdef = ItemManager.GetItemDefinitions()
            for item in itemdef:
                ini.AddSetting("StackSizes", item.shortname, str(item.stackable))
                ini.AddSetting("ConditionMax", item.shortname, str(item.condition.max))
                ini.AddSetting("Flags", item.shortname, str(item.flags))
            ini.Save()
        # Apply configured values; log and skip names that no longer resolve.
        ini = Plugin.GetIni("Items")
        for key in ini.EnumSection("StackSizes"):
            item = itemkey(key)
            if item is None:
                Util.Log("Failed to set stack size for: " + key)
                continue
            # FIX: the original assigned `itemkey.stackable`, mutating the
            # lookup function itself instead of the item definition.
            item.stackable = int(ini.GetSetting("StackSizes", key))
        for key in ini.EnumSection("ConditionMax"):
            item = itemkey(key)
            if item is None:
                Util.Log("Failed to set Max Condition for: " + key)
                continue
            item.condition.max = float(ini.GetSetting("ConditionMax", key))
        for key in ini.EnumSection("Flags"):
            item = itemkey(key)
            if item is None:
                Util.Log("Failed to set Flags for: " + key)
                continue
            # FIX: compare the *configured* value to "1"; the original
            # compared the live flags enum, which could never equal "1".
            # TODO(review): only the NoDropping flag is handled so far.
            if ini.GetSetting("Flags", key) == "1":
                item.flags = ItemDefinition.Flag.NoDropping
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "ItemModifier.py",
"copies": "1",
"size": "2232",
"license": "mit",
"hash": 2039769553417375700,
"line_mean": 43.66,
"line_max": 90,
"alpha_frac": 0.6008064516,
"autogenerated": false,
"ratio": 3.8885017421602788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49893081937602785,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '0.1'
__name__ = 'RollTheDice'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
import re
from UnityEngine import Random
class RollTheDice:
    """Pluton plugin (WIP): /rtd triggers a random event with a 60s cooldown."""

    def On_PluginInit(self):
        """Register the /rtd chat command."""
        Commands.Register("rtd")\
            .setCallback("rtd")\
            .setDescription("Have something random happen to you")\
            .setUsage("/rtd")

    def rtd(self, args, player):
        """Handle /rtd: run a random event unless the player is on cooldown."""
        cooldown = DataStore.Get("cooldown", player.GameID)
        if cooldown:
            player.Message("You have to wait longer before rolling dice again!")
        else:
            self.randomevents(player)
            mydict = Plugin.CreateDict()
            DataStore.Add("cooldown", player.GameID, True)
            mydict["gid"] = player.GameID
            # FIX: the original passed the builtin `dict` instead of `mydict`,
            # so cooldownCallback could never read the player's id.
            Plugin.CreateParallelTimer("cooldown", 60000, mydict).Start()

    def cooldownCallback(self, timer):
        """Timer expiry: clear the player's cooldown flag."""
        mydict = timer.Args
        gid = mydict["gid"]
        DataStore.Remove("cooldown", gid)
        timer.Kill()

    def randomevents(self, player):
        """Roll 1-19 and run the matching event.

        FIX: the original elif branches contained only comments, which is a
        SyntaxError; `pass` placeholders keep the module importable until the
        events are implemented."""
        num = int(Random.Range(1, 20))
        if num == 1:
            pass  # TODO: clear inventory
        elif num == 2:
            pass  # TODO: kick player
        elif num == 3:
            pass  # TODO: give random item
        elif num == 4:
            pass  # TODO: force sleep
        elif num == 5:
            pass  # TODO: teleport up in the air
        elif num == 6:
            pass  # TODO: hurt a random amount
        elif num == 7:
            pass  # TODO: freeze in place
        elif num == 8:
            pass  # TODO: do something else
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "RollTheDice.py",
"copies": "1",
"size": "1570",
"license": "mit",
"hash": -5056107709594197000,
"line_mean": 27.0535714286,
"line_max": 80,
"alpha_frac": 0.5681528662,
"autogenerated": false,
"ratio": 3.8199513381995134,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4888104204399513,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '0.5'
__name__ = 'Permissions'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
class Permissions:
    """Pluton plugin (WIP): per-plugin command permissions stored in ini files."""

    def On_PluginInit(self):
        """Create the backing ini files on first load and register /permission."""
        if not Plugin.IniExists("Permissions"):
            setini = Plugin.CreateIni("Permissions")
            setini.Save()
        if not Plugin.IniExists("Settings"):
            setini = Plugin.CreateIni("Settings")
            setini.Save()
        # FIX: the callback must name the handler method, which is
        # `permissions` (plural); the chat command itself stays /permission.
        Commands.Register("permission")\
            .setCallback("permissions")\
            .setDescription("Allow admin to add or remove permissions")\
            .setUsage("/permission add/remove username permission")

    def On_CommandPermission(self, cpe):
        """Gate plugin commands: block callers whose id is not listed for them.
        Admins bypass the check entirely."""
        player = cpe.player
        # FIX: the original tested `player is not player.Admin` -- an identity
        # comparison of the player object against a flag, which is effectively
        # always true; admins were not actually exempted.
        if not player.Admin:
            playerid = player.SteamID
            name = player.Name
            command = cpe.command
            ini = Plugin.GetIni("Permissions")
            ini2 = Plugin.GetIni("Settings")
            # FIX: "Debug" was an undefined bare name in the original.
            # NOTE(review): bool("False") is True -- any non-empty setting
            # enables debug; consider parsing the string value properly.
            debug = bool(ini2.GetSetting("Settings", "Debug"))
            setting = ini.GetSetting(PluginName, command)
            if setting is None or playerid not in setting:
                # TODO(review): BlockCommand is not defined in this file --
                # confirm it is injected by the Pluton runtime.
                BlockCommand("You Don't Have Permissions For That!")
                if not debug:
                    Util.Log(name + " with steamid " + playerid + " attempted to executed command " + command +
                             " from plugin " + PluginName)
            elif debug:
                Util.Log(name + " with steamid " + playerid + " executed command " + command + " from plugin " +
                         PluginName)

    def permissions(self, args, player):
        """Handle /permission add/remove <username> <permission> (admin only, WIP)."""
        if not player.Admin:
            player.Message("Only admins can modify permissions!")
            return
        elif len(args) == 0:
            player.Message("Use /permission add/remove username permission")
            return
        quoted = Util.GetQuotedArgs(args)
        ini = Plugin.GetIni("Permissions")
        ini2 = Plugin.GetIni("Settings")
        if len(quoted) == 3:
            if (quoted[0] or quoted[1] or quoted[2]) is not None:
                if quoted[0] == "add":
                    # TODO(review): unfinished -- nothing is saved and the
                    # "remove" branch does not exist yet.
                    ini.AddSetting(quoted[1], quoted[2])
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "Permissions.py",
"copies": "1",
"size": "2771",
"license": "mit",
"hash": -5106910100603256000,
"line_mean": 41,
"line_max": 112,
"alpha_frac": 0.5914832191,
"autogenerated": false,
"ratio": 4.350078492935636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5441561712035636,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '0.7'
__name__ = 'NPCDifficulty'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
import re
from UnityEngine import Random
# Make the EasyAttachEntities plugin importable from the server's public
# plugin folder; this plugin reuses its attach/explosive helpers.
path = Util.GetPublicFolder()
sys.path.append(path + "\\Plugins\\EasyAttachEntities")
import EasyAttachEntities
class NPCDifficulty:
    """Pluton plugin: toughens animals and punishes players who kill them.

    Killing an animal marks the killer for death: the animal respawn gets a
    timed explosive attached, and when the mark expires the player is
    ambushed by wolves and bears."""
    # Borrow two helpers from the EasyAttachEntities plugin instance.
    methodname = "AttachToAnimal"
    ClassName = EasyAttachEntities.EasyAttachEntities()
    AttachToAnimals = getattr(ClassName, methodname)
    methodname = "TimedExplosive"
    ClassName = EasyAttachEntities.EasyAttachEntities()
    TimedExplosive = getattr(ClassName, methodname)

    def On_PluginInit(self):
        """Clear any stale death marks left over from a previous load."""
        DataStore.Flush("marked4death")

    def On_PlayerConnected(self, player):
        """Warn the connecting player about the plugin's rules."""
        player.Message("Animals take 200% less dmg, try not to kill too many...")

    def On_PlayerDisconnected(self, player):
        DataStore.Remove("marked4death", player.GameID)

    def On_PlayerDied(self, pde):
        # Dying lifts the mark.
        DataStore.Remove("marked4death", pde.Victim.GameID)

    def On_PlayerGathering(self, ge):
        """Roughly 1-in-11 chance that gathering wakes a 'hibernating' bear."""
        num = int(Random.Range(0, 10))
        res = ge.Resource
        player = ge.Gatherer
        if res is None and num == 5:
            player.Message("Oh noes! You found a hibernating bear!")
            World.SpawnAnimal("bear", player.Location)

    def On_NPCAttacked(self, npa):
        # Heavily reduce all damage dealt to animals.
        # NOTE(review): dividing by 200 is far more than the "200% less"
        # advertised in the connect message -- confirm the intended factor.
        npa.DamageAmount /= 200

    def On_NPCKilled(self, nde):
        """Mark the killer for death and attach explosives to respawned animals."""
        baseplayer = nde.Attacker
        # FIX: the original `if baseplayer is None or "NPC":` was always true
        # ("NPC" is a truthy literal), so everything below was dead code.
        # TODO(review): confirm how Pluton represents NPC attackers; here we
        # require the attacker to expose a player userID.
        if baseplayer is None or not hasattr(baseplayer, "userID"):
            return
        player = Server.Players[baseplayer.userID]
        npc = nde.Victim
        npcname = npc.Name
        npcname = re.sub(r"\(Clone\)", "", npcname)
        kills = DataStore.Get("kills", player.GameID)
        marked = bool(DataStore.Get("marked4death", player.GameID))
        timer = int(Random.Range(30, 180))
        if not marked:
            # Escalate: one more explosive animal per kill, cycling at 10.
            if kills is None:
                kills = 1
            elif kills < 10:
                kills += 1
            elif kills >= 10:
                kills = 1
            DataStore.Add("marked4death", player.GameID, True)
            pldict = Plugin.CreateDict()
            pldict["uid"] = player.GameID
            # Random 30-180s delay before the "Animal Gods" retaliate.
            time = int(timer)*1000
            Plugin.CreateParallelTimer("marked", time, pldict).Start()
            player.Message("You have been marked for death!")
            DataStore.Add("kills", player.GameID, kills)
            for c in xrange(0, kills):
                newexplosive = self.TimedExplosive()
                self.AttachToAnimals(npcname, newexplosive, "head", True)

    def markedCallback(self, timer):
        """Timer expiry: lift the mark and ambush the player with 3 wolf/bear pairs."""
        pldict = timer.Args
        userid = pldict["uid"]
        n = 3
        DataStore.Remove("marked4death", userid)
        player = Server.Players[userid]
        player.MessageFrom("Animal Gods", "I hope you learn your lesson!")
        for c in xrange(0, n):
            World.SpawnAnimal("wolf", player.Location)
            World.SpawnAnimal("bear", player.Location)
        timer.Kill()
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "NPCDifficulty.py",
"copies": "1",
"size": "3213",
"license": "mit",
"hash": 7906515700075293000,
"line_mean": 34.3186813187,
"line_max": 81,
"alpha_frac": 0.6181139122,
"autogenerated": false,
"ratio": 3.651136363636364,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47692502758363636,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '0.9.1'
__name__ = 'EasyEntitySpawner'
# Special thanks to balu92, xEnt, and DreTaX
import clr
import sys
clr.AddReferenceByPartialName("Pluton", "UnityEngine")
import UnityEngine
import Pluton
from System import String
class EasyEntitySpawner:
teamrocket = ("bear", "wolf", "boar", "stag", "chicken", "horse")
networkable = None
    def On_ServerInit(self):
        # NOTE(review): this looks unfinished -- `mydict` is built locally and
        # then discarded, `mydict[...] += network` raises KeyError on the first
        # missing key, and `self.networkable` is never updated inside the loop.
        # Presumably the intent was to cache all networkables for spawnit's
        # `self.networkables` lookup -- TODO confirm and finish.
        mydict = Plugin.CreateDict()
        networkables = UnityEngine.Resources.FindObjectsOfTypeAll[BaseNetworkable]()
        for network in networkables:
            mydict[str(self.networkable)] += network
    def On_PluginInit(self):
        """Register the /spawnhere, /spawn and /spawnhelp chat commands."""
        Commands.Register("spawnhere")\
            .setCallback("spawnhere")\
            .setDescription("Spawn an entity at your location.")\
            .setUsage("spawnhere campfire")
        Commands.Register("spawn")\
            .setCallback("spawn")\
            .setDescription("Spawn an entity where you are looking.")\
            .setUsage("spawn campfire")
        # NOTE(review): placeholder description/usage text was never filled in.
        Commands.Register("spawnhelp")\
            .setCallback("spawnhelp")\
            .setDescription("your description here")\
            .setUsage("The usage here")
def On_PlayerConnected(self, pikachu):
pikachu.Message("Spawn in Entities and Animals! Type /spawnhelp for more help")
def spawnhere(self, args, player):
if len(args) == 0:
player.Message("Please see /spawnhelp for proper use!")
return
quoted = Util.GetQuotedArgs(args)
entity = quoted[0]
try:
count = int(quoted[1])
except ValueError:
count = 1
loc = player.Location
if 0 > count > 10:
player.Message("Valid quantities are: 1 - 10")
return
self.spawnit(entity, loc, count)
def spawn(self, args, player):
if len(args) == 0:
player.Message("Please see /spawnhelp for proper use!")
return
quoted = Util.GetQuotedArgs(args)
entity = quoted[0]
try:
count = int(quoted[1])
except ValueError:
count = 1
loc = player.Location
lookpos = player.GetLookPoint()
dist = Util.GetVectorsDistance(loc, lookpos)
if 0 > count > 10:
player.Message("Valid quantities are: 1 - 10")
return
elif dist > 50.0:
player.Message("Distance is too far from your current location. Please look where you want it to spawn")
return
else:
loc = lookpos
self.spawnit(entity, loc, count)
def spawnhelp(self, args, player):
if len(args) == 0:
msgtousr = ("-> EasyEntitySpawner by CorrosionX - Special thanks to balu92, xEnt, and DreTaX",
"To spawn entities, use \"/spawnhere entity\" or \"/spawn entity\"",
"For Lists:\"/spawnhelp entities\" or \"/spawnhelp animals\"")
for msg in msgtousr:
player.Message(msg)
return
quoted = Util.GetQuotedArgs(args)
if quoted[0] == "animals":
player.Message("List of Current Animals:")
for a in teamrocket:
player.Message(a)
return
elif quoted[0] == "entities":
player.Message("Available Entities: Too long to list. Use F1 -> Entity List - Partial Matches Allowed")
# player.Message(prefab)
else:
player.Message("Not A Valid Option")
def spawnit(self, entity, loc, count):
count = int(count)
entity = str(entity)
if entity not in self.teamrocket:
if entity == "player":
player = World.SpawnMapEntity("player/player", loc).ToPlayer()
player.displayName = "[Pluton Bot]"
player.EndSleeping()
else:
for a in self.networkables:
if entity in a:
for x in range(0, count):
World.SpawnMapEntity(a, loc)
else:
return
return
else:
for x in range(0, count):
World.SpawnAnimal(entity, loc) | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "EasyEntitySpawner.py",
"copies": "1",
"size": "4260",
"license": "mit",
"hash": -1342016915626115600,
"line_mean": 35.1101694915,
"line_max": 116,
"alpha_frac": 0.5551643192,
"autogenerated": false,
"ratio": 4.15609756097561,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021707592810047524,
"num_lines": 118
} |
__author__ = 'Corrosion X'
__version__ = "1.0.3"
# With major help from balu92
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
class EasyRemove:
def On_PluginInit(self):
DataStore.Flush("Remove")
def On_BuildingPartAttacked(self, attacked):
player = attacked.Attacker.ToPlayer()
if player is not None and Server.Players[player.userID].Admin:
gid = player.userID
if DataStore.Get("remove", gid) is not None:
Util.DestroyEntity(attacked.Victim.buildingBlock)
def On_Command(self, cmd):
if cmd.cmd == "remove" and cmd.User.Admin:
isdestroying = DataStore.Get("remove", cmd.User.GameID)
if isdestroying is not None:
DataStore.Remove("remove", cmd.User.GameID)
cmd.User.Message("Remove De-Activated!")
else:
DataStore.Add("remove", cmd.User.GameID, True)
mydict = Plugin.CreateDict()
mydict["gid"] = cmd.User.GameID
Plugin.CreateParallelTimer("removerDeactivator", 60000, mydict).Start()
cmd.User.Message("Remove Activated!")
def removerDeactivatorCallback(self, timer):
mydict = timer.Args
gid = mydict["gid"]
DataStore.Remove("remove", gid)
player = Server.Players[gid]
player.Message("Remove De-Activated!")
timer.Kill()
def On_PlayerDisconnected(self, pl):
DataStore.Remove("remove", pl.GameID) | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "EasyRemove.py",
"copies": "1",
"size": "1589",
"license": "mit",
"hash": -2328133578984030000,
"line_mean": 34.3333333333,
"line_max": 87,
"alpha_frac": 0.6224040277,
"autogenerated": false,
"ratio": 3.6953488372093024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48177528649093027,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '1.0'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
class EasySuicide:
def On_PlayerConnected(self, p):
p.MessageFrom("[EasySuicide]", "Use /die to suicide easily")
def On_PluginInit(self):
charmeleon = (" took the easy way out",
" tripped and split his dick open",
" fell asleep under the sun and burnt to a crisp",
" wanted to feed the roses",
" wanted a better spawn",
" now has 8 lives",
" thought is was a good idea to pull the pin of a grenade",
" overdosed on Rad Pills",
" deleted System32",
" tried to rock a wolf",
" took an arrow to the knee, didn\"t recover",
" thought it was a good idea to look down the barrel of his gun",
" believed he could fly",
" figured out how C4 works",
" thought he was Chuck Norris",
" took the red pill",
" tried to enter the cheat console with alt + f4")
warturtle = ("A naked army took out ")
if not Plugin.IniExists("responses"):
psyduck = Plugin.CreateIni("responses")
psyduck.AddSetting("PlayerResponses", charmeleon)
psyduck.AddSetting("ResponsesPlayer", warturtle)
def On_Command(self, charizard):
teamrocket = ("suicide", "sui", "killme", "die", "slay", "suicidal")
if charizard.cmd in teamrocket:
charizard.User.Kill() # Kill him with a thunderbolt
def On_PlayerDied(self, chansey):
World.SpawnAnimal("bear", chansey.Victim.Location)
World.SpawnAnimal("wolf", chansey.Victim.Location) | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "EasySuicide.py",
"copies": "1",
"size": "1977",
"license": "mit",
"hash": 6184534686428939000,
"line_mean": 42.9555555556,
"line_max": 88,
"alpha_frac": 0.5472938796,
"autogenerated": false,
"ratio": 3.899408284023669,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49467021636236685,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '1.0'
__name__ = 'AdminDoor'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
class AdminDoor:
def On_PluginInit(self):
Commands.Register("admindoor")\
.setCallback("admindoor")\
.setDescription("Allow admins to open any door when activated")\
.setUsage("/admindoor (on/off)")
def On_DoorUse(self, de):
activated = DataStore.Get("admindoor", de.Player.SteamID)
if de.Player.Admin:
de.IgnoreLock = activated
def admindoor(self, unused, player):
activated = DataStore.Get("admindoor", player.SteamID)
if not activated:
player.Message("<color=yellow>AdminDoor</color> <color=green>Activated</color>!")
DataStore.Add("admindoor", player.SteamID, True)
else:
player.Message("<color=yellow>AdminDoor</color> <color=red>Deactivated</color>!")
DataStore.Add("admindoor", player.SteamID, False)
def On_PlayerConnected(self, pe):
if pe.Admin:
DataStore.Add("admindoor", pe.SteamID, False) | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "AdminDoor.py",
"copies": "1",
"size": "1196",
"license": "mit",
"hash": -6018196890046913000,
"line_mean": 32.25,
"line_max": 93,
"alpha_frac": 0.6421404682,
"autogenerated": false,
"ratio": 3.369014084507042,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9488532246350133,
"avg_score": 0.004524461271381764,
"num_lines": 36
} |
__author__ = 'Corrosion X'
__version__ = '1.0'
__name__ = 'AdminGive'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
import System
from System import *
class AdminGive:
def On_PluginInit(self):
Commands.Register("give")\
.setCallback("giveitem")\
.setDescription("Give a player an item")\
.setUsage("give john hatchet 2")
def giveitem(self, args, player):
if len(args) == 0:
player.Message("How to use /give ex. /give corrosion rock 1")
return
quoted = Util.GetQuotedArgs(args)
itemID = Pluton.InvItem.GetItemID(quoted[1])
try:
amount = int(quoted[2])
except ValueError:
amount = 1
if player.Admin:
playerr = self.CheckV(player, quoted[0])
if playerr is None:
return
else:
playerr.Inventory.Add(itemID, amount)
playerr.Message("You have been given " + str(amount) + itemID)
player.Message("You have given " + name + str(amount) + itemID)
def GetPlayerName(self, namee):
name = namee.lower()
for pl in Server.ActivePlayers:
if pl.Name.lower() == name:
return pl
return None
def CheckV(self, Player, args):
systemname = "AdminGive"
count = 0
if hasattr(args, '__len__') and (not isinstance(args, str)):
p = self.GetPlayerName(String.Join(" ", args))
if p is not None:
return p
for pl in Server.ActivePlayers:
for namePart in args:
if namePart.lower() in pl.Name.lower():
p = pl
count += 1
continue
else:
p = self.GetPlayerName(str(args))
if p is not None:
return p
s = str(args).lower()
for pl in Server.ActivePlayers:
if s in pl.Name.lower():
p = pl
count += 1
continue
if count == 0:
Player.MessageFrom(systemname, "Couldn't find " + String.Join(" ", args) + "!")
return None
elif count == 1 and p is not None:
return p
else:
Player.MessageFrom(systemname, "Found " + str(count) + " player with similar name. Use more correct name!")
return None | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "AdminGive.py",
"copies": "1",
"size": "2567",
"license": "mit",
"hash": -468554558887883970,
"line_mean": 32.3506493506,
"line_max": 119,
"alpha_frac": 0.5181145306,
"autogenerated": false,
"ratio": 4.147011308562197,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003971284790179854,
"num_lines": 77
} |
__author__ = "Corrosion X"
__version__ = '1.0'
__name__ = 'EasyAttachEntities'
# Massive thanks to DreTaX for getting custom function imported from c# working
import clr
clr.AddReferenceByPartialName("Pluton")
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Assembly-CSharp")
import Pluton
import UnityEngine
from UnityEngine import Vector3
from UnityEngine import Quaternion
# Bone/attachment-point names on the skeleton that entities can be parented
# to.  NOTE(review): unused in this file, and "spike4" looks like a typo for
# "spine4" -- confirm before relying on it.
parts = ("l_upperarm", "r_upperarm", "head", "l_knee", "r_knee", "spine1", "spine2", "spine3", "spike4",
         "l_hand", "r_hand", "r_hip", "l_hip", "l_eye", "r_eye", "l_toe", "r_toe", "pelvis", "l_clavicle",
         "r_clavicle", "r_forearm", "l_forearm", "r_ulna", "l_ulna", "r_foot", "l_foot", "neck", "jaw")
# GameManager is supplied by the Rust/Pluton host (Assembly-CSharp) at
# runtime; importing it fails outside the game, so swallow the error.
try:
    import GameManager as importedclass
except ImportError:
    pass
class EasyAttachEntities:
DefaultVector = None
Quat = None
def On_PluginInit(self):
self.DefaultVector = Vector3(0, 0, 0)
self.Quat = Quaternion(0, 0, 0, 1)
Commands.Register("attach")\
.setCallback("attach")\
.setDescription("your description here")\
.setUsage("The usage here")
Commands.Register("attanimal")\
.setCallback("attanimal")\
.setDescription("your description here")\
.setUsage("The usage here")
Commands.Register("detach")\
.setCallback("detach")\
.setDescription("your description here")\
.setUsage("The usage here")
Commands.Register("test")\
.setCallback("test")\
.setDescription("your description here")\
.setUsage("The usage here")
def AttachToPlayer(self, Player, whatthing, towhere, Spawn01=False):
e = Player.basePlayer
people = importedclass.server.CreateEntity(whatthing, self.DefaultVector, self.Quat)
if people:
people.SetParent(e, towhere)
people.Spawn(Spawn01)
return people
def AttachToAnimal(self, animal, whatthing, towhere, Spawn01=False):
animals = importedclass.server.CreateEntity(whatthing, self.DefaultVector, self.Quat)
if animals:
animals.SetParent(animal, towhere)
animals.Spawn(Spawn01)
return animals
def TimedExplosive(self, parent):
explosive = importedclass.server.CreateEntity("items/timed.explosive.deployed", self.DefaultVector, self.Quat)
if explosive:
explosive.timerAmountMin = 3
explosive.timerAmountMax = 4
explosive.explosionRadius = 100
explosive.SetParent(parent, "head")
explosive.Spawn(True)
return explosive
def attach(self, args, player):
quoted = Util.GetQuotedArgs(args)
whatthing = quoted[0]
towhere = quoted[1]
attached = self.AttachToPlayer(player, whatthing, towhere, True)
player.Message("Attached entity to your " + towhere + "!")
DataStore.Add("attached", player.SteamID, attached)
def attachanimal(self, args, player):
quoted = Util.GetQuotedArgs(args)
animal = quoted[0]
whatthing = quoted[1]
towhere = quoted[2]
animals = World.SpawnAnimal(animal, player.Location)
attached = self.AttachToAnimal(animals, whatthing, towhere, true)
DataStore.Add("attachanimal", player.SteamID, attached)
def detach(self, args, player):
quoted = Util.GetQuotedArgs(args)
removewhat = quoted[0]
if len(removewhat) == 0:
player.Message("Choices are: all, attach, attanimal; Please try again")
return
isattached = DataStore.Get("attached", player.SteamID)
moreattached = DataStore.Get("attachanimal", player.SteamID)
if isattached is not None or moreattached is not None:
if removewhat == "all":
Util.DestroyEntity(isattached)
Util.DestroyEntity(moreattached)
player.Message("Everything has been destroyed!")
elif removewhat == "attach":
Util.DestroyEntity(isattached)
player.Message("Entities Attached Have Been Removed!")
elif removewhat == "attanimal":
Util.DestroyEntity(moreattached)
player.Message("Attached Animals Removed!")
else:
player.Message("You dont have anything attached!")
def test(self, unused, player):
attached = self.AttachToPlayer(player, "weapons/melee/boneknife.weapon", "head", True)
self.TimedExplosive(attached)
DataStore.Add("attached", player.SteamID, attached)
player.Message("Attached entity to your head!") | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "EasyAttachEntities.py",
"copies": "1",
"size": "4690",
"license": "mit",
"hash": 8528051863747175000,
"line_mean": 39.7913043478,
"line_max": 118,
"alpha_frac": 0.6298507463,
"autogenerated": false,
"ratio": 3.7580128205128207,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48878635668128206,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '1.0'
__name__ = 'ReservedSlots'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
class ReservedSlots:
def On_PluginInit(self):
if not Plugin.IniExists("ReservedSlots"):
setini = Plugin.CreateIni("ReservedSlots")
setini.AddSetting("Settings", "ReserveSlots", "2")
setini.Save()
Commands.Register("rslots")\
.setCallback("rslots")\
.setDescription("Allows an admin to add and remove players from reserve slots.")\
.setUsage("/rslots add/remove steamid")
def On_ClientAuth(self, ae):
cnt = self.domath()
ini = Plugin.GetIni("ReservedSlots")
if cnt or ini.ContainsSetting("Reserved", str(ae.GameID)):
return
else:
ae.Reject("You have been kicked due to reserve slots.")
##Debug.Log("Player " + ae.Name + " : " + ae.GameID + " was rejected due to reserve slots.")
def domath(self):
cplayers = Server.ActivePlayers.Count
ini = Plugin.GetIni("ReservedSlots")
rslots = ini.GetSetting("Settings", "Reservedslots", "4")
maxpl = Server.MaxPlayers
check = maxpl - int(rslots)
if check > cplayers:
return True
else:
return False
def rslots(self, args, player):
quoted = Util.GetQuotedArgs(args)
player.Message(str(Server.MaxPlayers))
if not player.Admin:
return
if len(quoted) < 2:
player.Message("Please use /reserveslots add/remove steamid")
elif quoted[0] is "remove" or "add":
if len(quoted[1]) == 17:
ini = Plugin.GetIni("ReserveSlots")
if quoted[0] == "add":
ini.AddSetting("Reserved", quoted[1])
if quoted[0] == "remove":
if ini.ContainsSetting("Reserved", quoted[1]):
ini.DeleteSetting("Reserved", quoted[1])
else:
player.Message("Please provide a valid steamID") | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "ReservedSlots.py",
"copies": "1",
"size": "2165",
"license": "mit",
"hash": -3381094093480730000,
"line_mean": 35.1,
"line_max": 104,
"alpha_frac": 0.5769053118,
"autogenerated": false,
"ratio": 3.9507299270072993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50276352388073,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Corrosion X'
__version__ = '1.0'
__name__ = 'UsefulCommands'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
from UnityEngine import *
class UsefulCommands:
def On_PluginInit(self):
DataStore.Flush("markloc")
DataStore.Flush("markset")
def On_Command(self, cmd):
suicidelist = ("die", "kill", "slay", "dead")
locationlist = ("loc", "location")
if cmd.cmd == "day" and cmd.User.Admin:
World.Time = 8
elif cmd.cmd == "night" and cmd.User.Admin:
World.Time = 17
elif cmd.cmd in suicidelist:
cmd.User.Kill()
elif cmd.cmd in locationlist:
cmd.User.Message("You're at coordinates: " + str(cmd.User.Location))
elif cmd.cmd == "mark":
if cmd.quotedArgs[0] is None:
markset = bool(DataStore.Get("markset", cmd.User.GameID))
if not markset:
cmd.User.Message("Please \'/mark set\' first!")
return
markloc = DataStore.Get("markloc", cmd.User.GameID)
dist = Util.GetVectorsDistance(markloc, cmd.User.Location)
cmd.User.Message("You are " + str(dist) + " from your mark.")
elif cmd.quotedArgs[0] == "set":
markset = bool(DataStore.Get("markset", cmd.User.GameID))
if markset:
cmd.User.Message("Updated your Mark")
markloc = cmd.User.Location
cmd.User.Message("Location Marked")
DataStore.Add("markloc", cmd.User.GameID, markloc)
DataStore.Add("markset", cmd.User.GameID, True) | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "UsefulCommands.py",
"copies": "1",
"size": "1759",
"license": "mit",
"hash": 23550568992791870,
"line_mean": 38.1111111111,
"line_max": 80,
"alpha_frac": 0.5679363275,
"autogenerated": false,
"ratio": 3.703157894736842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9749900846267637,
"avg_score": 0.004238675193840886,
"num_lines": 45
} |
__author__ = 'CorrosionX'
__version__ = '1.3'
__name__ = 'EasyAirdrops'
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
from UnityEngine import Random
class EasyAirdrops:
    """Periodic automatic airdrops, configurable via EasyAirdrops.ini."""

    def On_PluginInit(self):
        if not Plugin.IniExists("EasyAirdrops"):
            setini = Plugin.CreateIni("EasyAirdrops")
            setini.AddSetting("Settings", "PlayersRequired", "3")  # Default
            setini.AddSetting("Settings", "DropFrequency", "3600")  # In Seconds! 1 hour
            setini.AddSetting("Settings", "DropDuring", "anytime")  # (default) - Can be day/night/anytime
            setini.AddSetting("Settings", "DropOnPlayer", "True")  # Drop On A Player?
            setini.Save()
        else:
            # If config exists, make sure each setting is set, else add what is missing
            ini = self.easyairdropsini()
            if ini.GetSetting("Settings", "PlayersRequired") is None:
                ini.AddSetting("Settings", "PlayersRequired", "3")  # Default
            if ini.GetSetting("Settings", "DropFrequency") is None:
                ini.AddSetting("Settings", "DropFrequency", "3600")  # In Seconds! 1 hour
            if ini.GetSetting("Settings", "DropDuring") is None:
                ini.AddSetting("Settings", "DropDuring", "anytime")
            if ini.GetSetting("Settings", "DropOnPlayer") is None:
                ini.AddSetting("Settings", "DropOnPlayer", "True")
            ini.Save()
        ini = self.easyairdropsini()
        try:
            drop_time = int(ini.GetSetting("Settings", "DropFrequency")) * 1000
        except ValueError:
            drop_time = 3600000  # Default time if number not set/invalid.
        Plugin.CreateTimer("airdroptimer", drop_time).Start()

    def airdroptimerCallback(self, unused):
        """Timer tick: drop if enough players are on and time-of-day matches."""
        ini = self.easyairdropsini()
        if not self.playercount():
            return
        drop_during = str(ini.GetSetting("Settings", "DropDuring"))
        if drop_during == "night":
            # BUG FIX: `17.5 < World.Time < 5.5` can never be true; night
            # wraps midnight, so it is after 17.5 OR before 5.5.
            if World.Time > 17.5 or World.Time < 5.5:
                self.dropit()
        elif drop_during == "day":
            if 5.5 < World.Time < 17.5:
                self.dropit()
        else:
            self.dropit()

    def easyairdropsini(self):
        """Shortcut for the plugin's ini file."""
        return Plugin.GetIni("EasyAirdrops")

    def playercount(self):
        """True when enough players are online for a drop."""
        ini = self.easyairdropsini()
        count = Server.Players.Count
        try:
            req_players = int(ini.GetSetting("Settings", "PlayersRequired"))
        except ValueError:
            req_players = 3  # Default if number not set/invalid.
        return req_players <= count

    def dropit(self):
        """Trigger one airdrop, optionally targeting a random active player."""
        ini = self.easyairdropsini()
        # BUG FIX: bool("False") is True; parse the setting's text instead.
        rplayer = str(ini.GetSetting("Settings", "DropOnPlayer")).lower() == "true"
        num = int(Random.Range(0, Server.ActivePlayers.Count))
        rplay = Server.ActivePlayers[num]
        if rplayer:
            World.AirDropAtPlayer(rplay)
            # Use commented out code if you dont want to hear or see the airdrop
            #World.SpawnMapEntity("items/supply_drop", rplay.X, rplay.Y + 500, rplay.Z)
            #Server.Broadcast("Airdrop on player " + str(rplay.Name))
        else:
            World.AirDrop()
            # Use commented out code if you dont want to hear or see the airdrop
            #rplay.X += float(Random.Range(-1000, 1000))
            #rplay.Z += float(Random.Range(-1000, 1000))
            #if -4000 < rplay.X < 4000 and -4000 < rplay.Z < 4000:
            # World.SpawnMapEntity("items/supply_drop", rplay.X, 1000, rplay.Z)
            # Server.Broadcast("Airdrop is on its way @ " + str(loc.X) + str(loc.Z)) | {
"repo_name": "stopspazzing/Pluton-Plugins",
"path": "EasyAirdrops.py",
"copies": "1",
"size": "3875",
"license": "mit",
"hash": 6528173985921354000,
"line_mean": 41.5934065934,
"line_max": 106,
"alpha_frac": 0.5772903226,
"autogenerated": false,
"ratio": 3.8177339901477834,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4895024312747783,
"avg_score": null,
"num_lines": null
} |
__author__ = "Cory J. Engdahl"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "cjengdahl@gmail.com"
from enigma import enigma_machine
from enigma import enigma_exception
import configparser
import click
import os
# instantiate config parser
# Shared parser for the packaged user configuration file; extended
# interpolation lets one setting reference another (${section:key}).
config = configparser.ConfigParser(interpolation=configparser.
                                   ExtendedInterpolation())
# config.ini lives inside the enigma package, relative to this module.
pwd = os.path.dirname(__file__)
user_configs = os.path.join(pwd, 'enigma/config.ini')
class DecryptAlias(click.Group):
    """Click group that treats the "decrypt" command as an alias of "encrypt"."""

    def get_command(self, ctx, cmd_name):
        # Normal lookup first; only the unknown name "decrypt" falls back.
        found = click.Group.get_command(self, ctx, cmd_name)
        if found is not None:
            return found
        if cmd_name == "decrypt":
            return click.Group.get_command(self, ctx, "encrypt")
        return None
def print_version(ctx, param, value):
    """Eager click callback for --version: print the version and exit."""
    if value and not ctx.resilient_parsing:
        click.echo('Enigma Version %s' % __version__)
        ctx.exit()
@click.command(cls=DecryptAlias)
@click.option('--version', is_flag=True, callback=print_version,
              expose_value=False, is_eager=True)
def cli():
    # Root command group; subcommand dispatch (including the
    # decrypt -> encrypt alias) is handled by DecryptAlias.
    pass
@cli.command()
@click.argument("configuration", type=click.STRING, required=False)
def list(configuration):
    """
    Lists the existing user configurations.
    """
    if len(config.read(user_configs)) == 0:
        click.echo("\nError: Config file, \"config.ini\", not found\n")
        return
    # Without an argument, just enumerate the section names.
    if configuration is None:
        click.echo("\nAvailable Configurations:\n")
        for section in config.sections():
            # preferences is not a config
            if section.upper() != 'PREFERENCES':
                click.echo(section)
        click.echo("\n")
        return
    # Otherwise print the named configuration's settings, sorted by key.
    try:
        selected = load_config(configuration)
    except configparser.NoSectionError:
        click.echo("Error: Configuration %s could not be found" % configuration)
        return
    click.echo("\n%s:\n" % configuration)
    for key, value in sorted(selected.items()):
        click.echo("%s: %s" % (key, value))
    click.echo("\n")
@cli.command()
@click.option('--spaces', '-s', type=click.Choice(['remove', 'X', 'keep']), help='Set space handling preference')
@click.option('--newlines', '-n', type=click.Choice(['True', 'False']), help='Include newline characters')
@click.option('--space-detect', '-d', type=click.Choice(['True', 'False']), help='Convert decrypted Xs to spaces')
@click.option('--group', '-g', type=click.STRING, help='Set output letter grouping')
@click.option('--select', '-c', help='Select enigma machine configuration')
@click.option('--remember', '-k', type=click.Choice(['True', 'False']), help='Remember machine state after encryption')
@click.option('--progress', '-p', type=click.Choice(['True', 'False']), help="Show progressbar")
def pref(spaces, group, remember, space_detect, select, newlines, progress):
    """
    Manages the default preferences. Invoked options updates preferences
    """
    if len(config.read(user_configs)) == 0:
        click.echo("\nConfig file, \"config.ini\", not found\n")
        return
    # IDIOM: collect the invoked options in an explicit dict instead of
    # eval()-ing their variable names.
    candidates = {'spaces': spaces, 'group': group, 'remember': remember,
                  'space_detect': space_detect, 'select': select,
                  'newlines': newlines, 'progress': progress}
    updated_options = {k: v for k, v in candidates.items() if v is not None}
    if updated_options:
        # update preferences
        write_config("Preferences", updated_options)
        click.echo("\nPreferences updated\n")
        return
    # No options invoked: list all preferences, sorted by key.
    try:
        selected = load_config("Preferences")
    except configparser.NoSectionError:
        click.echo("Error: Preferences could not be found")
        return
    click.echo("\nPreferences:\n")
    for key, value in sorted(selected.items()):
        click.echo("%s: %s" % (key, value))
    click.echo("\n")
    return
@cli.command()
@click.option('--model', '-m', type=click.Choice(['EnigmaI', 'M2', 'M3', 'M4']), help='Enigma machine model')
@click.option('--fast', '-r1', type=click.STRING, help='Fast rotor config: id (1-8) , position (1-26), and ring setting (1-26)')
@click.option('--middle', '-r2', type=click.STRING, help='Middle rotor config: id (1-8) , position (1-26), and ring setting (1-26)')
@click.option('--slow', '-r3', type=click.STRING, help='Slow rotor config: id (1-8) , position (1-26), and ring setting (1-26)')
@click.option('--static', '-r4', type=click.STRING, help='Static rotor config: (9 for beta, 10 for gamma), position (1-26), and ring setting (1-26)')
@click.option('--reflect', '-r', type=click.Choice(['UKW-A', 'UKW-B', 'UKW-C', 'UKW-B_THIN', 'UKW-C_THIN']), help='Enigma reflector')
@click.option('--plugs', '-p', type=click.STRING, help='Plugs inserted in plugboard (e.g. "AB,XY")')
@click.argument('configuration', type=click.STRING, required=True)
def new(configuration, model, fast, middle, slow, static, reflect, plugs):
    """
    Creates and saves new user configuration for enigma machine. New configuration is based on default configuration
    overwritten with invoked options.
    """
    # M4 machines require the extra (static) rotor and a thin reflector.
    if model == "M4" and (static is None or reflect is None):
        click.echo("Cannot create configuration, M4 model requires static rotor and thin reflector")
        return
    # load default
    local_config = load_config('Default')
    # add changes locally
    options = {'model': model, 'fast': fast, 'middle': middle, 'slow': slow, 'static': static, 'reflect': reflect,
               'plugs': plugs}
    local_config = update_config(local_config, options)
    # assembly enigma machine
    # NOTE(review): the machine is built only to validate the combined
    # settings; the resulting `enigma` object is intentionally discarded.
    try:
        enigma = assemble_enigma(local_config)
    except enigma_exception.DuplicatePlug:
        click.echo("Error: Duplicate plugs are not allowed")
        return
    except enigma_exception.MaxPlugsReached:
        click.echo("Error: More plugs than allowed have been specified")
        return
    except enigma_exception.DuplicateRotor:
        click.echo("Error: Duplicate rotors are not allowed")
        return
    except enigma_exception.InvalidRotor:
        click.echo("Error: Invalid rotor specified")
        return
    except enigma_exception.InvalidRotorFour:
        click.echo("Error: Invalid static rotor specified. "
                   "Must use rotor id 9 (Beta) or 10 (Gamma)")
        return
    except enigma_exception.InvalidReflector:
        click.echo("Error: Invalid reflector specified")
        return
    # create new config and write local configuration to it
    # NOTE(review): no config.read() happens here -- presumably
    # load_config/write_config handle the file I/O themselves; confirm.
    config[configuration] = {}
    write_config(configuration, local_config)
@cli.command()
def clear():
    """
    Clears all users configurations with the exception of 'Default' and 'User'.
    """
    config.read(user_configs)
    protected = ['DEFAULT', 'USER', 'PREFERENCES']
    # sections() returns a fresh list, so removing while looping is safe.
    for section in config.sections():
        if section.upper() not in protected:
            config.remove_section(section)
    # set config preference to User
    config["Preferences"]["select"] = "User"
    with open(user_configs, 'w') as configfile:
        config.write(configfile)
@cli.command()
@click.argument('configuration', type=click.STRING, required=True)
def delete(configuration):
    """
    Deletes specified user configuration. Default and User configs
    can not be deleted
    """
    if len(config.read(user_configs)) == 0:
        click.echo("\nError: Config file, \"config.ini\", not found\n")
        return
    if configuration.upper() not in ['DEFAULT', 'USER']:
        # Preferences is an internal section, not a deletable configuration.
        if configuration.upper() == "PREFERENCES":
            click.echo("\nError: Configuration \"%s\" does not exist, cannot delete\n" % configuration)
            return
        # gather all configurations
        configurations = []
        sections = config.sections()
        # NOTE(review): assumes exactly Default/User/Preferences remain when
        # nothing is delete-able -- brittle if the section layout changes.
        if len(sections) == 3:
            click.echo("\nError: Configuration file contains no delete-able configurations\n")
            return
        for x in sections:
            configurations.append(x)
        # NOTE(review): this membership test is case-SENSITIVE while the
        # guards above are case-insensitive -- confirm intended behavior.
        if configuration not in configurations:
            click.echo("Error: Configuration \"%s\" does not exist, cannot delete" % configuration)
        else:
            # remove config
            config.remove_section(configuration)
            # if config to be deleted is config preference
            if config["Preferences"]["select"] == configuration:
                # update config preference to be User config
                config["Preferences"]["select"] = "User"
            with open(user_configs, 'w') as configfile:
                config.write(configfile)
    else:
        click.echo("\nError: Cannot delete \"Default\" or \"User\" configurations\n")
@cli.command()
@click.argument('configuration', type=click.STRING, required=True)
def reset(configuration):
    """
    Resets specified configuration to \"Default\" settings.
    """
    if len(config.read(user_configs)) == 0:
        click.echo("\nError: Config file, \"config.ini\", not found\n")
        return
    if configuration.upper() != 'DEFAULT':
        # gather all configurations
        configurations = []
        sections = config.sections()
        if len(sections) == 0:
            click.echo("\nError: Configuration file contains no configurations\n")
            return
        for x in sections:
            configurations.append(x.upper())
        # Preferences cannot be reset; unknown names are rejected too.
        if (configuration.upper() not in configurations) or configuration.upper() == "PREFERENCES":
            click.echo("\nError: Configuration \"%s\" does not exist, cannot reset\n" % configuration)
        else:
            # Rebuild the section as a fresh copy of every Default setting.
            config.remove_section(configuration)
            config[configuration] = {}
            for x in config.options('Default'):
                config[configuration][x] = config['Default'][x]
            with open(user_configs, 'w') as configfile:
                config.write(configfile)
    else:
        click.echo("\nError: Cannot reset \"Default\" configuration\n")
@cli.command()
# formatting options
@click.option('--spaces', '-s', type=click.Choice(['remove', 'X', 'keep']), help='Set space handling preference')
@click.option('--newlines', '-n', type=click.Choice(['True', 'False']), help='Include newline characters')
@click.option('--space-detect', '-d', type=click.Choice(['True', 'False']), help='Convert decrypted Xs to spaces')
@click.option('--group', '-g', help='Set output letter grouping')
# enigma setting options
@click.option('--model', '-m', type=click.STRING, help='Enigma machine model')
@click.option('--fast', '-r1', type=click.STRING, help='Fast rotor config: id (1-8) , position (1-26), and ring setting (1-26)')
@click.option('--middle', '-r2', type=click.STRING, help='Middle rotor config: id (1-8) , position (1-26), and ring setting (1-26)')
@click.option('--slow', '-r3', type=click.STRING, help='Slow rotor config: id (1-8) , position (1-26), and ring setting (1-26)')
@click.option('--static', '-r4', type=click.STRING, help='Static rotor config: (9 for beta, 10 for gamma), position (1-26), and ring setting (1-26)')
@click.option('--reflect', '-r', type=click.Choice(['UKW-A', 'UKW-B', 'UKW-C', 'UKW-B_THIN', 'UKW-C_THIN']), help='Enigma reflector')
@click.option('--plugs', '-p', type=click.STRING, help='Plugs inserted in plugboard (e.g. "AB,XY")')
# config management options
@click.option('--select', '-c', help='Select enigma machine configuration')
@click.option('--update', '-u', is_flag=True, help='Overwrite config file with invoked preferences, and options')
@click.option('--remember', '-k', type=click.Choice(['True', 'False']), help='Remember machine state after encryption')
# input/output options
@click.option('--input', '-f', type=click.File('r'), required=False, help="Path to input file")
@click.option('--output', '-o', type=click.File('w'), required=False, help="Path to output file")
# arguments
@click.argument('message', type=click.STRING, required=False)
def encrypt(spaces, group, model, fast, middle, slow, static, reflect, plugs, select, update, remember, message, input, output, space_detect, newlines):
    """
    Encrypts text input with Enigma Machine. All input is converted to uppercase and non-alphabetic characters (with the exception
    of spaces and newline characters) are removed.
    """
    # get user configurations
    if len(config.read(user_configs)) == 0:
        click.echo("\nError: Config file, \"config.ini\", not found\n")
        return
    # load preferences
    try:
        preferences = load_config("Preferences")
    except configparser.NoSectionError:
        # BUG FIX: the original used `"..." % select` on a string with no
        # placeholder, which raised a TypeError instead of printing the error.
        click.echo("Error: Preferences could not be found")
        return
    # load model
    try:
        if select is None:
            # no config chosen on the command line: fall back to the preferred one
            select = preferences["select"]
        local_config = load_config(select)
    except configparser.NoSectionError:
        click.echo("Error: Configuration %s could not be found" % select)
        return
    # add config changes locally
    options = {'model': model, 'fast': fast, 'middle': middle, 'slow': slow, 'static': static, 'reflect': reflect,
               'plugs': plugs}
    local_config = update_config(local_config, options)
    # add preferences locally
    pref_options = {'spaces': spaces, 'group': group, 'remember': remember,
                    'select': select, 'space_detect': space_detect, 'newlines': newlines}
    preferences = update_config(preferences, pref_options)
    # assemble enigma machine
    try:
        enigma = assemble_enigma(local_config)
    except enigma_exception.DuplicatePlug:
        click.echo("Error: Duplicate plugs are not allowed")
        return
    except enigma_exception.MaxPlugsReached:
        click.echo("Error: More plugs than allowed have been specified")
        return
    except enigma_exception.DuplicateRotor:
        click.echo("Error: Duplicate rotors are not allowed")
        return
    except enigma_exception.InvalidRotor:
        click.echo("Error: Invalid rotor specified")
        return
    except enigma_exception.InvalidRotorFour:
        click.echo("Error: Invalid static rotor specified")
        return
    except enigma_exception.InvalidReflector:
        click.echo("Error: Invalid reflector specified, check model compatibility")
        return
    # if enigma assembled without error and update is specified, write config and preferences to config file
    if update:
        write_config(select, local_config)
        write_config("Preferences", preferences)
    # match preference options to local preferences
    spaces = preferences["spaces"]
    newlines = preferences["newlines"]
    group = int(preferences["group"])
    remember = preferences["remember"]
    space_detect = preferences["space_detect"]
    progress = preferences["progress"]
    # ensure type
    space_detect = str_to_bool(space_detect)
    newlines = str_to_bool(newlines)
    progress = str_to_bool(progress)
    # if input file used, overwrite message
    if message is None and input is not None:
        message = input.read()
    # if newlines are NOT wanted, treat them as spaces
    # (BUG FIX: guard against message being None when neither a MESSAGE
    # argument nor an --input file was supplied; _encrypt handles None itself)
    if not newlines and message is not None:
        message = message.replace('\n', ' ')
    # encrypt message
    ciphertext = _encrypt(enigma, message, spaces, space_detect, group, progress)
    # save state of machine for next use, if requested
    if str_to_bool(remember):
        # get position of rotors 1-3 and write to config file
        r1_p = enigma.rotor_pos("r1")
        r2_p = enigma.rotor_pos("r2")
        r3_p = enigma.rotor_pos("r3")
        # replace value in config file
        config[select]["r1_p"] = str(r1_p)
        config[select]["r2_p"] = str(r2_p)
        config[select]["r3_p"] = str(r3_p)
        # write changes
        with open(user_configs, 'w') as configfile:
            config.write(configfile)
    # print cipher
    if output is not None:
        output.write(ciphertext)
    else:
        click.echo(ciphertext)
##################################
# Local Helper Methods #
##################################
def _encrypt(enigma, message, spaces, space_detect, group, progress):
"""
Encrypts input and directs output appropriately, with standard-out as
the default. It also controls output format. By default, characters
are encrypted in groups of 5. If spaces are not previously filtered out,
output will not group characters but instead encrypt with spacing in its
natural state. All input must be upper case, or it will not be encrypted.
:return:
"""
# if space_detect, remove all spaces for processing
if space_detect:
group = 0
spaces = "remove"
if message is None:
return
plaintext = message.upper()
ciphertext = ""
count = 0
if progress:
with click.progressbar(plaintext, label="Encrypting", length = len(plaintext)) as bar:
for c in bar:
# for c in plaintext:
# if character is a space
if ord(c) == 32:
# and option is set to remove
if spaces.lower() == 'remove':
# do nothing (will not be entered into enigma)
pass
# and option is set to 'X
elif spaces.lower() == 'x':
# replace the space with the character 'X' and encrypt
ciphertext += enigma.encrypt('X')
# otherwise, the space will be evident in the cipher text
else:
ciphertext += " "
# if new line, keep it (if newlines undesired, it should have been replaced with space already)
elif ord(c) == 10:
ciphertext += '\n'
# if character is illegal, remove it
elif ord(c) < 65 or ord(c) > 90:
# do nothing (will not be entered into enigma)
pass
# otherwise character is legal, encrypt it
else:
e = enigma.encrypt(c)
# if space detect enabled, replaces Xs with spaces
if space_detect and e == 'X':
ciphertext += " "
else:
ciphertext += e
if group != 0:
count = (count + 1) % group
# if not keeping/replacing spaces, group characters for readability
if spaces.lower() == 'remove' and count == 0:
ciphertext += " "
else:
for c in plaintext:
# if character is a space
if ord(c) == 32:
# and option is set to remove
if spaces.lower() == 'remove':
# do nothing (will not be entered into enigma)
pass
# and option is set to 'X
elif spaces.lower() == 'x':
# replace the space with the character 'X' and encrypt
ciphertext += enigma.encrypt('X')
# otherwise, the space will be evident in the cipher text
else:
ciphertext += " "
# if new line, keep it (if newlines undesired, it should have been replaced with space already)
elif ord(c) == 10:
ciphertext += '\n'
# if character is illegal, remove it
elif ord(c) < 65 or ord(c) > 90:
# do nothing (will not be entered into enigma)
pass
# otherwise character is legal, encrypt it
else:
e = enigma.encrypt(c)
# if space detect enabled, replaces Xs with spaces
if space_detect and e == 'X':
ciphertext += " "
else:
ciphertext += e
if group != 0:
count = (count + 1) % group
# if not keeping/replacing spaces, group characters for readability
if spaces.lower() == 'remove' and count == 0:
ciphertext += " "
return ciphertext
def update_config(local_config, changes):
    """
    Updates local config dict with changes from cli invoked options.

    Rotor options ('fast'/'middle'/'slow'/'static') arrive as "id,pos,ring"
    strings and are expanded into the corresponding r*_id/r*_p/r*_rs integer
    entries; 'plugs' is split on commas into a list; any other non-None value
    is copied through unchanged.  None values mean "option not supplied" and
    are ignored.

    :param local_config (dict): loaded enigma configuration where key = component, value = setting
    :param changes (dict): changes to be made to loaded configuration, where key = component, value = desired setting
    :return (dict): loaded configuration with new changes from invoked cli options (mutated in place)
    """
    # cli rotor option name -> config key prefix (replaces four duplicated branches)
    rotor_prefix = {'fast': 'r1', 'middle': 'r2', 'slow': 'r3', 'static': 'r4'}
    for key, value in changes.items():
        if value is None:
            continue  # option was not given on the command line
        if key in rotor_prefix:
            rotor = [int(x) for x in value.split(",")]
            prefix = rotor_prefix[key]
            local_config[prefix + '_id'] = rotor[0]
            local_config[prefix + '_p'] = rotor[1]
            local_config[prefix + '_rs'] = rotor[2]
        elif key == 'plugs':
            local_config['plugs'] = value.split(",")
        else:
            local_config[key] = value
    return local_config
def load_config(config_name):
    """
    Loads the options and values of an existing configuration into a dictionary.

    Rotor ids, positions and ring settings are converted to int, the plug
    string is split into a list, and every other option stays a string.

    :param config_name (str): name of configuration to load
    :return (dict): dictionary of components
    """
    config.read(user_configs)
    int_options = {'r1_id', 'r2_id', 'r3_id', 'r4_id',
                   'r1_p', 'r2_p', 'r3_p', 'r4_p',
                   'r1_rs', 'r2_rs', 'r3_rs', 'r4_rs'}
    components = {}
    for option in config.options(config_name):
        raw = config[config_name][option]
        if option in int_options:
            components[option] = int(raw)
        elif option == 'plugs':
            components[option] = raw.split(",")
        else:
            components[option] = raw
    return components
def write_config(config_name, local_config):
    """
    Writes a locally stored configuration back to the initialization (.ini) file.

    Options are written in sorted key order; the plug list is re-joined into a
    comma-separated string.

    :param config_name (str): name of config section to write (i.e. name of configuration)
    :param local_config (dict): dict of locally stored enigma configuration
    """
    # sorted(items()) replaces the original list-building + sort;
    # ",".join replaces the manual concatenation with trailing-comma slicing.
    for key, value in sorted(local_config.items()):
        if key == 'plugs':
            config[config_name][key] = ",".join(value)
        else:
            config[config_name][key] = str(value)
    # write the changes
    with open(user_configs, 'w') as configfile:
        config.write(configfile)
def assemble_enigma(components):
    """
    Assembles an enigma machine from a local config dict.

    :param components (dict): local config values, key = component, value = setting
    :return (enigma_machine.EnigmaMachine): the assembled machine
    """
    # build the three moving rotors from the config values
    rotors = []
    for prefix in ('r1', 'r2', 'r3'):
        rotors.append([components[prefix + '_id'],
                       components[prefix + '_p'],
                       components[prefix + '_rs']])
    # the M4 model carries an extra static (fourth) rotor
    if components['model'] == 'M4':
        rotors.append([components['r4_id'], components['r4_p'], components['r4_rs']])
    return enigma_machine.EnigmaMachine(components['model'], rotors,
                                        components['reflect'], components['plugs'])
def str_to_bool(string):
    """
    Converts the strings "True"/"False" to booleans.

    :param string (str): string to convert
    :return: True for "True", False for "False"; any other value is
             returned unchanged
    """
    if string == "True":
        return True
    if string == "False":
        return False
    return string
| {
"repo_name": "cjengdahl/Enigma",
"path": "enigma_driver.py",
"copies": "1",
"size": "25252",
"license": "mit",
"hash": 6854467553003949000,
"line_mean": 34.6666666667,
"line_max": 152,
"alpha_frac": 0.5922699192,
"autogenerated": false,
"ratio": 4.031933578157433,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016291271098676317,
"num_lines": 708
} |
import os
import shutil
import subprocess
# The following env variables are expected: SLIDESHOW_PATH, FETCH_SLIDES_PATH, SLIDESHOW_WEB_PATH
# Resolve configurable paths from the environment, falling back to the
# standard install locations (os.environ.get replaces the verbose
# `x if key in os.environ.keys() else default` conditionals).
FETCH_SLIDES_PATH = os.environ.get('FETCH_SLIDES_PATH', '/signpi-server/signage/bin/fetch_slides')
SLIDESHOW_CONF_PATH = os.environ.get('SLIDESHOW_CONF_PATH', '/signpi-server/signage/conf.d')
WEB_PATH = os.environ.get('SLIDESHOW_WEB_PATH', '/signpi-server/frontend/static/frontend/web')
# make sure the directories this module manages exist
if not os.path.exists(SLIDESHOW_CONF_PATH):
    os.mkdir(SLIDESHOW_CONF_PATH)
if not os.path.exists(WEB_PATH):
    os.mkdir(WEB_PATH)
def delete_slideshow(name):
    """Remove a slideshow's web assets directory and its .conf file, if present."""
    slides_dir = os.path.join(WEB_PATH, name)
    conf_file = os.path.join(SLIDESHOW_CONF_PATH, name + '.conf')
    if os.path.exists(slides_dir):
        shutil.rmtree(slides_dir)
    if os.path.isfile(conf_file):
        os.remove(conf_file)
def list_slides(name):
    """Return the file names of all slides belonging to slideshow *name*."""
    # os.listdir already returns a list; the identity comprehension was redundant
    return os.listdir(os.path.join(WEB_PATH, name + "/slides"))
def list_slideshows():
    """Return the names of all configured slideshows (conf file names minus '.conf')."""
    names = []
    for conf_file in os.listdir(SLIDESHOW_CONF_PATH):
        names.append(conf_file[:-5])  # strip the '.conf' suffix
    return names
def get_info(name):
    """
    Get's slideshow metadata.
    :param name: name of the slideshow
    :return: tuple containing desc, url
    """
    conf_path = os.path.join(SLIDESHOW_CONF_PATH, name + '.conf')
    url = ''
    desc = ''
    # `with` guarantees the handle is closed even if a read fails
    # (the original open/close pair leaked on exceptions)
    with open(conf_path, 'r') as f:
        for line_number, line in enumerate(f):
            if line_number == 2:
                desc = line[2:].strip()  # remove '# '
            if line.startswith('SLIDESHOW_PDF='):
                # strip 'SLIDESHOW_PDF="' prefix and the trailing quote
                url = line.strip()[15:-1]
    return desc, url
def create_slideshow(name, desc, url):
    """
    Create a new slideshow: write its shell-style .conf file and run the
    fetch_slides script to populate its web directory.

    :param name: unique slideshow name (also the conf/web directory name)
    :param desc: human-readable description written into the conf header
    :param url: URL of the source PDF
    :return: True when fetch_slides exited with status 0
    :raises Exception: when a slideshow with this name already exists
    """
    if name in list_slideshows():
        raise Exception("The name [%s] is already taken!" % name)
    web_path = os.path.join(WEB_PATH, name)
    conf_path = os.path.join(SLIDESHOW_CONF_PATH, name + '.conf')
    # create conf file (a sourceable shell snippet read by fetch_slides)
    conf = '#/bin/bash\n'
    conf += '#\n'
    conf += '# %s\n'
    conf += '#\n\n'
    conf += 'SLIDESHOW_PDF="%s"\n'
    conf += 'SLIDESHOW_OUTPUT="%s"\n'
    conf = conf % (desc, url, web_path)
    # write to file; `with` replaces the manual open/flush/close and closes on error
    with open(conf_path, 'w') as f:
        f.write(conf)
    # call fetch_slides script from a clean output directory
    if os.path.exists(web_path):
        shutil.rmtree(web_path)
    os.mkdir(web_path)
    # NOTE: fetch_slides appears to rely on the current working directory;
    # this chdir changes it process-wide.
    os.chdir(web_path)
    completed_process = subprocess.run([FETCH_SLIDES_PATH, conf_path])
    return completed_process.returncode == 0
| {
"repo_name": "canance/signpi-server",
"path": "frontend/slideshow.py",
"copies": "1",
"size": "3199",
"license": "apache-2.0",
"hash": -6315238065761715000,
"line_mean": 30.362745098,
"line_max": 142,
"alpha_frac": 0.6614567052,
"autogenerated": false,
"ratio": 3.0938104448742747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42552671500742745,
"avg_score": null,
"num_lines": null
} |
from django.conf.urls import url
from . import views
# Routes for the frontend app (included under the 'frontend' namespace).
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^cast/', views.cast, name='cast'),
    url(r'^change/', views.change, name='change'),
    # device and device-group management
    url(r'^create_device/', views.create_device, name="create_device"),
    url(r'^create_device_group', views.create_device_group, name='create_device_group'),
    url(r'^devices/', views.devices, name='devices'),
    url(r'^device_groups/', views.device_groups, name='device_groups'),
    url(r'^edit_device/(?P<dev>\d+)/$', views.edit_device, name='edit_device'),
    # slideshow views
    url(r'^slideshows/', views.slideshows, name="slideshows"),
    url(r'^slideshow/(?P<name>.*)/$', views.get_slideshow, name="slideshow"),
    url(r'^slideshow_json/(?P<name>.*)/$', views.get_slideshow_json, name="slideshow_json"),
    # stream views and create/edit/delete actions
    url(r'^streams/', views.streams, name="streams"),
    url(r'^create_slideshow/', views.create_slideshow, name="create_slideshow"),
    url(r'^create_stream/', views.create_stream, name="create_stream"),
    url(r'^delete_slideshow/(?P<name>.*)/$', views.delete_slideshow, name="delete_slideshow"),
    url(r'^edit_slideshow/(?P<name>.*)/$', views.edit_slideshow, name='edit_slideshow'),
    url(r'^delete_stream/(?P<name>.*)/$', views.delete_stream, name="delete_stream"),
    url(r'^edit_stream/(?P<name>.*)/$', views.edit_stream, name='edit_stream'),
    url(r'^edit_device_group/(?P<grp>\d+)/$', views.edit_device_group, name='edit_device_group'),
]
| {
"repo_name": "canance/signpi-server",
"path": "frontend/urls.py",
"copies": "1",
"size": "2134",
"license": "apache-2.0",
"hash": -1148038581439403400,
"line_mean": 49.8095238095,
"line_max": 97,
"alpha_frac": 0.6841611996,
"autogenerated": false,
"ratio": 3.365930599369085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9540814586862464,
"avg_score": 0.001855442421324266,
"num_lines": 42
} |
from django.db import models
class Stream(models.Model):
    """A named remote content stream that a device can be configured to show."""
    name = models.CharField(max_length=255, blank=False)  # display name
    desc = models.TextField(blank=True)  # optional free-form description
    url = models.CharField(max_length=255, blank=False)  # stream source URL
    def __str__(self):
        return self.name
class Device(models.Model):
    """A signpi client device, identified by its MAC address."""
    name = models.CharField(max_length=255)
    desc = models.TextField(blank=True)
    mac = models.CharField(max_length=17)
    configuration = models.CharField(max_length=255, default='')
    def __str__(self):
        # prefer the friendly name; fall back to the MAC address
        label = self.name or self.mac
        if self.configuration != '':
            return "%s - %s" % (label, self.configuration)
        return label
class Group(models.Model):
    """A named collection of devices that can be managed together."""
    name = models.CharField(max_length=255)
    desc = models.TextField(blank=True)
    devices = models.ManyToManyField(Device)  # member devices of this group
    def __str__(self):
        return self.name
| {
"repo_name": "canance/signpi-server",
"path": "backend/models.py",
"copies": "1",
"size": "1564",
"license": "apache-2.0",
"hash": 394157013646892500,
"line_mean": 30.28,
"line_max": 74,
"alpha_frac": 0.6943734015,
"autogenerated": false,
"ratio": 3.8146341463414632,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 50
} |
from .models import Device, Stream
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.core.urlresolvers import reverse
import frontend.slideshow
import json
def configuration(request):
    """
    Device-facing endpoint: look up the configuration for the device whose
    MAC address is given in the 'dev' GET parameter.

    Unknown devices are auto-registered (with an empty configuration) and
    receive '{}'.  Known devices receive a JSON object with 'intent'
    ('stream' / 'slideshow' / 'error') and the matching 'url'.
    """
    if 'dev' not in request.GET:
        return HttpResponse('Invalid request!')
    mac_address = request.GET['dev']
    if mac_address.strip() == '':
        return HttpResponse('Empty MAC address!')
    try:
        device = Device.objects.get(mac__iexact=mac_address)
    except ObjectDoesNotExist:
        # first contact from this device: register it so it can be configured later
        device = Device()
        device.mac = mac_address
        device.save()
    else:
        # configuration is stored as '<kind>:<name>'; startswith replaces
        # the original fixed-length slice comparisons
        if device.configuration.startswith('stream:'):
            intent = 'stream'
            stream = Stream.objects.get(name=device.configuration[7:])
            url = stream.url
        elif device.configuration.startswith('slideshow:'):
            intent = 'slideshow'
            url = request.build_absolute_uri(reverse('frontend:slideshow', kwargs={'name': device.configuration[10:]}))
        else:
            url = ''
            intent = 'error'
        configuration_data = {
            'intent': intent,
            'url': url,
        }
        return HttpResponse(json.dumps(configuration_data, sort_keys=True, indent=4, separators=(',', ':')))
    # newly registered device: nothing configured yet
    return HttpResponse('{}')
| {
"repo_name": "canance/signpi-server",
"path": "backend/views.py",
"copies": "1",
"size": "2071",
"license": "apache-2.0",
"hash": 7745620872089226000,
"line_mean": 32.4032258065,
"line_max": 119,
"alpha_frac": 0.6605504587,
"autogenerated": false,
"ratio": 4.23517382413088,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01641141363322482,
"num_lines": 62
} |
"""
Django settings for signpi project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'q!n3@ftmhtvd!v-n*9p#xf0m72hio2f((o^w04pn!z+@0ki-nb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # project apps
    'frontend.apps.FrontendConfig',
    'backend.apps.BackendConfig',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'signpi.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'signpi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# after login, users land on the frontend; unauthenticated users go to /login
LOGIN_REDIRECT_URL = '/frontend'
LOGIN_URL = '/login'
| {
"repo_name": "canance/signpi-server",
"path": "signpi/settings.py",
"copies": "1",
"size": "4015",
"license": "apache-2.0",
"hash": 5781065320943522000,
"line_mean": 26.6896551724,
"line_max": 91,
"alpha_frac": 0.700871731,
"autogenerated": false,
"ratio": 3.5468197879858656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47476915189858654,
"avg_score": null,
"num_lines": null
} |
"""signpi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
from frontend import views as frontend_views
from django.contrib import admin
# Project-level URL routing: admin, the two apps, and auth views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^frontend/', include('frontend.urls', namespace='frontend')),  # web UI
    url(r'^backend/', include('backend.urls', namespace='backend')),  # device-facing API
    url(r'^login$', auth_views.login),
    url(r'^logout/$', auth_views.logout),
    url(r'^$', frontend_views.index),  # site root shows the frontend index
]
| {
"repo_name": "canance/signpi-server",
"path": "signpi/urls.py",
"copies": "1",
"size": "1794",
"license": "apache-2.0",
"hash": 6164567334205483000,
"line_mean": 38.8666666667,
"line_max": 79,
"alpha_frac": 0.7196209588,
"autogenerated": false,
"ratio": 3.683778234086242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49033991928862425,
"avg_score": null,
"num_lines": null
} |
from django import forms
from django.forms import ModelForm, Form
from backend.models import Group, Device, Stream
class GroupForm(ModelForm):
    """Model form exposing every field of the Group model."""
    class Meta:
        model = Group
        fields = '__all__'
class DeviceForm(ModelForm):
    """Model form for Device; 'configuration' is managed elsewhere and excluded."""
    class Meta:
        model = Device
        fields = '__all__'
        exclude = ("configuration",)
class SlideshowForm(Form):
    """Plain (non-model) form for creating a slideshow from a PDF URL."""
    name = forms.CharField(label="Name", max_length=100)
    desc = forms.CharField(label="Description", max_length=255)
    url = forms.CharField(label="PDF URL", max_length=255)
class StreamForm(ModelForm):
    """Model form exposing every field of the Stream model."""
    class Meta:
        model = Stream
        fields = '__all__'
| {
"repo_name": "canance/signpi-server",
"path": "frontend/forms.py",
"copies": "1",
"size": "1316",
"license": "apache-2.0",
"hash": -9091174682424937000,
"line_mean": 28.2444444444,
"line_max": 74,
"alpha_frac": 0.7036474164,
"autogenerated": false,
"ratio": 3.97583081570997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.517947823210997,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cosmin'
def solveCase2(l):
    """
    Arrange the digits in l into the smallest number with no leading zero:
    the smallest non-zero digit first, then the remaining digits (zeros
    included) in non-decreasing order.

    :param l: list of digits (each 0-9)
    :return: list of digits of the smallest arrangement
    """
    if l == [0]:
        return [0]
    # counting sort over the ten possible digits
    fr = [0] * 10
    for digit in l:
        fr[digit] += 1
    # smallest non-zero digit leads the number
    first = next((d for d in range(1, 10) if fr[d]), None)
    if first is None:
        # BUG FIX: all digits are zero (e.g. [0, 0]); the original left
        # `first` unbound and crashed with a NameError here
        return sorted(l)
    fr[first] -= 1
    ret = [first]
    for digit in range(10):
        ret.extend([digit] * fr[digit])
    return ret
def extractDigits(x):
    '''
    Extracts the digits of a number into a list of integers,
    least-significant digit first.
    x - integer, the number to extract digits from, x >= 0
    returns a list of x's digits ([0] for x == 0)
    '''
    if x == 0:
        return [0]
    result = []
    while x:
        x, digit = divmod(x, 10)
        result.append(digit)
    return result
def readInput():
    '''
    Read a natural number from stdin.
    Returns the number as an int, or None (after printing an error message)
    when the input is not a valid non-negative integer.
    '''
    try:
        ret = int(input("Please input a natural number "))
    except ValueError:
        # narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/EOFError and made the prompt loop unquittable
        print("You did not enter a valid number.")
        return None
    if ret >= 0:
        return ret
    print("You did not enter a valid number.")
    return None
def solveCase1(digits):
    """Return a new list with the digits in non-decreasing order."""
    ordered = list(digits)
    ordered.sort()
    return ordered
def getInput():
    """Keep prompting until readInput() yields a valid natural number."""
    number = readInput()
    while number is None:
        number = readInput()
    return number
def solve(n):
    """
    Rearrange n's digits into the smallest number with no leading zero,
    print it and return it as an int.
    :param n: non-negative integer
    :return: the rearranged number
    """
    digits = extractDigits(n)
    # compute once -- the original called solveCase2 twice for the same value
    result = ''.join(str(d) for d in solveCase2(digits))
    print(result)
    return int(result)
def testSolution():
    """Smoke tests for solve() on known inputs."""
    cases = ((0, 0), (1, 1), (4321, 1234), (400213, 100234))
    for value, expected in cases:
        assert solve(value) == expected
# Script entry point: read a number and print its smallest rearrangement.
if __name__ == '__main__':
    n = getInput()
    solve(n)
    print ("ceva")  # NOTE(review): leftover debug output -- confirm and remove
| {
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab01/ex9-homework.py",
"copies": "1",
"size": "1598",
"license": "mit",
"hash": 502985565465434750,
"line_mean": 19.4871794872,
"line_max": 110,
"alpha_frac": 0.6414267835,
"autogenerated": false,
"ratio": 2.823321554770318,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8677592470084101,
"avg_score": 0.05743117363724363,
"num_lines": 78
} |
__author__ = 'cosmin'
from commands import *
def displayStartMenu():
    """Print the application's greeting / start message."""
    print("Hello. Please insert a command. Type 'help' to show menu.")
def clearWindow():
    '''
    Function to clear the terminal in Ubuntu
    NOTE(review): the body was never implemented -- this is currently a no-op.
    '''
def displayCommands():
    '''
    Function to display all the functionality of the application.
    Clears the screen first, then prints one help line per supported command.
    '''
    clearWindow()
    print("Here are all the command you can use:")
    print("    'list' - displays the list of all transactions")
    print("    'add X,in/out,description' - adds to the current day an in/out transaction of X RON with the given description")
    print("    'insert X,Y,in/out,description' - inserts in day X an in/out transaction of Y RON with the given description")
    print("    'remove X' - removes all the transactions from day X")
    print("    'remove from X to Y' - removes all the transactions from day X until day Y")
    print("    'remove in/out' - removes all the in/out transactions from the current month")
    print("    'replace X,in/out,description with Y' - replaces the amount for the in/out transaction having the specified description from day X with Y RON")
    print("    'greater than X' - writes all transactions greater than X")
    print("    'less than X before Y' - writes all transactions less than X which were made before day Y")
    print("    'all in/out' - writes all the in transactions.")
    print("    'balance X' - computes the account's balance on day X - should be integer")
    print("    'sum in/out' - writes the total amount from in transactions")
    print("    'max out day' - writes the day with the maximum amount in an out transaction")
    print("    'asc sort day' - sorts the total daily transactions in an ascending order")
    print("    'desc sort type' - sorts the total daily transactions per type (in/out) in a descending order")
    print("    'filter in/out' - filters the transaction so that only the in/out will remain")
    print("    'undo' - undo the last operation")
    print("    'redo' - redo the last operation - currently supporting only one redo at a time")
    print("    'exit' - to quit the application")
def printTransactions(transactionPack):
    '''
    Print every transaction stored in the pack's current list.
    :param transactionPack: pair [transactionList, history]; each transaction
        is a tuple (day, amount, "in"/"out", description)
    '''
    transactionList = transactionPack[0]
    # single guard replaces the original's two redundant None checks
    if transactionList is None:
        return
    if len(transactionList) == 0:
        print("There are no transactions made!")
    else:
        print("These are the transactions:")
        for index, transaction in enumerate(transactionList):
            print(str(1 + index) + ". " + ', '.join([str(x) for x in transaction]))
def printTransactionList(transactionPair):
    """Print a (error, transactions) query result: the rows on success, the error otherwise."""
    error, transactions = transactionPair
    if error is not None:
        print(str(error))
        return
    print("Here is the result of the query:")
    for index, transaction in enumerate(transactions):
        print(str(1 + index) + ". " + ', '.join(str(field) for field in transaction))
def getCommand():
    '''
    Read one command from the user.
    :return: list of strings, the input split on single spaces
    '''
    return input("> ").split(" ")
def printError(exception):
    """Print the textual form of an error/exception object."""
    message = str(exception)
    print(message)
def handleNewList(transactionPack, newList):
    """
    Unwrap a command handler's (error, newPack) result: return the new pack
    on success; otherwise print the error and keep the old pack.
    """
    error, payload = newList
    if error is None:
        return payload
    printError(error)
    return transactionPack
def handleFilteredList(newList):
    """Print the filtered transactions on success, or the error otherwise."""
    error, payload = newList
    if error is None:
        printTransactions(payload)
    else:
        printError(error)
def printHistory(transactionPack):
    """Print every snapshot stored in the pack's undo history (debug helper)."""
    # loop variable renamed: the original shadowed the builtin `list`
    for snapshot in transactionPack[1]:
        print(snapshot)
def runUi():
    """
    Interactive command loop for the bank account manager.

    transactionPack is a pair [transactionList, history]:
      - transactionList: current transactions, each a tuple
        (day, amount, "in"/"out", description)
      - history: snapshots of previous transaction lists, used by undo/redo
    Commands read from stdin are dispatched to the handlers imported from
    `commands`; mutating handlers return (error, newPack) pairs that are
    unwrapped by handleNewList.
    """
    #transactionPack = [[(1, 100, "in", "cosmin"), (2, 1000, "out", "description"), (3, 150, "in", "ok"), (15, 123, "out", "salary"), (17,2000,"in","a year salary"), (11,2500, "in", "saled the car"), (11,5000,"out", "bought a macbook")], []] #todo: getTheTransactionList from a file or db
    #transactionPack = [[(1,1,'in','a'), (2,2,'in','b'), (3,3,'in','c'), (4,4,'out','d'), (5,5,'in','e')], []]
    transactionPack = [[], []]
    #todo add the transactions having the same values
    displayStartMenu()
    while True:
        #printHistory(transactionPack)
        #transactionList = transactionPack[0]
        command = getCommand()
        if len(command) == 0:
            continue
        if command[0] == "help":
            displayCommands()
        elif command[0] == "list" or command[0] == 'ls':
            printTransactions(transactionPack)
        elif command[0] == "add":
            newList = addTransaction(command, transactionPack)
            transactionPack = handleNewList(transactionPack, newList)
        elif command[0] == "insert":
            newList = insertTransaction(command, transactionPack)
            transactionPack = handleNewList(transactionPack, newList)
        elif command[0] == "remove":
            newList = removeTransaction(command, transactionPack)
            transactionPack = handleNewList(transactionPack, newList)
        elif command[0] == "replace":
            newList = replaceTransaction(command, transactionPack)
            transactionPack = handleNewList(transactionPack, newList)
        elif command[0] == "greater" or command[0] == "less":
            printTransactionList(filterPropertyTransactions(command, transactionPack))
        elif command[0] == "all":
            printTransactionList(filterAllTransactions(command, transactionPack))
        elif command[0] == "balance":
            print("Balance on the given day was ", computeBalance(command, transactionPack), ".")
        elif command[0] == "sum":
            print(getSum(command, transactionPack))
        elif command[0] == "max":
            print(getMax(command, transactionPack))
        elif command[0] == "asc" or command[0] == "desc":
            printTransactionList(sortTransactions(command, transactionPack))
        elif command[0] == "filter":
            newList = filterTransaction(command, transactionPack)
            transactionPack = handleNewList(transactionPack, newList)
        elif command[0] == "undo":
            transactionPack = undo(transactionPack)
        elif command[0] == "redo":
            transactionPack = redo(transactionPack)
        elif command[0] == "h":
            # undocumented debug command: dump the undo history
            printHistory(transactionPack)
        elif command[0] == "exit":
            print("Exiting...")
            break
        else:
            print("Command not recognized. Try 'help'!")
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab02-04/Bank Account Management/ui.py",
"copies": "1",
"size": "6814",
"license": "mit",
"hash": -4818161135754897000,
"line_mean": 43.5424836601,
"line_max": 288,
"alpha_frac": 0.6306134429,
"autogenerated": false,
"ratio": 4.058368076235855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0048025999657192115,
"num_lines": 153
} |
__author__ = 'cosmin'
from exceptions import InvalidParameters, CommandError
import datetime
def representsInt(s):
    '''
    Check whether :param s: can be converted to an int.
    :param s: the value to test (typically a string read from the user)
    :rtype: boolean
    :return: True if s represents an integer, False otherwise
    '''
    try:
        int(s)
        return True
    except ValueError:
        # Bug fix: the original returned None here.  Callers that compare
        # the result with "== False" (e.g. getRemoveTransactionInterval)
        # silently misbehaved because None == False is False.
        return False
def getAddTransaction(command):
    '''
    Parse the arguments of the "add" command.
    :param command: the whitespace-split user input
    :return: a tuple (day, amount, in/out, description) describing the
             transaction to be added; the day is always the current day
    :raises CommandError: when the command carries no argument part
    :raises InvalidParameters: when the amount or the type is invalid
    '''
    if len(command) < 2:
        raise CommandError("Add command - Syntax Error!")
    parts = command[1].split(',')
    if len(parts) < 3:
        raise InvalidParameters("Add command - Not enough parameters!")
    if not representsInt(parts[0]):
        raise InvalidParameters("Add command - The amount of money not an integer!")
    amount = int(parts[0])
    if amount <= 0:
        raise InvalidParameters("Add command - The amount of money should be strictly positive!")
    if parts[1].lower() not in ("in", "out"):
        raise InvalidParameters("Add command - The only known types are in and out!")
    # the description may contain commas, so glue the remaining comma-split
    # fields back together ...
    description = ','.join(parts[2:])
    # ... and append any extra whitespace-separated words of the raw command
    if len(command) > 2:
        description = description + ' ' + ' '.join(command[2:])
    return (datetime.datetime.now().day, amount, parts[1], description)
def addTransaction(command, transactionPack):
    '''
    Execute the "add" command on the transaction pack.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the
             command is malformed
    '''
    try:
        transaction = getAddTransaction(command)
    except (CommandError, InvalidParameters) as error:
        return (error, None)
    # snapshot the current list so the operation can be undone
    transactionPack[1].append(transactionPack[0][:])
    transactionPack[0].append(transaction)
    return (None, transactionPack)
def getInsertTransaction(command):
    '''
    Parse the arguments of the "insert" command.
    :param command: the whitespace-split user input
    :return: a tuple (day, amount, in/out, description) describing the
             transaction the user wants to insert
    :raises CommandError: when the command carries no argument part
    :raises InvalidParameters: when the day, amount or type is invalid
    '''
    if len(command) < 2:
        raise CommandError("Insert Command - Syntax Error!")
    parts = command[1].split(',')
    if len(parts) < 4:
        raise InvalidParameters("Insert Command - Not enough parameters!")
    if not representsInt(parts[0]):
        raise InvalidParameters("Insert Command - Day not an integer!")
    if int(parts[0]) <= 0:
        raise InvalidParameters("Insert Command - Day should be strictly positive!")
    if not representsInt(parts[1]):
        raise InvalidParameters("Insert Command - Amount not an integer!")
    if int(parts[1]) <= 0:
        raise InvalidParameters("Insert Command - Amount cannot be negative or nul!")
    if parts[2].lower() not in ("in", "out"):
        raise InvalidParameters("Insert Command - The only known transaction types are in and out")
    # rebuild a description that may contain commas and extra words
    description = ','.join(parts[3:])
    if len(command) > 2:
        description = description + ' ' + ' '.join(command[2:])
    return (int(parts[0]), int(parts[1]), parts[2], description)
def insertTransaction(command, transactionPack):
    '''
    Execute the "insert" command on the transaction pack.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the
             command is malformed
    '''
    try:
        transaction = getInsertTransaction(command)
    except (CommandError, InvalidParameters) as error:
        return (error, None)
    # snapshot the current list so the operation can be undone
    transactionPack[1].append(transactionPack[0][:])
    transactionPack[0].append(transaction)
    return (None, transactionPack)
def getRemoveTransactionDay(command):
    '''
    Parse the "remove <day>" form of the remove command.
    :param command: the whitespace-split user input
    :return: the day (a positive integer) whose transactions must go
    :raises InvalidParameters: when the day is not a positive integer
    '''
    if not representsInt(command[1]):
        raise InvalidParameters("Remove Command - Day not an integer.")
    day = int(command[1])
    if day <= 0:
        raise InvalidParameters("Remove Command - Date should be strictly positive.")
    return day
def removeTransactionDay(command, transactionPack):
    '''
    Remove every transaction recorded on a given day.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the
             command is malformed
    '''
    try:
        day = getRemoveTransactionDay(command)
    except (CommandError, InvalidParameters) as error:
        return (error, None)
    # snapshot the current list so the operation can be undone
    transactionPack[1].append(transactionPack[0][:])
    transactionPack[0] = [t for t in transactionPack[0] if t[0] != day]
    return (None, transactionPack)
def getRemoveTransactionInterval(command):
    '''
    Parse the "remove from X to Y" form of the remove command.
    :param command: the whitespace-split user input; the caller only
                    dispatches here when it has exactly 5 items
                    (see chooseRemoveType)
    :return: a tuple (startDay, endDay) describing the day interval to remove
    :raises CommandError: when the from/to keywords are missing
    :raises InvalidParameters: when the days are not integers, not positive,
                               or do not form an interval
    '''
    if command[1] != 'from' or command[3] != 'to':
        raise CommandError("Remove command - Syntax Error!")
    # Bug fix: this used to compare "representsInt(...) == False", but
    # representsInt returns None on failure, so the guard never fired and
    # int() below could raise an uncaught ValueError.
    if not representsInt(command[2]) or not representsInt(command[4]):
        raise InvalidParameters("Remove command - Dates should be integers!")
    if int(command[2]) <= 0:
        # Bug fix: the original message was truncated ("strictly!")
        raise InvalidParameters("Remove command - Days should be strictly positive!")
    startDate = int(command[2])
    endDate = int(command[4])
    # endDate > 0 follows from startDate > 0 and startDate <= endDate
    if startDate > endDate:
        raise InvalidParameters("Remove command - Dates do not form an interval!")
    return (startDate, endDate)
def removeTransactionInterval(command, transactionPack):
    '''
    Remove every transaction inside a closed interval of days.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the
             command is malformed
    '''
    try:
        (first, last) = getRemoveTransactionInterval(command)
    except (CommandError, InvalidParameters) as error:
        return (error, None)
    # snapshot the current list so the operation can be undone
    transactionPack[1].append(transactionPack[0][:])
    # keep only the transactions strictly outside [first, last]
    transactionPack[0] = [t for t in transactionPack[0] if t[0] < first or t[0] > last]
    return (None, transactionPack)
def getRemoveTypeTransaction(command):
    '''
    Parse the "remove in/out" form of the remove command.
    :param command: the whitespace-split user input
    :return: the transaction type to remove, either "in" or "out"
    :raises InvalidParameters: for any other type
    '''
    if command[1] not in ('in', 'out'):
        raise InvalidParameters("Remove command - The only known types of transactions are in and out!")
    return command[1]
def removeTypeTransaction(command, transactionPack):
    '''
    Remove every transaction of a given type ("in" or "out").
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the
             command is malformed
    '''
    try:
        kind = getRemoveTypeTransaction(command)
    except InvalidParameters as error:
        return (error, None)
    # snapshot the current list so the operation can be undone
    transactionPack[1].append(transactionPack[0][:])
    transactionPack[0] = [t for t in transactionPack[0] if t[2] != kind]
    return (None, transactionPack)
def chooseRemoveType(command):
    '''
    Decide which flavour of the remove command was typed.
    :param command: the whitespace-split user input
    :return: 1 for "remove in/out", 2 for "remove <day>",
             3 for "remove from X to Y"
    :raises SyntaxError: when the shape of the command matches none of them
    '''
    if len(command) <= 1:
        raise SyntaxError("Not enough parameters.")
    if len(command) == 2:
        return 1 if command[1] in ("in", "out") else 2
    if len(command) == 5:
        return 3
    raise SyntaxError("Syntax error.")
def removeTransaction(command, transactionPack):
    '''
    Dispatch the remove command to the right handler:
        remove X           - removes all the transactions from day X
        remove from X to Y - removes all the transactions from day X to day Y
        remove in/out      - removes all the in/out transactions
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) on failure
    '''
    handlers = {1: removeTypeTransaction,
                2: removeTransactionDay,
                3: removeTransactionInterval}
    try:
        choice = chooseRemoveType(command)
    except SyntaxError as error:
        return (error, None)
    return handlers[choice](command, transactionPack)
def getReplaceTransaction(command):
    '''
    Parse the arguments of the "replace" command, shaped like:
        replace <day>,<type>,<description> with <amount>
    :param command: the whitespace-split user input
    :return: a tuple (day, type, newAmount, description)
    :raises CommandError: when the command shape is wrong
    :raises InvalidParameters: when the day, type or amount is invalid
    '''
    if len(command) < 4:
        raise CommandError("Replace command - Syntax error!")
    if command[-2] != 'with':
        raise CommandError("Replace command - Syntax error!")
    if not representsInt(command[-1]):
        raise InvalidParameters("Replace command - Amount not an integer!")
    if int(command[-1]) <= 0:
        raise InvalidParameters('Replace command - Amount cannot be negative or null!')
    fields = command[1].split(',')
    if len(fields) < 3:
        raise CommandError("Replace command - Not enough parameters!")
    if not representsInt(fields[0]):
        raise InvalidParameters("Replace command - Date not an integer!")
    if int(fields[0]) <= 0:
        raise InvalidParameters("Replace command - Date should be strictly positive!")
    if fields[1] not in ('in', 'out'):
        raise InvalidParameters('Replace command - Transaction type unknown, should be only in or out')
    # the description may contain commas (joined back) plus any extra
    # whitespace-separated words typed before the "with" keyword
    description = ','.join(fields[2:])
    if len(command) > 4:
        description = description + ' ' + ' '.join(command[2:-2])
    return (int(fields[0]), fields[1], int(command[-1]), description)
def replaceTransaction(command, transactionPack):
    '''
    Replace the amount of money of one existing transaction.
    A transaction is identified by its (day, type, description) triple; when
    several transactions share that triple, the user is asked interactively
    (via input()) which one to change.
    :param command: a list representing the command the user wants to make, which is the string splitted by spaces
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the command
             is malformed
    '''
    try:
        (day, type, newAmount, description) = getReplaceTransaction(command)
        # snapshot the current list so the operation can be undone
        # (note: the snapshot is taken even when nothing ends up replaced)
        transactionPack[1].append(transactionPack[0][:])
        # NOTE(review): "list" and "type" shadow the builtins of the same name
        # collect the indexes of all transactions matching (day, type, description)
        list = []
        for i in range(len(transactionPack[0])):
            if transactionPack[0][i][0] == day and transactionPack[0][i][2] == type and transactionPack[0][i][3] == description:
                list.append(i)
        if len(list) > 1:
            # ambiguous match: let the user pick one candidate (1-based index)
            print("Error - There are more than 1 such transaction. Please choose which one to replace.")
            print("These are the transactions:")
            for i in range(len(list)):
                print(str(1 + i) + ". " + ', '.join([str(x) for x in transactionPack[0][list[i]]]))
            x = input(">")
            # keep asking until the answer is an integer inside [1, len(list)]
            while not representsInt(x) or int(x) < 1 or int(x) > len(list):
                print("Please insert an integer between 1 and ", len(list))
                x = input(">")
            x = int(x)
            # rebuild the tuple with the new amount (tuples are immutable)
            transactionPack[0][list[x - 1]] = (transactionPack[0][list[x - 1]][0], newAmount, transactionPack[0][list[x - 1]][2], transactionPack[0][list[x - 1]][3])
        elif len(list) == 1:
            # exactly one match: replace its amount directly
            transactionPack[0][list[0]] = (transactionPack[0][list[0]][0], newAmount, transactionPack[0][list[0]][2], transactionPack[0][list[0]][3])
        return (None, transactionPack)
    except CommandError as se:
        return (se, None)
    except InvalidParameters as ie:
        return (ie, None)
def getProperties(command):
    '''
    Parse the arguments of the greater/less filter query.
    Two accepted shapes:
        greater/less than X          - amount filter only
        greater/less than X before Y - amount filter restricted to days <= Y
    :param command: the whitespace-split user input
    :return: [greater/less, amount] or [greater/less, amount, day]
    :raises CommandError: when the shape of the query is wrong
    :raises InvalidParameters: when the amount or the day is invalid
    '''
    if len(command) < 3:
        raise CommandError("Filter command - Syntax error!")
    if len(command) == 4:
        raise CommandError("Filter command - Syntax error!")
    if command[1] != 'than':
        raise CommandError("Filter command - Syntax error!")
    hasDay = len(command) > 3
    if hasDay and command[3] != 'before':
        raise CommandError("Filter command - Syntax error!")
    if not representsInt(command[2]):
        raise InvalidParameters("Filter command - amount is not an integer!")
    if int(command[2]) <= 0:
        raise InvalidParameters("Filter command - amount should be strictly positive!")
    if hasDay and not representsInt(command[4]):
        raise InvalidParameters("Filter command - day is not an integer!")
    if hasDay and int(command[4]) <= 0:
        raise InvalidParameters("Filter command - day should be strictly positive!")
    result = [command[0], int(command[2])]
    if hasDay:
        result.append(int(command[4]))
    return result
def filterPropertyTransactions(command, transactionPack):
    '''
    Return the transactions matching a greater/less amount filter,
    optionally restricted to the days up to a given one.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, filteredList) on success, (error, None) when the
             command is malformed
    '''
    try:
        args = getProperties(command)
    except (CommandError, InvalidParameters) as error:
        return (error, None)
    if args[0] == 'greater':
        matches = lambda t: t[1] >= args[1]
    else:
        matches = lambda t: t[1] <= args[1]
    selected = [t for t in transactionPack[0] if matches(t)]
    if len(args) > 2:
        # the "before Y" variant also caps the day
        selected = [t for t in selected if t[0] <= args[2]]
    return (None, selected)
def getAllArguments(command):
    '''
    Parse the argument of the "all" command.
    :param command: the whitespace-split user input
    :return: the requested transaction type, either "in" or "out"
    :raises CommandError: when the argument is missing
    :raises InvalidParameters: for an unknown type
    '''
    if len(command) < 2:
        raise CommandError("All filter - syntax error!")
    if command[1] not in ('in', 'out'):
        raise InvalidParameters("All filter - the only known types are in/out")
    return command[1]
def filterAllTransactions(command, transactionPack):
    '''
    Return every transaction of the requested type ("in" or "out").
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, filteredList) on success, (error, None) when the
             command is malformed
    '''
    try:
        kind = getAllArguments(command)
    except (CommandError, InvalidParameters) as error:
        return (error, None)
    return (None, [t for t in transactionPack[0] if t[2] == kind])
def getBalanceArguments(command):
    '''
    Parse the argument of the "balance" command.
    :param command: the whitespace-split user input
    :return: the day (a positive integer) up to which the balance is wanted
    :raises CommandError: when the day is missing
    :raises InvalidParameters: when the day is not a positive integer
    '''
    if len(command) < 2:
        raise CommandError("Balance - syntax error!")
    if not representsInt(command[1]):
        raise InvalidParameters("Balance - day should be integer!")
    day = int(command[1])
    if day <= 0:
        raise InvalidParameters("Balance - day should be positive!")
    return day
def computeBalance(command, transactionPack):
    '''
    Compute the balance at the end of a given day: the sum of all "in"
    amounts minus all "out" amounts registered up to and including that day.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: the balance (an integer), or the error message (a string)
             when the command is malformed
    '''
    try:
        day = getBalanceArguments(command)
    except (CommandError, InvalidParameters) as error:
        return str(error)
    balance = 0
    for t in transactionPack[0]:
        if t[0] <= day:
            balance += t[1] if t[2] == 'in' else -t[1]
    return balance
def getSumArgument(command):
    '''
    Parse the argument of the "sum" command.
    :param command: the whitespace-split user input
    :return: the transaction type to sum over, either "in" or "out"
    :raises CommandError: when the argument is missing
    :raises InvalidParameters: for an unknown type
    '''
    if len(command) < 2:
        raise CommandError("Sum command - Syntax error!")
    if command[1] not in ('in', 'out'):
        raise InvalidParameters("Sum command - Unknown transaction type!")
    return command[1]
def getSum(command, transactionPack):
    '''
    Sum the amounts of every transaction of the requested type.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: the sum (an integer), or the error message (a string)
             when the command is malformed
    '''
    try:
        kind = getSumArgument(command)
    except (CommandError, InvalidParameters) as error:
        return str(error)
    return sum(t[1] for t in transactionPack[0] if t[2] == kind)
def getMaxArguments(command):
    '''
    Parse the arguments of the "max in/out day" command.
    :param command: the whitespace-split user input
    :return: the transaction type, either "in" or "out"
    :raises CommandError: when the shape of the command is wrong
    :raises InvalidParameters: for an unknown transaction type
    '''
    if len(command) < 3:
        raise CommandError("Max command - Syntax error!")
    if command[1] not in ('in', 'out'):
        raise InvalidParameters("Max command - Unknown type of transaction")
    if command[2] != 'day':
        raise CommandError("Max command - Syntax error!")
    return command[1]
def getMax(command, transactionPack):
    '''
    Find the day holding the largest transaction of the requested type.
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: the day of the maximum (0 when no such transaction exists),
             or the error message (a string) when the command is malformed
    '''
    try:
        kind = getMaxArguments(command)
    except (CommandError, InvalidParameters) as error:
        return str(error)
    bestAmount, bestDay = -1, 0
    for t in transactionPack[0]:
        # strict ">" keeps the first occurrence on ties, like the original
        if t[2] == kind and t[1] > bestAmount:
            bestAmount, bestDay = t[1], t[0]
    return bestDay
def getSortArguments(command):
    '''
    Parse the arguments of the "asc/desc sort day/in/out" command.
    :param command: the whitespace-split user input
    :return: a tuple (asc/desc, day/in/out)
    :raises CommandError: when the shape of the command is wrong
    '''
    if len(command) < 3:
        raise CommandError("Sort command - Syntax Error!")
    if command[1] != 'sort':
        raise CommandError("Sort command - Syntax Error!")
    if command[2] not in ('day', 'in', 'out'):
        raise CommandError("Sort command - Syntax Error!")
    return (command[0], command[2])
def sortTransactions(command, transactionPack):
    '''
    Return the transaction list sorted as requested by the user:
        asc/desc sort day    - all transactions ordered by day
        asc/desc sort in/out - only that type, ordered by amount
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, sortedList) on success, (error, None) when the
             command is malformed
    '''
    try:
        args = getSortArguments(command)
        descending = args[0] != 'asc'
        if args[1] == 'day':
            # Bug fix: sorting "by day" used the amount (index 1) as the
            # key; the day lives at index 0 of each transaction tuple.
            return (None, sorted(transactionPack[0], key=lambda transaction: transaction[0], reverse=descending))
        selected = [transaction for transaction in transactionPack[0] if transaction[2] == args[1]]
        return (None, sorted(selected, key=lambda transaction: transaction[1], reverse=descending))
    except CommandError as se:
        return (se, None)
    except InvalidParameters as ve:
        return (ve, None)
def getFilterArguments(command):
    '''
    Parse the arguments of the "filter" command.
    Two accepted shapes:
        filter in/out    - keep only the transactions of that type
        filter in/out X  - additionally keep only amounts >= X
    :param command: the whitespace-split user input
    :return: [type] or [type, amount] with the amount as an int
    :raises CommandError: when the type argument is missing
    :raises InvalidParameters: when the type or the amount is invalid
    '''
    if len(command) < 2:
        raise CommandError("Filter command - Syntax Error.")
    if command[1] != 'in' and command[1] != 'out':
        raise InvalidParameters("Filter command - the first parameter should be either in or out.")
    if len(command) > 2 and not representsInt(command[2]):
        raise InvalidParameters("Filter command - the second arguments should be an integer.")
    # Bug fix: the original condition was inverted ("not int(...) < 0"),
    # which rejected every valid non-negative amount and accepted negatives.
    if len(command) > 2 and int(command[2]) < 0:
        raise InvalidParameters("Filter command - the second arguments should be positive.")
    args = [command[1]]
    if len(command) > 2:
        # Bug fix: convert the threshold to int so filterTransaction can
        # compare it to the integer amounts without a TypeError.
        args.append(int(command[2]))
    return args
def filterTransaction(command, transactionPack):
    '''
    Keep only the transactions matching the filter arguments:
        filter in/out    - keep only that type
        filter in/out X  - keep only that type with amounts >= X
    :param command: the whitespace-split user input
    :param transactionPack: a pair [transactions, history]
    :return: (None, updatedPack) on success, (error, None) when the
             command is malformed
    '''
    try:
        args = getFilterArguments(command)
        if len(args) == 1:
            transactionPack[0] = [transaction for transaction in transactionPack[0] if transaction[2] == args[0]]
            return (None, transactionPack)
        else:
            # Bug fix: getFilterArguments historically returned the threshold
            # as a string; coerce to int so comparing it with the integer
            # transaction amounts cannot raise a TypeError.
            threshold = int(args[1])
            transactionPack[0] = [transaction for transaction in transactionPack[0] if transaction[1] >= threshold and transaction[2] == args[0]]
            return (None, transactionPack)
    except CommandError as ce:
        return (ce, None)
    except InvalidParameters as pe:
        return (pe, None)
def undo(transactionPack):
    '''
    Revert the transaction list to its previous snapshot.
    :param transactionPack: [current, history] or [current, history, redo]
    :return: the updated pack; returned unchanged when there is nothing
             to undo
    '''
    history = transactionPack[1]
    if not history:
        return transactionPack
    # remember the current state in slot 2 so that redo can restore it
    snapshot = transactionPack[0][:]
    if len(transactionPack) == 3:
        transactionPack[2] = snapshot
    else:
        transactionPack.append(snapshot)
    # roll back to the most recent snapshot and pop it from the history
    transactionPack[0] = history[-1]
    transactionPack[1] = history[:-1]
    return transactionPack
def redo(transactionPack):
    '''
    Re-apply the state saved by the last undo.
    :param transactionPack: [current, history] or [current, history, redo]
    :return: the updated [current, history] pack; returned unchanged when
             there is no redo state available
    '''
    if len(transactionPack) != 3:
        # nothing was undone, so there is nothing to redo
        return transactionPack
    # the current list becomes a history snapshot again ...
    transactionPack[1].append(transactionPack[0])
    # ... and the saved redo state becomes the current list
    transactionPack[0] = transactionPack[2]
    return transactionPack[:-1]
return transactionPack | {
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab02-04/Bank Account Management/commands.py",
"copies": "1",
"size": "26488",
"license": "mit",
"hash": 4035444176271778300,
"line_mean": 43.3701842546,
"line_max": 176,
"alpha_frac": 0.6668680157,
"autogenerated": false,
"ratio": 4.420560747663552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034405152100262034,
"num_lines": 597
} |
__author__ = 'cosmin'
class Book:
    '''
    Entity describing a book, with the properties:
        - id: a uniquely determined identifier
        - title: the title of the book
        - description: the description of the book
        - author: the name of the book's author
    '''
    def __init__(self, id, title, description, author):
        self._id = id
        self._title = title
        self._description = description
        self._author = author
    def __repr__(self):
        '''Return a human-readable, multi-line description of the book.'''
        return "Book #%d:\nTitle: %s\nDescription: %s\nAuthor: %s\n" % (self._id, self._title, self._description, self._author)
    def __eq__(self, other):
        '''Two books are equal when they share the class and all attributes.'''
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def getId(self):
        '''Return the unique id of the book.'''
        return self._id
    def setId(self, id):
        '''Change the id of the book.'''
        self._id = id
    def getTitle(self):
        '''Return the title of the book.'''
        return self._title
    def setTitle(self, title):
        '''Change the title of the book.'''
        self._title = title
    def getDescription(self):
        '''Return the description of the book.'''
        return self._description
    def setDescription(self, description):
        '''Change the description of the book.'''
        self._description = description
    def getAuthor(self):
        '''Return the author of the book.'''
        return self._author
    def setAuthor(self, author):
        '''Change the author of the book.'''
        self._author = author
    @staticmethod
    def cmpByTitle(x, y):
        '''Three-way comparison of two books by title: -1, 0 or 1.'''
        if x.getTitle() == y.getTitle():
            return 0
        return -1 if x.getTitle() < y.getTitle() else 1
return 0
| {
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab05-07/model/book.py",
"copies": "1",
"size": "2480",
"license": "mit",
"hash": -5645236747875453000,
"line_mean": 26.2527472527,
"line_max": 127,
"alpha_frac": 0.5447580645,
"autogenerated": false,
"ratio": 4.389380530973451,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003101171787396277,
"num_lines": 91
} |
__author__ = 'cosmin'
class Client:
    '''
    Entity describing a client who can rent books, with the properties:
        - cnp: a uniquely determined identifier
        - name: the name of the person (client)
    '''
    def __init__(self, cnp, name):
        self._cnp = cnp
        self._name = name
    def __repr__(self):
        '''Return a human-readable description of the client.'''
        return "Client Name: %s\nCNP: %s" % (self._name, self._cnp)
    def __eq__(self, other):
        '''Two clients are equal when they share the class and all attributes.'''
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def getCnp(self):
        '''Return the cnp of the client.'''
        return self._cnp
    def setCnp(self, cnp):
        '''Change the cnp of the client.'''
        self._cnp = cnp
    def getName(self):
        '''Return the name of the client.'''
        return self._name
    def setName(self, name):
        '''Change the name of the client.'''
        self._name = name
| {
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab05-07/model/client.py",
"copies": "1",
"size": "1161",
"license": "mit",
"hash": -3302409646560259600,
"line_mean": 23.1875,
"line_max": 96,
"alpha_frac": 0.5133505599,
"autogenerated": false,
"ratio": 3.962457337883959,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49758078977839587,
"avg_score": null,
"num_lines": null
} |
__author__ = 'cosmin'
class Loan:
    '''
    Entity describing a loan: the pairing of one Client with one Book.
    Every book can be lent to at most one client at a given moment, while a
    client may hold one or more books at once.
    '''
    def __init__(self, client, book):
        self._client = client
        self._book = book
    def __repr__(self):
        '''Return a human-readable description of the loan.'''
        return "Client %s has the book #%d with the Title: %s" % (self._client.getName(), self._book.getId(), self._book.getTitle())
    def __eq__(self, other):
        '''Two loans are equal when they share the class and all attributes.'''
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__
    def getClient(self):
        '''Return the Client side of the loan.'''
        return self._client
    def getBook(self):
        '''Return the Book side of the loan.'''
        return self._book
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab05-07/model/loan.py",
"copies": "1",
"size": "1090",
"license": "mit",
"hash": -3060270501999997000,
"line_mean": 27.7105263158,
"line_max": 132,
"alpha_frac": 0.5577981651,
"autogenerated": false,
"ratio": 4.128787878787879,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0019899807355288164,
"num_lines": 38
} |
__author__ = 'cosmin'
'''
Module to test all the functions from the module commands.py.
The name of the test functions are according to the function they test.
That is, if a function is called myFunction(), then the test function will be named
testMyFunction()
It also checks for raise Exception, edge cases and so on.
'''
from commands import *
from exceptions import InvalidParameters, CommandError
def testRepresentsInt():
    '''Unit tests for representsInt(): integers pass, free text fails.'''
    cases = [(100, True),
             ("Python is nice.", False),
             (0, True),
             (-100, True),
             ("Python cannot convert string to int.", False)]
    for value, expected in cases:
        assert bool(representsInt(value)) == expected
def testGetAddTransaction():
    '''Unit tests for getAddTransaction().'''
    today = datetime.datetime.now().day
    assert getAddTransaction(["add", "10,in,cool", "description"]) == (today, 10, "in", "cool description")
    assert getAddTransaction(["add", "100,out,cosmin", "rusu"]) == (today, 100, "out", "cosmin rusu")
    # malformed commands must raise the documented exceptions
    badCases = [(["add"], CommandError),
                (["add", "15,inout,salary"], InvalidParameters),
                (["add", "in,in,salary"], InvalidParameters)]
    for badCommand, expectedError in badCases:
        try:
            getAddTransaction(badCommand)
            assert False
        except expectedError:
            pass
def testGetInsertTransaction():
    '''Unit tests for getInsertTransaction().'''
    assert getInsertTransaction(["insert", "12,10,in,desc", "ription"]) == (12, 10, "in", "desc ription")
    assert getInsertTransaction(["insert", "13,100,in,desc", "ription"]) == (13, 100, "in", "desc ription")
    assert getInsertTransaction(["insert", "1,5,out,desc", "ription"]) == (1, 5, "out", "desc ription")
    # malformed commands must raise the documented exceptions
    badCases = [(["insert"], CommandError),
                (["insert", "data,12,description"], InvalidParameters),
                (["insert", "-1,12,in,description"], InvalidParameters),
                (["insert", "1,in,out,description"], InvalidParameters),
                (["insert", "-1,-12,in,description"], InvalidParameters),
                (["insert", "1,12,input,description"], InvalidParameters)]
    for badCommand, expectedError in badCases:
        try:
            getInsertTransaction(badCommand)
            assert False
        except expectedError:
            pass
def testGetRemoveTransactionDay():
    '''Unit tests for getRemoveTransactionDay().'''
    assert getRemoveTransactionDay(["remove", "1"]) == 1
    assert getRemoveTransactionDay(["remove", "128"]) == 128
    # non-integer or non-positive days must be rejected
    for badCommand in (["remove", "amount"], ["remove", "-1"]):
        try:
            getRemoveTransactionDay(badCommand)
            assert False
        except InvalidParameters:
            pass
def testGetRemoveTransactionInterval():
    '''Unit tests for getRemoveTransactionInterval().'''
    assert getRemoveTransactionInterval(["remove", "from", "3", "to", "4"]) == (3, 4)
    assert getRemoveTransactionInterval(["remove", "from", "5", "to", "15"]) == (5, 15)
    # malformed commands must raise the documented exceptions
    badCases = [(["remove", "to", "5", "to", "15"], CommandError),
                (["remove", "from", "-5", "to", "15"], InvalidParameters),
                (["remove", "from", "5", "to", "1"], InvalidParameters)]
    for badCommand, expectedError in badCases:
        try:
            getRemoveTransactionInterval(badCommand)
            assert False
        except expectedError:
            pass
def testGetRemoveTypeTransaction():
    '''Unit tests for getRemoveTypeTransaction().'''
    for kind in ("in", "out"):
        assert getRemoveTypeTransaction(["remove", kind]) == kind
    # any other type must be rejected
    try:
        getRemoveTypeTransaction(["remove", "inout"])
        assert False
    except InvalidParameters:
        pass
def testReplaceTransaction():
    """'replace DAY,TYPE,DESC with AMOUNT' parsing: the two valid forms plus
    every malformed variant and its expected exception class."""
    assert getReplaceTransaction(["replace", "12,in,description", "with", "200"]) == (12, "in", 200, "description")
    assert getReplaceTransaction(["replace", "13,out,description", "with", "700"]) == (13, "out", 700, "description")
    bad_cases = (
        (["replace", "13,out,description", "with"], CommandError),          # missing amount
        (["replace", "13,out,description", "to", "amount"], CommandError),  # wrong keyword
        (["replace", "13,out,description", "with", "newAmount"], InvalidParameters),
        (["replace", "13,out,description", "with", "-700"], InvalidParameters),
        (["replace", "13,description", "with", "700"], CommandError),       # missing field
        (["replace", "day,out,description", "with", "700"], InvalidParameters),
        (["replace", "-13,in,description", "with", "700"], InvalidParameters),
        (["replace", "13,inout,description", "with", "700"], InvalidParameters),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getReplaceTransaction(bad_args)
            assert False
        except expected_exc:
            pass
def testGetProperties():
    """Filter-property parsing: greater/less-than clauses with an optional
    'before DAY' suffix, plus rejection of every malformed variant."""
    assert getProperties(["greater", "than", "100"]) == ["greater", 100]
    assert getProperties(["greater", "than", "10"]) == ["greater", 10]
    assert getProperties(["less", "than", "100"]) == ["less", 100]
    assert getProperties(["less", "than", "100", "before", "25"]) == ["less", 100, 25]
    bad_cases = (
        # NOTE: the original source wrote this argument as the adjacent
        # literals "25""greater tham 100"; the concatenated value is kept.
        (["less", "tham", "100", "before", "25greater tham 100"], CommandError),
        (["less", "than", "100", "befoe", "25"], CommandError),
        (["less", "than"], CommandError),
        (["less", "than", "100", "25"], CommandError),
        (["less", "than", "-1", "before", "25"], InvalidParameters),
        (["less", "than", "100", "before", "-25"], InvalidParameters),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getProperties(bad_args)
            assert False
        except expected_exc:
            pass
def testAllArguments():
    """'all TYPE' parsing accepts only the literal types in/out."""
    assert getAllArguments(["all", "in"]) == "in"
    assert getAllArguments(["all", "out"]) == "out"
    bad_cases = (
        (["all", "100"], InvalidParameters),
        (["all"], CommandError),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getAllArguments(bad_args)
            assert False
        except expected_exc:
            pass
def testGetBalanceArguments():
    """'balance DAY' parsing: a day argument is required and must be a
    non-negative integer."""
    assert getBalanceArguments(["balance", "100"]) == 100
    assert getBalanceArguments(["balance", "10000"]) == 10000
    bad_cases = (
        (["balance"], CommandError),
        (["balance", "day"], InvalidParameters),
        (["balance", "-1"], InvalidParameters),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getBalanceArguments(bad_args)
            assert False
        except expected_exc:
            pass
def testGetSumArgument():
    """'sum TYPE' parsing: the type is required and must be in/out."""
    assert getSumArgument(["sum", "in"]) == "in"
    assert getSumArgument(["sum", "out"]) == "out"
    bad_cases = (
        (["sum"], CommandError),
        (["sum", "100"], InvalidParameters),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getSumArgument(bad_args)
            assert False
        except expected_exc:
            pass
def testGetMaxArguments():
    """'max TYPE day' parsing: requires an in/out type followed by the
    literal keyword 'day'."""
    assert getMaxArguments(["max", "in", "day"]) == "in"
    assert getMaxArguments(["max", "out", "day"]) == "out"
    bad_cases = (
        (["max"], CommandError),
        (["max", "inout", "day"], InvalidParameters),
        (["max", "in", "150"], CommandError),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getMaxArguments(bad_args)
            assert False
        except expected_exc:
            pass
def testGetSortArguments():
    """'<asc|desc> sort <day|in|out>' parsing returns (direction, field);
    bad direction, keyword, or field raises CommandError."""
    assert getSortArguments(["asc", "sort", "day"]) == ("asc", "day")
    assert getSortArguments(["desc", "sort", "in"]) == ("desc", "in")
    assert getSortArguments(["desc", "sort", "out"]) == ("desc", "out")
    bad_cases = (
        (["asc"], CommandError),
        (["asc", "sorteaza", "day"], CommandError),
        (["desc", "sort", "150"], CommandError),
    )
    for bad_args, expected_exc in bad_cases:
        try:
            getSortArguments(bad_args)
            assert False
        except expected_exc:
            pass
def testUndoRedo():
    """Exercise undo() and redo() on a transaction pack.

    A pack appears to be [current_transactions, undo_history, redo_state]:
    undo pops the newest history snapshot into the current list and stashes
    the pre-undo state for redo; redo restores one undone step.
    NOTE(review): structure inferred from the assertions below — confirm
    against the undo/redo implementation.
    """
    # Start state: four transactions and three progressively longer snapshots.
    transactionPack = [[(1, 1, 'in', 'a'), (2, 2, 'out', 'b'), (3, 3, 'in', 'c'), (4, 4, 'out', 'd')], [[], [(1, 1, 'in', 'a')], [(1, 1, 'in', 'a'), (2, 2, 'out', 'b')], [(1, 1, 'in', 'a'), (2, 2, 'out', 'b'), (3, 3, 'in', 'c')]]]
    # First undo drops the fourth transaction and records it for redo.
    transactionPack = undo(transactionPack)
    assert transactionPack == [[(1, 1, 'in', 'a'), (2, 2, 'out', 'b'), (3, 3, 'in', 'c')], [ [], [(1, 1, 'in', 'a')], [(1, 1, 'in', 'a'), (2, 2, 'out', 'b')] ], [(1, 1, 'in', 'a'), (2, 2, 'out', 'b'), (3, 3, 'in', 'c'), (4, 4, 'out', 'd')]]
    # Second undo drops the third transaction as well.
    transactionPack = undo(transactionPack)
    assert transactionPack == [[(1, 1, 'in', 'a'), (2, 2, 'out', 'b')], [[], [(1, 1, 'in', 'a')]], [(1, 1, 'in', 'a'), (2, 2, 'out', 'b'), (3, 3, 'in', 'c')]]
    # Redo restores the most recently undone step.
    transactionPack = redo(transactionPack)
    assert transactionPack == [[(1, 1, 'in', 'a'), (2, 2, 'out', 'b'), (3, 3, 'in', 'c')], [ [], [(1, 1, 'in', 'a')], [(1, 1, 'in', 'a'), (2, 2, 'out', 'b')] ]]
def runTests():
    """Execute the full parser/undo-redo test suite in order; any failure
    surfaces as an AssertionError from the offending test."""
    suite = (testRepresentsInt,
             testGetAddTransaction,
             testGetInsertTransaction,
             testGetRemoveTransactionDay,
             testGetRemoveTypeTransaction,
             testGetRemoveTransactionInterval,
             testReplaceTransaction,
             testGetProperties,
             testAllArguments,
             testGetBalanceArguments,
             testGetSumArgument,
             testGetMaxArguments,
             testGetSortArguments,
             testUndoRedo)
    for testCase in suite:
        testCase()
"repo_name": "rusucosmin/courses",
"path": "ubb/fop/lab02-04/Bank Account Management/tests.py",
"copies": "1",
"size": "9969",
"license": "mit",
"hash": 5844026848824243000,
"line_mean": 31.4755700326,
"line_max": 240,
"alpha_frac": 0.5886247367,
"autogenerated": false,
"ratio": 3.97488038277512,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506350511947512,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coty'
from api_helper import parse_params
import json
from pymortgage.server.amortization_schedule import AmortizationSchedule
class RESTServer:
    """CherryPy REST handler that serves amortization schedules as JSON.

    Path shapes (the loan query parameters are parsed by parse_params):
      /            -> full monthly schedule
      /month       -> full monthly schedule
      /year        -> full yearly schedule
      /<int>       -> the single entry for that month
      /month/<n>   -> the single entry for month n
      /year/<n>    -> the single entry for year n
    """
    exposed = True

    # if you were to request /foo/bar?woo=hoo, vpath[0] would be bar, and
    # params would be {'woo': 'hoo'}.
    def GET(self, *vpath, **params):
        self.pps = parse_params(params)
        if self.pps is None:
            return "Not enough parameters provided."
        monthly_schedule = self.getMonthlySchedule()
        yearly_schedule = self.getYearlySchedule()
        # Use == rather than the fragile int-identity test ("len(...) is 0").
        if len(vpath) == 0:
            return json.dumps(monthly_schedule)
        elif len(vpath) == 1:
            if vpath[0] == 'year':
                return json.dumps(yearly_schedule)
            if vpath[0] == 'month':
                return json.dumps(monthly_schedule)
            # Otherwise the single path segment must be a month number.
            try:
                month = int(vpath[0])
            except ValueError:
                return "Please provide a valid month integer."
            for month_info in monthly_schedule:
                if str(month_info['month']) == str(month):
                    return json.dumps(month_info)
            # Previously this path fell through and returned None.
            return "No information for %s" % vpath[0]
        else:  # len(vpath) > 1
            term = vpath[0]
            # quick check to validate month/year
            if term == "year":
                schedule = yearly_schedule
            elif term == "month":
                schedule = monthly_schedule
            else:
                return "Please request month or year."
            for term_info in schedule:
                if str(term_info[term]) == str(vpath[1]):
                    return json.dumps(term_info)
            return "No information for %s" % vpath[0]

    def getMonthlySchedule(self):
        """Build the month-by-month schedule from the parsed parameters."""
        return AmortizationSchedule(self.pps['rate'], self.pps['prin'], self.pps['term'],
                                    self.pps['tax'], self.pps['ins'], self.pps['adj_freq'],
                                    self.pps['adj_cap'], self.pps['life_cap'],
                                    self.pps['extra_pmt'],
                                    False).monthly_schedule

    def getYearlySchedule(self):
        """Build the yearly rollup schedule from the parsed parameters."""
        return AmortizationSchedule(self.pps['rate'], self.pps['prin'], self.pps['term'],
                                    self.pps['tax'], self.pps['ins'], self.pps['adj_freq'],
                                    self.pps['adj_cap'], self.pps['life_cap'],
                                    self.pps['extra_pmt'],
                                    True).yearly_schedule
| {
"repo_name": "csutherl/pymortgage",
"path": "pymortgage/server/REST_Api.py",
"copies": "1",
"size": "2719",
"license": "apache-2.0",
"hash": 4034212091830186500,
"line_mean": 40.8307692308,
"line_max": 105,
"alpha_frac": 0.478484737,
"autogenerated": false,
"ratio": 4.302215189873418,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5280699926873418,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coty'
from pymortgage.server.amortization_schedule import AmortizationSchedule
from api_helper import parse_params
from pymortgage.server.D3_schedule import D3Schedule
import json
class D3Server:
    """CherryPy handler returning amortization data shaped for D3 charts.

    A leading 'year' path segment selects the yearly rollup; any other path
    — including no path at all — yields the monthly schedule.
    """
    exposed = True

    # if you were to request /foo/bar?woo=hoo, vpath[0] would be bar, and
    # params would be {'woo': 'hoo'}.
    def GET(self, *vpath, **params):
        # parse and get the parameters
        pps = parse_params(params)
        if pps is None:
            return "Not enough parameters provided."
        yearly = len(vpath) > 0 and vpath[0] == 'year'
        # rate, P, n, annual_tax, annual_ins, adj_frequency, adj_cap,
        # lifetime_cap, extra payment, yearly-vs-monthly flag
        am_sched = AmortizationSchedule(pps['rate'], pps['prin'], pps['term'],
                                        pps['tax'], pps['ins'], pps['adj_freq'],
                                        pps['adj_cap'], pps['life_cap'],
                                        pps['extra_pmt'], yearly)
        d3_sched = D3Schedule(am_sched)
        if yearly:
            return json.dumps(d3_sched.yearly_d3_schedule)
        # Fixed: a request with no path segment previously matched no branch
        # and returned None; it now falls back to the monthly schedule.
        return json.dumps(d3_sched.monthly_d3_schedule)
| {
"repo_name": "csutherl/pymortgage",
"path": "pymortgage/server/d3_api.py",
"copies": "1",
"size": "1411",
"license": "apache-2.0",
"hash": -7190046277351813000,
"line_mean": 41.7575757576,
"line_max": 118,
"alpha_frac": 0.5506732814,
"autogenerated": false,
"ratio": 3.416464891041162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4467138172441162,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coty'
# from pymortgage.server.amortization_schedule import AmortizationSchedule
def parse_params(params):
    """Parse and validate loan query parameters into a normalized dict.

    Params:
        'r'  = rate, decimal in [0, 1] (required)
        'P'  = principal (required)
        'n'  = term (required)
        't'  = annual taxes (optional, default 0)
        'i'  = annual insurance (optional, default 0)
        'af' = adjustment frequency in years (ARM only)
        'ac' = adjustment cap, decimal in [0, 1] (ARM only)
        'lc' = lifetime cap, decimal in [0, 1] (ARM only)
        'e'  = extra payment amount (optional, default 0)

    Returns a dict keyed rate/prin/term/tax/ins/adj_freq/adj_cap/life_cap/
    extra_pmt; the three ARM fields are None for fixed-rate loans.
    Raises KeyError/ValueError for missing or malformed values and a plain
    Exception when a rate/cap is not expressed as a decimal.
    """
    p = params  # shorting the var name :)
    # Always-required values.
    try:
        rate = float(p['r'])
        prin = float(p['P'])
        term = int(p['n'])
    except KeyError as ke:
        # Formatting the exception itself replaces the Python-2-only
        # ``.message`` attribute, which no longer exists on Python 3.
        raise KeyError("A required parameter is missing: %s" % ke)
    except ValueError as ve:
        raise ValueError("A required parameter has an invalid value: %s" % ve)
    # Adjustable-rate (ARM) values: all three must be present together;
    # a missing one silently selects the fixed-rate path.
    try:
        adj_freq = int(p['af'])
        adj_cap = float(p['ac'])
        life_cap = float(p['lc'])
        adjustable = True
    except KeyError:
        adj_freq = None
        adj_cap = None
        life_cap = None
        adjustable = False
    except ValueError as ve:
        raise ValueError("A parameter has an invalid value: %s" % ve)
    # Always-optional values; the int default 0 is preserved when absent.
    try:
        tax = float(p['t']) if 't' in p else 0
        ins = float(p['i']) if 'i' in p else 0
        extra_pmt = float(p['e']) if 'e' in p else 0
    except ValueError as ve:
        raise ValueError("A parameter has an invalid value: %s" % ve)
    # Sanity-check that percentages were supplied as decimals.
    if rate < 0 or rate > 1:
        raise Exception("Rate is not expressed as decimal.")
    if adjustable:
        if adj_cap < 0 or adj_cap > 1:
            raise Exception("Adjustment cap is not expressed as decimal.")
        elif life_cap < 0 or life_cap > 1:
            raise Exception("Lifetime cap is not expressed as decimal.")
    return {'rate': rate, 'prin': prin, 'term': term, 'tax': tax, 'ins': ins,
            'adj_freq': adj_freq, 'adj_cap': adj_cap, 'life_cap': life_cap,
            'extra_pmt': extra_pmt}
"repo_name": "csutherl/pymortgage",
"path": "pymortgage/server/api_helper.py",
"copies": "1",
"size": "2515",
"license": "apache-2.0",
"hash": 6605138625473885000,
"line_mean": 30.45,
"line_max": 119,
"alpha_frac": 0.5638170974,
"autogenerated": false,
"ratio": 3.736998514115899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4800815611515899,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coty'
from setuptools import setup, find_packages
import pymortgage
from setuptools.command.test import test as TestCommand
import sys
class Tox(TestCommand):
    """setuptools 'test' command that delegates test execution to tox."""
    def finalize_options(self):
        # Run the whole tox suite; no extra arguments are forwarded.
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        #import here, cause outside the eggs aren't loaded
        import tox
        # Propagate tox's exit code as our own so CI sees failures.
        errcode = tox.cmdline(self.test_args)
        sys.exit(errcode)
# Package metadata and build configuration for pymortgage.
setup(
    name='pymortgage',
    version=pymortgage.__version__,
    url='http://github.com/csutherl/pymortgage',
    license='Apache LICENSE Version 2.0',
    description='Application to chart mortgage and other loan data.',
    author='Coty Sutherland',
    packages=find_packages(),
    install_requires=[
        "CherryPy==3.2.4",
        "pip==1.2.1",
        "setuptools==0.6c11",
        "wsgiref==0.1.2",
    ],
    classifiers=[
        'Programming Language :: Python',
        # A missing comma previously fused this entry with the next one into
        # the bogus classifier 'Development Status :: 4 - BetaNatural
        # Language :: English'.
        'Development Status :: 4 - Beta',
        'Natural Language :: English',
    ],
    cmdclass={'tox': Tox},
    test_suite='pymortgage.test'
)
| {
"repo_name": "csutherl/pymortgage",
"path": "setup.py",
"copies": "1",
"size": "1118",
"license": "apache-2.0",
"hash": 4719369140009643000,
"line_mean": 26.95,
"line_max": 69,
"alpha_frac": 0.626118068,
"autogenerated": false,
"ratio": 3.461300309597523,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9548613567684078,
"avg_score": 0.007760961982689042,
"num_lines": 40
} |
import os
import os.path
import subprocess
import sys
import shlex
import socket
import fnmatch
import platform
import errno
import shutil
import urllib.request
import urllib.parse
import urllib.error
import json
from functools import reduce
import time
# Base TCP ports for the per-node services; each node n_i derives its actual
# ports from these using an i-dependent offset (see erlang_args_for_node).
base_direct_port = 12000
base_api_port = 9000
base_couch_port = 9500
base_projector_port = 10000
base_xdcr_port = 13000
base_indexer_port = 9100
base_fts_port = 9200
base_eventing_port = 9300
base_cbas_port = 9600
base_prometheus_port = 9900
base_backup_http_port= 7100
base_backup_https_port= 17100
base_backup_grpc_port = 7200
# Seconds to wait for each node's REST endpoint when wait_for_start is used.
node_start_timeout_s = 30
# Default cluster credentials used by the REST helpers below.
default_username = "Administrator"
default_pass = "asdasd"
# This script lives in <ns_server>/pylib, so its parent is the ns_server tree.
script_dir = os.path.dirname(os.path.realpath(__file__))
ns_server_dir = os.path.dirname(script_dir)
def read_configuration():
    """Parse build/cluster_run.configuration into a {key: value} dict.

    Each line has the form KEY="value"; shlex.split strips the quoting.
    """
    configpath = os.path.join(ns_server_dir, "build", "cluster_run.configuration")
    with open(configpath) as f:
        def fn(line):
            k, v = line.strip().split('=')
            return k, shlex.split(v)[0]
        return dict(fn(line) for line in f.readlines())
# Parsed cluster_run.configuration; PREFIX is the server install prefix.
config = read_configuration()
PREFIX = config['prefix']
# Accepted values for connect()'s bucket type and per-node service lists.
valid_bucket_types = ["ephemeral", "membase", "memcached"]
valid_service_types = {"kv", "n1ql", "index", "fts", "cbas", "eventing",
                       "backup"}
def setup_extra_ns_server_app_file(force_community, start_index):
    """Optionally build an override ns_server.app advertising Community edition.

    When force_community is true, copies ./ebin/ns_server.app into
    extra/n_<start_index>/ebin with "enterprise" in the "vsn" entry rewritten
    to "community", and returns that ebin path; otherwise cleans up the
    extra/ directory and returns None.
    """
    # The extra/ebin directory contains modified versions of files also
    # contained in other directories. The ones in extra/ebin are listed
    # in the path directory such that they will take precedence when
    # loaded. Note the -pa option used when starting erl reverses the
    # order of the list.
    extra_dirname = "extra"
    extra_ebin_dirname = "{}/n_{}".format(extra_dirname, start_index)
    extra_ebin_path = extra_ebin_dirname + "/ebin"
    returned_path = None
    # Clean up any residual files from prior runs.
    try:
        if force_community:
            # Just delete the node-specific directory that we're going
            # to recreate with new content. There could be concurrent
            # instances running so can't more than that.
            shutil.rmtree(extra_ebin_dirname)
        else:
            # Get rid of the entire directory as we don't want any residual
            # files being found when walking the directory (see ebin_seach).
            shutil.rmtree(extra_dirname)
    except OSError as exc:
        # A missing directory is the normal first-run case; re-raise others.
        if exc.errno == errno.ENOENT:
            pass
        else:
            raise
    if force_community:
        found_enterprise = False
        with open("./ebin/ns_server.app", "r") as src_f:
            lines = src_f.readlines()
        lines_out = ""
        for line in lines:
            # The way to change Enterprise edition to Community edition is to
            # simply change the "vsn" in the ns_server app.
            if "vsn" in line and "enterprise" in line:
                line = line.replace("enterprise", "community")
                # Ensure only one line containing "vsn" and "enterprise".
                assert found_enterprise is False
                found_enterprise = True
            lines_out = lines_out + line
        if found_enterprise:
            # Any errors here are "real" so we want exceptions thrown
            os.makedirs(extra_ebin_path)
            with open("./{}/ns_server.app".format(
                    extra_ebin_path), "w") as dst_f:
                dst_f.write(lines_out)
            returned_path = extra_ebin_path
    return returned_path
def setup_path(ns_server_app_path):
    """Collect every "ebin" directory needed on the erlang code path.

    ns_server_app_path (possibly None, from setup_extra_ns_server_app_file)
    is moved to the end of the ns_server list so that, after the -pa ordering
    reversal, it takes precedence over the stock ebin directory.
    Exits the process if the couchdb libraries cannot be found.
    """
    def ebin_search(path_name):
        # Walk path_name and return every directory literally named "ebin".
        dirs = os.walk(path_name)
        ebins = []
        for d, _, _ in dirs:
            if os.path.basename(d) == "ebin":
                ebins.append(d)
        return ebins
    path = ebin_search(ns_server_dir)
    if ns_server_app_path in path:
        # The ns_server_app_path needs to be first in the path. We remove
        # it from what was found and append it to the path (it's at the
        # end as the -pa argument used when starting erl reverses the
        # order).
        path.remove(ns_server_app_path)
        path.append(ns_server_app_path)
    couchpath = ebin_search("{0}/lib/couchdb/erlang/lib".format(PREFIX))
    couch_plugins = ebin_search("{0}/lib/couchdb/plugins".format(PREFIX))
    if len(couchpath) == 0:
        sys.exit("Couch libs wasn't found.\nCan't handle it")
    # Note the paths are passed via "-pa" to the erl process where their
    # ordering is reversed.
    return couchpath + path + couch_plugins
def maybe_mk_node_couch_config(i, ini_file_name, root_dir):
    """Write node i's couchdb ini file (ports and data dirs) unless it
    already exists; existing files are never overwritten."""
    ini_dir = os.path.dirname(ini_file_name)
    # If ini file exists, then don't overwrite it.
    if os.path.isfile(ini_file_name):
        return
    try:
        os.mkdir(ini_dir)
    except os.error:
        # Directory already exists (or is otherwise unusable, caught below
        # when the file is opened).
        pass
    abs_root_dir = os.path.abspath(root_dir)
    with open(ini_file_name, "w") as f:
        f.write("[httpd]\n")
        f.write("port={0}\n".format(base_couch_port + i))
        f.write("[couchdb]\n")
        f.write("database_dir={0}/data/n_{1}/data\n".format(abs_root_dir, i))
        f.write("view_index_dir={0}/data/n_{1}/data\n".format(abs_root_dir, i))
        f.write("max_dbs_open=10000\n")
        f.write("[upr]\n")
        f.write("port={0}\n".format(base_direct_port + i * 2))
        f.write("[dcp]\n")
        f.write("port={0}\n".format(base_direct_port + i * 2))
def couch_configs(i, root_dir):
    """Return the ordered list of couchdb ini files for node i, creating the
    node-specific one on first use (later files override earlier ones)."""
    ini_file_name = os.path.join(root_dir, "couch", f"n_{i}_conf.ini")
    maybe_mk_node_couch_config(i, ini_file_name, root_dir)
    return ["{0}/etc/couchdb/default.ini".format(PREFIX),
            "{0}/etc/couchdb/default.d/capi.ini".format(PREFIX),
            "{0}/etc/couchdb/default.d/geocouch.ini".format(PREFIX),
            ini_file_name]
def os_specific(args, params):
    """Add os-specific junk to the cluster startup.

    Mutates *args* (erl command line) and *params* (subprocess.Popen kwargs)
    in place; on macOS it may also raise the process fd limit and seed the
    child environment.
    """
    if platform.system() == 'Windows':
        args += ["dont_suppress_stderr_logger", "false"]
    else:
        args += ["dont_suppress_stderr_logger", "true"]
    if platform.system() == 'Darwin':
        import resource
        # OS X has a pretty tiny default fd limit. Let's increase it
        # (if it hasn't already been).
        (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            resource.setrlimit(resource.RLIMIT_NOFILE, (4096, 4096))
        params['env'] = {"ERL_MAX_PORTS": "4096"}
        params['env'].update(os.environ)
def prepare_start_cluster(force_community, start_index):
    """Materialize any forced-community ns_server.app override, then return
    the list of ebin directories for the erlang code path."""
    override_path = setup_extra_ns_server_app_file(force_community,
                                                   start_index)
    return setup_path(override_path)
def quote_string_for_erl(s):
    """Wrap *s* in double quotes for an erl command line, escaping embedded
    backslashes and double quotes."""
    escaped = s.replace('\\', '\\\\').replace('"', '\\"')
    return '"{0}"'.format(escaped)
def generate_ssl_dist_optfile(datadir):
    """Render etc/ssl_dist_opts.in into <datadir>/config/ssl_dist_opts,
    substituting @CONFIG_PREFIX@ with the config dir; return the output path."""
    cfg_dir = os.path.join(datadir, "config")
    in_file = os.path.join(ns_server_dir, "etc", "ssl_dist_opts.in")
    out_file = os.path.join(cfg_dir, "ssl_dist_opts")
    if not os.path.exists(cfg_dir):
        os.makedirs(cfg_dir, 0o755)
    with open(in_file) as f:
        content = f.read().replace('@CONFIG_PREFIX@', cfg_dir)
    with open(out_file, "w") as f:
        f.write(content)
    return out_file
def abs_path_join(*parts):
    """Join path components and normalize the result to an absolute path."""
    joined = os.path.join(*parts)
    return os.path.abspath(joined)
def erlang_args_for_node(i, ebin_path, extra_args, args_prefix, root_dir):
    """Build the full erl command line for babysitter node n_<i>.

    Emulator flags come first, then kernel/ns_babysitter/ns_server settings
    and the per-node port assignments derived from the base_* constants.
    Order matters: extra_args are appended last so they can override.
    """
    logdir = abs_path_join(root_dir, "logs", f"n_{i}")
    # NOTE(review): "+MMmcs" "30" are adjacent literals that concatenate to
    # the single emulator flag "+MMmcs30" — presumably intentional; confirm.
    args = args_prefix + ["erl", "+MMmcs" "30",
                          "+A", "16", "+sbtu",
                          "+sbwt", "none",
                          "+P", "327680", "-pa"] + ebin_path
    args += [
        "-setcookie", "nocookie",
        "-kernel", "logger", "[{handler, default, undefined}]",
        "-couch_ini"] + couch_configs(i, root_dir)
    # Per-node filesystem locations under root_dir.
    datadir = abs_path_join(root_dir, 'data', f'n_{i}')
    tempdir = abs_path_join(root_dir, 'tmp')
    nodefile = os.path.join(datadir, "nodefile")
    babysitternodefile = os.path.join(
        datadir, "couchbase-server.babysitter.node")
    babysittercookiefile = os.path.join(
        datadir, "couchbase-server.babysitter.cookie")
    ssloptfile = generate_ssl_dist_optfile(datadir)
    cb_dist_config = os.path.join(datadir, "config", "dist_cfg")
    hosts_file = os.path.join(ns_server_dir, "etc", "hosts.cfg")
    static_config = os.path.join(ns_server_dir, "etc", "static_config.in")
    # Node identity, dist/SSL settings, paths, and the per-service port map.
    args += [
        "-name", "babysitter_of_n_{0}@cb.local".format(i),
        "-proto_dist", "cb",
        "-ssl_dist_optfile", ssloptfile,
        "-epmd_module", "cb_epmd",
        "-hidden",
        "-kernel", "dist_config_file", quote_string_for_erl(cb_dist_config),
        "-kernel", "inetrc", f"\"{hosts_file}\"",
        "-kernel", "external_tcp_port", "21400",
        "-kernel", "external_tls_port", "21450",
        "-ns_babysitter", "cookiefile", quote_string_for_erl(
            babysittercookiefile),
        "-ns_babysitter", "nodefile", quote_string_for_erl(babysitternodefile),
        "-ns_server", "config_path", f'"{static_config}"',
        "error_logger_mf_dir", quote_string_for_erl(logdir),
        "path_config_etcdir", f'"{os.path.join(ns_server_dir, "priv")}"',
        "path_config_bindir", quote_string_for_erl(PREFIX + "/bin"),
        "path_config_libdir", quote_string_for_erl(PREFIX + "/lib"),
        "path_config_datadir", quote_string_for_erl(datadir),
        "path_config_tmpdir", quote_string_for_erl(tempdir),
        "path_config_secdir", quote_string_for_erl(PREFIX + "/etc/security"),
        "path_audit_log", quote_string_for_erl(logdir),
        "rest_port", str(base_api_port + i),
        "query_port", str(base_couch_port - 1 - i),
        "ssl_query_port", str(10000 + base_couch_port - 1 - i),
        "projector_port", str(base_projector_port + i),
        "projector_ssl_port", str(base_projector_port + i),
        "ssl_rest_port", str(10000 + base_api_port + i),
        "capi_port", str(base_couch_port + i),
        "ssl_capi_port", str(10000 + base_couch_port + i),
        "memcached_port", str(base_direct_port + i * 2),
        "memcached_dedicated_port", str(base_direct_port - i * 4 - 1),
        "memcached_ssl_port", str(base_direct_port - i * 4 - 2),
        "memcached_dedicated_ssl_port", str(base_direct_port - i * 4 - 3),
        "memcached_prometheus", str(base_direct_port - i * 4 - 4),
        "nodefile", quote_string_for_erl(nodefile),
        "short_name", quote_string_for_erl('n_{0}'.format(i)),
        "xdcr_rest_port", str(base_xdcr_port + i),
        "indexer_admin_port", str(base_indexer_port + i * 6),
        "indexer_scan_port", str(base_indexer_port + i * 6 + 1),
        "indexer_http_port", str(base_indexer_port + i * 6 + 2),
        "indexer_https_port", str(10000 + base_indexer_port + i * 6 + 2),
        "indexer_stinit_port", str(base_indexer_port + i * 6 + 3),
        "indexer_stcatchup_port", str(base_indexer_port + i * 6 + 4),
        "indexer_stmaint_port", str(base_indexer_port + i * 6 + 5),
        "fts_http_port", str(base_fts_port + i * 2),
        "fts_ssl_port", str(10000 + base_fts_port + i * 2),
        "fts_grpc_port", str(base_fts_port + i * 2 + 1),
        "fts_grpc_ssl_port", str(10000 + base_fts_port + i * 2 + 1),
        "eventing_http_port", str(base_eventing_port + i),
        "eventing_https_port", str(10000 + base_eventing_port + i),
        "eventing_debug_port", str(base_eventing_port + i * 6 + 1),
        "cbas_http_port", str(base_cbas_port + i * 15),
        "cbas_cc_http_port", str(base_cbas_port + i * 15 + 1),
        "cbas_cc_cluster_port", str(base_cbas_port + i * 15 + 2),
        "cbas_cc_client_port", str(base_cbas_port + i * 15 + 3),
        "cbas_console_port", str(base_cbas_port + i * 15 + 4),
        "cbas_cluster_port", str(base_cbas_port + i * 15 + 5),
        "cbas_data_port", str(base_cbas_port + i * 15 + 6),
        "cbas_result_port", str(base_cbas_port + i * 15 + 7),
        "cbas_messaging_port", str(base_cbas_port + i * 15 + 8),
        "cbas_debug_port", str(base_cbas_port + i * 15 + 9),
        "cbas_parent_port", str(base_cbas_port + i * 15 + 10),
        "cbas_admin_port", str(base_cbas_port + i * 15 + 11),
        "cbas_replication_port", str(base_cbas_port + i * 15 + 12),
        "cbas_metadata_port", str(base_cbas_port + i * 15 + 13),
        "cbas_metadata_callback_port", str(base_cbas_port + i * 15 + 14),
        "cbas_ssl_port", str(10000 + base_cbas_port + i),
        "prometheus_http_port", str(base_prometheus_port + i),
        "backup_http_port", str(base_backup_http_port + i),
        "backup_https_port", str(base_backup_https_port + i),
        "backup_grpc_port", str(base_backup_grpc_port + i),
    ] + extra_args
    return args
def find_primary_addr(ipv6):
    """Best-effort detection of this machine's outward-facing IP address.

    Connects a UDP socket to a public DNS server address and reads back the
    local address the kernel selected; returns None on socket errors.
    """
    family = socket.AF_INET6 if ipv6 else socket.AF_INET
    dns_addr = "2001:4860:4860::8844" if ipv6 else "8.8.8.8"
    s = socket.socket(family, socket.SOCK_DGRAM)
    try:
        s.connect((dns_addr, 53))
        # IPv6 getsockname also returns flowinfo and scope id.
        if ipv6:
            addr, port, _, _ = s.getsockname()
        else:
            addr, port = s.getsockname()
        return addr
    except socket.error:
        return None
    finally:
        s.close()
def start_cluster(num_nodes=1,
                  dont_start=False,
                  start_index=0,
                  dont_rename=False,
                  static_cookie=False,
                  loglevel='debug',
                  prepend_extras=False,
                  pluggable_config=[],
                  use_minified=False,
                  disable_autocomplete="{disable_autocomplete,false}",
                  pretend_version=None,
                  ipv6=False,
                  force_community=False,
                  dev_preview_default=None,
                  args=[],
                  root_dir=ns_server_dir,
                  wait_for_start=False,
                  nooutput=False):
    """Spawn num_nodes local babysitter erl processes (nodes n_<start_index>
    and up) and return their subprocess.Popen objects.

    NOTE(review): pluggable_config=[] and args=[] are mutable defaults, so
    UI plugin paths appended below accumulate across calls — confirm this is
    intended before reusing the function in a long-lived process.
    """
    extra_args = []
    # Rename nodes to the machine's routable address unless disabled.
    if not dont_rename:
        primary_addr = find_primary_addr(ipv6)
        if primary_addr is None:
            print("was unable to detect 'internet' address of this machine."
                  + " node rename will be disabled")
        else:
            extra_args += ["rename_ip", '"' + primary_addr + '"']
    # Caller args either precede everything (prepend) or trail extra_args.
    if prepend_extras:
        prepend_args = args[0:]
    else:
        prepend_args = []
        extra_args += args[0:]
    if static_cookie:
        extra_args += ["-ns_server", "dont_reset_cookie", "true"]
    # dont_start loads the code but skips the babysitter bootstrap.
    if dont_start:
        extra_args += ["-run", "t", "fake_loggers"]
    else:
        extra_args += ["-noinput"]
        extra_args += ["-run", "child_erlang", "child_start",
                       "ns_babysitter_bootstrap"]
        extra_args += ["-ns_babysitter", "handle_ctrl_c", "true"]
    extra_args += ["-ns_server", "loglevel_stderr", loglevel]
    # Pick up any built cluster_run UI plugin descriptors.
    plugins_dir = os.path.join(ns_server_dir, '..', 'build',
                               'cluster_run_ui_plugins')
    if os.path.isdir(plugins_dir):
        for f in os.listdir(plugins_dir):
            if fnmatch.fnmatch(f, 'pluggable-ui-*.cluster_run.json'):
                pluggable_config.append(os.path.join(plugins_dir, f))
    if pluggable_config:
        extra_args += ["-ns_server", "ui_plugins",
                       quote_string_for_erl(','.join(pluggable_config))]
    ui_env = [disable_autocomplete]
    extra_args += ["-ns_server", "use_minified",
                   "true" if use_minified else "false"]
    extra_args += ["-ns_server", "ui_env", '[' + ','.join(ui_env) + ']']
    if pretend_version is not None:
        extra_args += ["-ns_server",
                       "pretend_version", '"{}"'.format(pretend_version)]
    if dev_preview_default is not None:
        extra_args += ["-ns_server", "developer_preview_enabled_default",
                       "true" if dev_preview_default else "false"]
    ebin_path = prepare_start_cluster(force_community, start_index)
    def start_node(node_num):
        # Launch a single node and return its Popen object.
        logdir = os.path.join(root_dir, "logs", f"n_{node_num}")
        try:
            os.makedirs(logdir)
        except OSError:
            pass
        args = erlang_args_for_node(node_num, ebin_path, extra_args,
                                    prepend_args, root_dir)
        params = {}
        os_specific(args, params)
        if 'env' not in params:
            params['env'] = {}
            params['env'].update(os.environ)
        path = params['env']['PATH']
        path = (PREFIX + "/bin") + os.pathsep + path
        if 'ERL_FULLSWEEP_AFTER' not in params['env']:
            params['env']['ERL_FULLSWEEP_AFTER'] = '512'
        params['env']['PATH'] = path
        crash_dump_base = 'erl_crash.dump.n_%d' % node_num
        params['env']['ERL_CRASH_DUMP_BASE'] = crash_dump_base
        params['env']['ERL_CRASH_DUMP'] = crash_dump_base + '.babysitter'
        params['env']['COUCHBASE_SMALLER_PKEYS'] = '1'
        params['close_fds'] = True
        if platform.system() == "Windows":
            params['close_fds'] = False
        w = None
        r = None
        # With -noinput the child watches its stdin pipe; closing our write
        # end later triggers a graceful shutdown (see kill_nodes).
        if "-noinput" in args:
            (r, w) = os.pipe()
            params['stdin'] = r
        if 'setpgrp' in os.__dict__ and params.get('close_fds'):
            # this puts child out of our process group. So that
            # Ctrl-C doesn't deliver SIGINT to it, leaving us
            # ability to it shutdown carefully or otherwise
            params['preexec_fn'] = os.setpgrp
        if nooutput:
            params['stdout'] = subprocess.DEVNULL
            params['stderr'] = subprocess.DEVNULL
        pr = subprocess.Popen(args, **params)
        if w is not None:
            os.close(r)
            # Squirrel away the write descriptor for the pipe into the
            # subprocess.Popen object
            pr.write_side = w
        return pr
    processes = [start_node(i + start_index) for i in range(num_nodes)]
    if wait_for_start:
        wait_nodes_up(num_nodes, start_index, node_start_timeout_s)
    return processes
def wait_nodes_up(num_nodes, start_index, timeout_s):
    """Block until each node's REST /pools endpoint answers.

    The timeout is a single deadline shared across all nodes; raises
    RuntimeError (with the last URL error) when it expires.
    """
    deadline = time.time() + timeout_s
    def wait_node(i):
        # Poll node i's REST port every 0.5s until it responds or we time out.
        last_error = None
        print(f"Waiting for node {i}", end="")
        sys.stdout.flush()
        while time.time() < deadline:
            try:
                http_get_json(f"http://localhost:{base_api_port+i}/pools")
                print(f" UP")
                return
            except urllib.error.URLError as e:
                last_error = e.reason
            print('.', end='')
            sys.stdout.flush()
            time.sleep(0.5)
        print(" TIMEOUT")
        raise RuntimeError(f"Node {i} wait timed out (last error: {last_error})")
    [wait_node(start_index + i) for i in range(num_nodes)]
def kill_nodes(nodes, terminal_attrs=None):
    """Terminate node processes started by start_cluster and wait for them.

    Nodes started with a stdin pipe (the "-noinput" path) are shut down by
    closing our write end of that pipe; any other node is killed outright.
    Optionally restores saved terminal attributes afterwards (a value
    previously captured via termios.tcgetattr).
    """
    for n in nodes:
        # start_cluster only sets write_side when it created a stdin pipe,
        # so tolerate its absence instead of raising AttributeError.
        write_side = getattr(n, 'write_side', None)
        if write_side is not None:
            print("Closing %d\n" % write_side)
            # this line does graceful shutdown versus quick
            # os.write(write_side, "shutdown\n")
            os.close(write_side)
        else:
            try:
                n.kill()
            except OSError:
                # Process already exited.
                pass
    for n in nodes:
        n.wait()
    if terminal_attrs is not None:
        import termios
        termios.tcsetattr(sys.stdin, termios.TCSANOW, terminal_attrs)
def bool_request_value(value):
    """Map a truthy value to "1" and a falsy one to "0" for REST form data."""
    if value:
        return "1"
    return "0"
class PasswordManager(urllib.request.HTTPPasswordMgr):
    """Password manager that hands back one fixed credential pair for every
    realm/URI instead of consulting a registry."""

    def __init__(self, username, password):
        # Deliberately skip the parent initializer: its realm/URI registry is
        # never used because find_user_password is overridden below.
        self.auth = (username, password)

    def find_user_password(self, realm, authuri):
        """Return the fixed (username, password) regardless of realm/URI."""
        return self.auth
def do_encode(input_string):
    """Encode a text string to bytes for use as an HTTP request body."""
    return input_string.encode("utf-8")
def http_get_json(url):
    """GET *url* with the default cluster credentials and decode the
    response body as JSON."""
    body = http_get(url)
    return json.loads(body)
def http_get(url):
    """GET *url* using HTTP basic auth with the default cluster credentials
    and return the raw response body."""
    auth_handler = urllib.request.HTTPBasicAuthHandler(
        PasswordManager(default_username, default_pass))
    opener = urllib.request.build_opener(auth_handler)
    return opener.open(url).read()
def connect(num_nodes=0,
            start_index=0,
            deploy=['kv'],
            buckettype="membase",
            memsize=256,
            indexmemsize=256,
            index_storage_mode=None,
            replicas=1,
            replica_index=True,
            protocol="ipv4",
            encryption=False,
            do_rebalance=True,
            storage_backend="couchstore"):
    """Initialize node n_<start_index> via REST, create the default bucket,
    join the remaining nodes to it, and optionally trigger a rebalance.

    deploy is either a list of services applied to every node or a dict
    mapping node names ("n0", "n1", ...) to service lists; "kv" is always
    forced onto n0.
    """
    if isinstance(deploy, list):
        services = deploy
        deploy = dict(("n%d" % i, services[:]) for i in range(num_nodes))
    if "kv" not in deploy.get("n0", []):
        deploy["n0"] = deploy.get("n0", []) + ["kv"]
    # Validate counts, bucket type, quota, replicas, node names and services.
    # NOTE(review): usage() is not defined in this module's visible scope —
    # confirm it is provided by the importing script.
    if num_nodes == 0 or buckettype not in valid_bucket_types or \
       int(memsize) < 256 or int(replicas) > 3 or \
       not set(deploy.keys()) <= \
       set(["n" + str(i) for i in range(num_nodes)]) or \
       not set(reduce(lambda x, y: x + y, deploy.values(), [])) <= \
       valid_service_types:
        usage()
        sys.exit()
    password_mgr = PasswordManager("Administrator", "asdasd")
    handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
    o = urllib.request.build_opener(handler)
    print("Connecting {0} nodes, bucket type {1}, mem size {2} "
          "with {3} replica copies, password asdasd, "
          "deployment plan {4}\n".format(
              num_nodes, buckettype, memsize, replicas, str(deploy)))
    base_port = 9000 + start_index
    addr = "127.0.0.1" if protocol == "ipv4" else "[::1]"
    services = deploy["n0"]
    print("Connecting node 0 with services {0}".format(str(services)))
    info = json.loads(o.open("http://{0}:{1}/pools".format(
        addr, base_port)).read())
    community_edition = info['isEnterprise'] is not True
    # Configure the address family / node-to-node encryption first.
    net_opts = do_encode(
        "afamily={0}".format(protocol) +
        "&nodeEncryption={0}".format(
            "on" if encryption else "off"))
    o.open("http://{0}:{1}/node/controller/enableExternalListener".format(
        addr, base_port), net_opts)
    o.open("http://{0}:{1}/node/controller/setupNetConfig".format(
        addr, base_port), net_opts)
    data = do_encode("services={0}".format(",".join(services)))
    o.open("http://{0}:{1}/node/controller/setupServices".format(
        addr, base_port), data).read()
    data = do_encode("memoryQuota=" + str(memsize) +
                     "&indexMemoryQuota=" + str(indexmemsize))
    o.open("http://{0}:{1}/pools/default".format(addr, base_port), data).read()
    # Create the "default" bucket.
    data = do_encode("name=default" +
                     "&bucketType=" + buckettype +
                     "&storageBackend=" + storage_backend +
                     "&ramQuotaMB=" + str(memsize) +
                     "&replicaNumber=" + str(replicas) +
                     "&replicaIndex=" + bool_request_value(replica_index))
    o.open("http://{0}:{1}/pools/default/buckets".format(addr, base_port),
           data).read()
    # Set the admin credentials (this finalizes cluster initialization).
    data = do_encode("port=SAME&username=Administrator&password=asdasd")
    o.open("http://{0}:{1}/settings/web".format(addr, base_port),
           data).read()
    if index_storage_mode is not None:
        o.open("http://{0}:{1}/settings/indexes".format(addr, base_port),
               do_encode("storageMode=" + index_storage_mode)).read()
    # Join the remaining nodes, defaulting unlisted nodes to kv.
    for i in range(1, num_nodes):
        port = base_port + i
        services = deploy.get("n" + str(i), [])
        if not services:
            services = ["kv"]
        print("Connecting node {0} with services {1}".format(i, str(services)))
        cluster_member_port = base_port if community_edition else \
            base_port + 10000
        data = do_encode("user=Administrator&password=asdasd&" +
                         "clusterMemberHostIp={0}".format(addr) +
                         "&clusterMemberPort={0}".format(cluster_member_port) +
                         "&services={0}".format(",".join(services)))
        o.open("http://{0}:{1}/node/controller/doJoinCluster".format(
            addr, port), data).read()
    if do_rebalance:
        print("Getting node list")
        info = json.loads(o.open("http://{0}:{1}/nodeStatuses".format(
            addr, base_port)).read())
        print("Servers added, triggering rebalance.")
        data = do_encode(urllib.parse.urlencode(
            {'knownNodes': ",".join([info[k]['otpNode'] for k in info]),
             'ejectedNodes': ''}))
        o.open("http://{0}:{1}/controller/rebalance".format(addr, base_port),
               data).read()
| {
"repo_name": "couchbase/ns_server",
"path": "pylib/cluster_run_lib.py",
"copies": "1",
"size": "24900",
"license": "apache-2.0",
"hash": -526849018817724700,
"line_mean": 36.1641791045,
"line_max": 82,
"alpha_frac": 0.5736144578,
"autogenerated": false,
"ratio": 3.4175130387043646,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44911274965043646,
"avg_score": null,
"num_lines": null
} |
__author__ = 'coxious'
from constants import *
# Module-level cache of the chosen landing point; computed once by
# GetLandingPoint on the first Strategy1 call and reused afterwards.
# (The 'global' statement is a no-op at module scope but kept as-is.)
global LandingPoint
LandingPoint = []
def Strategy1(pos_vec,binary_map):
    """Steer toward the cached landing point.

    Returns an acceleration vector proportional to the offset between the
    landing point and the current position (pos_vec[0]).
    Python 2 module (print statement); np/math come from 'constants import *'.
    """
    pos = pos_vec[0]
    global LandingPoint
    # Compute the landing point only once and cache it for subsequent calls.
    if len(LandingPoint)== 0 :
        LandingPoint = GetLandingPoint(pos_vec,binary_map)
        print "Landing point" , LandingPoint
    #accelorator = np.array([vector[0],vector[1],0]/math.sqrt(vector*vector))
    # Simple proportional controller; the gain of 100 is empirical.
    return ((LandingPoint - pos)*100)
def GetLandingPoint(pos_vec,binary_map):
    """Return the nearest free map cell as np.array([x, y, 0]).

    pos_vec: sequence whose first element is the current (x, y, ...) position.
    binary_map: 2-D array where a value of 0 marks an acceptable landing cell.
    Raises ValueError if the map contains no free cell.
    """
    pos = pos_vec[0]
    shape = binary_map.shape
    def GetDistance(pos_current,x,y):
        # Euclidean distance from the current position to cell (x, y).
        return math.sqrt((pos_current[0] - x)**2 + (pos_current[1] - y)**2 )
    # range (not the py2-only xrange) keeps this py2/py3-compatible with
    # identical results.
    acceptable_place = [[GetDistance(pos,x,y),x,y]
                        for x in range(shape[0])
                        for y in range(shape[1])
                        if binary_map[x][y] == 0]
    # min() is O(n) and selects the same first-minimal entry that the
    # previous sorted(...)[0] did, without sorting all candidates.
    best = min(acceptable_place, key=lambda entry: entry[0])
    return np.array([best[1],best[2],0])
| {
"repo_name": "Coxious/SpaceLanding",
"path": "Strategy.py",
"copies": "1",
"size": "1237",
"license": "apache-2.0",
"hash": 1740916233421066500,
"line_mean": 30.7179487179,
"line_max": 77,
"alpha_frac": 0.5731608731,
"autogenerated": false,
"ratio": 3.031862745098039,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4105023618198039,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.