index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
42,668,260
|
EdLeafe/leafe
|
refs/heads/main
|
/downloads.py
|
import datetime
from functools import partial
import os
import re
import smtplib
import stat
import shutil
import time
from flask import abort, g, redirect, render_template, request, url_for
import utils
UPLOAD_DIR = "/var/www/uploads"
DOWNLOAD_PATH = "download_file"
DLBASE = f"https://leafe.com/{DOWNLOAD_PATH}"
# CDNBASE = "https://baba3e9f50e49daa7c3f-032440835eb7c07735745c77228f7f03.ssl.cf1.rackcdn.com"
CDNBASE = "https://com-leafe-images.nyc3.cdn.digitaloceanspaces.com/ftp"
LICENSES = {
"f": "Freeware ",
"s": "Shareware ",
"c": "Commercial ",
"d": "Demoware ",
"g": "GPL Software ",
"l": "LGPL Software ",
"m": "Creative Commons License ",
"o": "Other Open Source License ",
}
search_term = ""
def _cost_type(val, cost):
    """Return the license description for *val*, with the cost appended if any."""
    license_text = LICENSES.get(val, "")
    cost_text = " - $%s" % cost if cost else ""
    return "".join([license_text, cost_text])
def _hilite_match(val, txt):
if not val:
return txt
pat = re.compile(rf"\b{val}\b", re.I)
repl = f'<span class="searchmatch">{val}</span>'
return pat.sub(repl, txt)
def download_file(url, url2=None):
    """Redirect the client to the CDN copy of the requested file.

    *url2*, when given, is a sub-path joined onto *url*.
    """
    if url2:
        passed_url = os.path.join(url, url2)
    else:
        passed_url = url
    full_url = os.path.join(CDNBASE, passed_url)
    utils.logit("download_file returning full URL:", full_url)
    return redirect(full_url)
def main_page():
    """Render the downloads landing page."""
    return render_template("downloads.html")
def search_dls():
    """Run a download search for the term posted from the search form.

    Stores the term in the module-level ``search_term`` so the result
    renderer (via ``_run_query``) can highlight matches.
    """
    global search_term
    search_term = request.form.get("term")
    # NOTE(review): the raw form value is interpolated directly into the SQL
    # fragment below — this is vulnerable to SQL injection and should use
    # query parameters instead. Also note the unparenthesized
    # "and ... or ..." mix: combined with "lpublish = 1" in _run_query, the
    # OR clauses are not scoped to published records.
    term = """ and mdesc like '%%%s%%'
    or ctitle like '%%%s%%'
    or cauthor like '%%%s%%' """ % (
        search_term,
        search_term,
        search_term,
    )
    return _run_query(term=term)
def all_dls():
    """List every published download (no search filter)."""
    return _run_query(term="")
def _update_link(link):
"""The database contains links in the format
'http://leafe.com/download/<fname>'. I want this to be more explicit by
specifying the link as '/download_file/<fname>', so this function does
that. When I convert the site to use exclusively this newer code, I can
update the database, making this function moot.
"""
return link.replace("/download/", "/download_file/")
def _run_query(term=None):
    """Fetch published downloads (optionally filtered by *term*), bucket them
    by type onto ``g``, and render the download list template.
    """
    term = term or ""
    crs = utils.get_cursor()
    sql = (
        """select * from files where lpublish = 1 %s
    order by ctype ASC, dlastupd DESC;"""
        % term
    )
    crs.execute(sql)
    recs = crs.fetchall()
    # Bucket the records by their ctype code onto the template globals.
    type_attrs = {
        "v": "vfp",
        "b": "dabo",
        "p": "python",
        "x": "osx",
        "c": "cb",
        "f": "fox2x",
        "o": "other",
    }
    for code, attr in type_attrs.items():
        setattr(g, attr, [d for d in recs if d["ctype"] == code])
    # Helpers exposed to the template.
    func_dict = {
        "hilite": partial(_hilite_match, search_term),
        "cost_calc": _cost_type,
        "any": any,
        "update_link": _update_link,
    }
    return render_template("download_list.html", **func_dict)
def upload():
    """Render the file-upload form with a blank status message."""
    g.message = ""
    return render_template("upload.html")
def upload_test(app):
    """Diagnostic endpoint: log the request headers and measure the body size.

    Reads the raw request stream in 8 KiB chunks, logging each chunk's
    length, and returns the total number of bytes read as a string.
    """
    import flask

    request = flask.request
    app.logger.warning("upload_test() called")
    app.logger.warning(f"opname: {request.headers.get('operation_name')}")
    app.logger.warning(f"Op-Name: {request.headers.get('Operation-Name')}")
    app.logger.warning(f"Headers: {request.headers}")
    chunk_size = 8192
    total_size = 0
    while True:
        chunk = request.stream.read(chunk_size)
        chunk_len = len(chunk)
        app.logger.warning(f"CHUNK: {chunk_len}")
        total_size += chunk_len
        # A zero-length read means the stream is exhausted.
        if not chunk_len:
            break
    app.logger.warning(f"Returning {total_size}")
    # Brief pause before responding — presumably to ease client-side
    # testing or let logs flush; TODO confirm why this is needed.
    time.sleep(1)
    return str(total_size)
def upload_file():
    """Accept an uploaded file, save it, record it in the DB, and email me.

    The file is written to UPLOAD_DIR, an (unpublished) row is inserted into
    the ``files`` table, and a notification email is sent. Aborts with 400
    when no file was supplied with the form.
    """
    post = request.form
    newfile = request.files.get("newfile")
    try:
        newname = newfile.filename
    except AttributeError:
        # Will happen if newfile is None
        abort(400, "No file specified")
    # Flatten any path separators in the client-supplied name so the file
    # cannot land outside of UPLOAD_DIR.
    target_file = os.path.join(UPLOAD_DIR, newname.replace(os.sep, "_"))
    with open(target_file, "wb") as file_obj:
        shutil.copyfileobj(newfile.stream, file_obj)
    newfile.stream.close()
    file_size = os.stat(target_file)[stat.ST_SIZE]
    fsize = utils.human_fmt(file_size).replace(" ", "")
    # Don't use the CDN; use the generic download URL that will redirect.
    fldr = {"c": "cb", "d": "dabo"}.get(post["section"], "")
    cfile = os.path.join(DLBASE, fldr, newname)
    sql = """INSERT INTO files (ctype, ctitle, mdesc, cfile, ccosttype, ncost,
    csize, cauthor, cauthoremail, dlastupd, lpublish)
    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
    # lpublish is False: uploads stay hidden until manually reviewed.
    args = (
        post.get("section"),
        post.get("title"),
        post.get("description"),
        cfile,
        post.get("file_license"),
        post.get("cost"),
        fsize,
        post.get("author"),
        post.get("author_email"),
        datetime.date.today(),
        False,
    )
    crs = utils.get_cursor()
    crs.execute(sql, args)
    body = """Originating IP = %s
    Section = %s
    Title = %s
    File = %s
    License = %s
    Cost = %s
    Size = %s
    Author = %s
    Email = %s
    Description:
    %s
    """ % (
        request.remote_addr,
        post.get("section"),
        post.get("title"),
        newname,
        post.get("file_license"),
        post.get("cost"),
        fsize,
        post.get("author"),
        post.get("author_email"),
        post.get("description"),
    )
    msg = """From: File Uploads <files@leafe.com>
    X-Mailer: flask script
    To: Ed Leafe <ed@leafe.com>
    Subject: New Uploaded File
    Date: %s
    %s
    """ % (
        time.strftime("%c"),
        body,
    )
    # Use a context manager so the SMTP connection is always QUITed/closed;
    # the original code left the connection open.
    with smtplib.SMTP("mail.leafe.com") as smtp:
        smtp.sendmail("files@leafe.com", "ed@leafe.com", msg)
    g.message = "Your file has been uploaded."
    return render_template("upload.html")
|
{"/threadize.py": ["/utils.py"], "/twitterthread.py": ["/threadize.py"], "/ircsearch.py": ["/utils.py"], "/zipcodes.py": ["/utils.py"], "/leafe.py": ["/archives.py", "/art.py", "/downloads.py", "/drstandup.py", "/galleries.py", "/ircsearch.py", "/twitterthread.py", "/zipcodes.py"], "/cloud_upload.py": ["/utils.py"], "/downloads.py": ["/utils.py"], "/galleries.py": ["/utils.py"], "/archives.py": ["/utils.py"]}
|
42,668,261
|
EdLeafe/leafe
|
refs/heads/main
|
/galleries.py
|
from __future__ import print_function
import os
import random
import six
from six.moves.urllib import parse
from flask import g, render_template
import utils
DO_BASE = "https://com-leafe-images.nyc3.cdn.digitaloceanspaces.com/galleries"
def index():
    """Show the listing of all photo galleries."""
    g.gallery_names = utils.get_gallery_names()
    return render_template("gallery_listing.html")
def show_gallery(gallery_name):
    """Display a single gallery: map each photo's full CDN URL to its metadata."""
    g.gallery_name = gallery_name
    all_photos = utils.get_photos_in_gallery(gallery_name)
    photo_map = {}
    for ph, md in all_photos.items():
        full_url = six.ensure_text(os.path.join(DO_BASE, parse.quote(ph)))
        photo_map[full_url] = md
    g.photos = photo_map
    # random.shuffle(g.photos)
    return render_template("gallery.html")
|
{"/threadize.py": ["/utils.py"], "/twitterthread.py": ["/threadize.py"], "/ircsearch.py": ["/utils.py"], "/zipcodes.py": ["/utils.py"], "/leafe.py": ["/archives.py", "/art.py", "/downloads.py", "/drstandup.py", "/galleries.py", "/ircsearch.py", "/twitterthread.py", "/zipcodes.py"], "/cloud_upload.py": ["/utils.py"], "/downloads.py": ["/utils.py"], "/galleries.py": ["/utils.py"], "/archives.py": ["/utils.py"]}
|
42,668,262
|
EdLeafe/leafe
|
refs/heads/main
|
/archives.py
|
import datetime
import math
import pprint
import re
import string
from textwrap import TextWrapper
import time
import elasticsearch
from flask import abort, Flask, g, render_template, request, session, url_for
import utils
ADMIN_IP = "108.205.7.108"
LINK_PAT = re.compile(r"(https?://[^\s]+)")
PLACEHOLDER_TEXT = "ABCDEF%sZYXWVU"
# Elasticsearch doesn't alllow for accessing more than 10K records, even with
# using offsets. There are ways around it, but really, a search that pulls more
# than 10K is not a very good search.
MAX_RECORDS = 10000
LIMIT_MSG = f"Note: Result sets are limited to {MAX_RECORDS:,} records"
es_client = elasticsearch.Elasticsearch(host="dodata")
# My original names in the DB suck, so...
DB_TO_ELASTIC_NAMES = {
"imsg": "msg_num",
"clist": "list_name",
"csubject": "subject",
"cfrom": "from",
"tposted": "posted",
"cmessageid": "message_id",
"creplytoid": "replyto_id",
"mtext": "body",
"id": "id",
}
ELASTIC_TO_DB_NAMES = {v: k for k, v in DB_TO_ELASTIC_NAMES.items()}
def db_names_from_elastic(recs):
    """Translate each record's elasticsearch field names back to the legacy
    database column names (unknown fields map to a None key)."""
    return [
        {ELASTIC_TO_DB_NAMES.get(key): val for key, val in rec.items()}
        for rec in recs
    ]
def _extract_records(resp, translate_to_db=True):
    """Pull the hit records out of an elasticsearch response.

    Parses each record's 'posted' field into a datetime (two formats occur
    in the index), optionally translates field names back to the legacy DB
    column names, and returns a list of DotDicts. The number of records
    needing the fallback date format is recorded on g.date_excepts.
    """
    recs = [hit["_source"] for hit in resp["hits"]["hits"]]
    excepts = 0
    for rec in recs:
        # Try the ISO "T"-separated format first; fall back to the
        # space-separated variant and count how often that happens.
        try:
            rec["posted"] = datetime.datetime.strptime(rec["posted"], "%Y-%m-%dT%H:%M:%S")
        except ValueError:
            rec["posted"] = datetime.datetime.strptime(rec["posted"], "%Y-%m-%d %H:%M:%S")
            excepts += 1
    source = db_names_from_elastic(recs) if translate_to_db else recs
    allrecs = [utils.DotDict(rec) for rec in source]
    g.date_excepts = excepts
    return allrecs
def _get_sort_order(order_by):
return {
"recent_first": "posted:desc",
"oldest_first": "posted:asc",
"author_name": "from:asc",
"natural": "",
# "recent_first": {"posted": "desc"},
# "oldest_first": {"posted": "asc"},
# "author_name": {"from": "asc"},
# "natural": "",
}.get(order_by)
def _proper_listname(val):
return {
"profox": "ProFox",
"prolinux": "ProLinux",
"propython": "ProPython",
"valentina": "Valentina",
"codebook": "Codebook",
"dabo-dev": "Dabo-Dev",
"dabo-users": "Dabo-Users",
}.get(val, "")
def _listAbbreviation(val):
return {
"profox": "p",
"prolinux": "l",
"propython": "y",
"valentina": "v",
"codebook": "c",
"testing": "t",
"dabo-dev": "d",
"dabo-users": "u",
}.get(val, "")
def _listFromAbbreviation(val):
return {
"p": "profox",
"l": "prolinux",
"y": "propython",
"v": "valentina",
"c": "codebook",
"t": "testing",
"d": "dabo-dev",
"u": "dabo-users",
}.get(val, "")
def archives_form():
    """Display the archive search form, pre-selecting the last-used list."""
    g.listname = session.get("listname", "")
    return render_template("archives_form.html")
def _format_author(val):
split_val = val.split("<")[0]
if not split_val:
return val
return split_val.replace('"', "")
def _format_date(val):
return val.strftime("%Y-%m-%d at %H:%M:%S")
def _format_short_date(val):
return val.strftime("%Y-%m-%d %H:%M")
def _pager_text():
    """Build the HTML pagination bar for the current result page.

    Reads g.page, g.total_pages, and g.url (set by the results views) and
    returns a Materialize-style pagination <ul>; returns "" when there is
    only a single page.
    """
    thispage = g.page
    pagecount = g.total_pages
    if pagecount == 1:
        return ""
    # Drop any existing query string; each page link appends its own ?page=N.
    url = g.url.split("?")[0]
    prevpage = max(1, thispage - 1)
    nextpage = min(thispage + 1, pagecount)
    page_links = []
    for pg in range(pagecount):
        pgnum = pg + 1
        # Highlight the current page; others get the ripple-effect class.
        linkstate = "active" if thispage == pgnum else "waves-effect"
        page_links.append(f"""<li class={linkstate}><a href={url}?page={pgnum}>{pgnum}</a></li>""")
    page_link_text = "\n ".join(page_links)
    # Chevron prev/next controls; greyed out at the first/last page.
    if thispage == 1:
        prev_text = """<li class="grey-text"><i class="material-icons">chevron_left</i></li>"""
    else:
        prev_text = f"""<li class="waves-effect"><a href="{url}?page={prevpage}"><i class="material-icons">chevron_left</i></a></li>"""
    if thispage == pagecount:
        next_text = """<li class="grey-text"><i class="material-icons">chevron_right</i></li>"""
    else:
        next_text = f"""<li class="waves-effect"><a href="{url}?page={nextpage}"><i class="material-icons">chevron_right</i></a></li>"""
    return f""" <div class="row">
      <ul class="pagination">
        {prev_text}
        {page_link_text}
        {next_text}
      </ul>
    </div>
    """
def _linkify(txt):
replacements = []
set_links = set(LINK_PAT.findall(txt))
links = list(set_links)
# Shorter links may be a subset of longer links, so replace longer ones first.
links.sort(key=len, reverse=True)
for num, link in enumerate(links):
# Some links can contain asterisks, which blows up re.
link = link.replace("*", "[*]")
try:
linked = f'<a href="{link}" target="_blank">{link}</a>'
except Exception:
# Funky characters; not much you can do.
continue
# Replace the original links in the text with placeholders
txt = txt.replace(link, PLACEHOLDER_TEXT % num)
replacements.append(linked)
# OK, now replace the placeholders with the links
for num, link in enumerate(replacements):
target = PLACEHOLDER_TEXT % num
txt = txt.replace(target, replacements[num])
return txt
def _wrap_text(txt):
    """Convert a plain-text message body into simple HTML paragraphs.

    Literal '<' characters are escaped so message text cannot inject
    markup, URLs are converted to anchors, and quoted lines (starting
    with '>') are styled as grey italic paragraphs.
    """
    # BUG FIX: the checked-in code had the no-op replace("<", "<");
    # "&lt;" is the clearly intended escape target.
    txt = txt.replace("<", "&lt;")
    txt = _linkify(txt)
    paragraphs = []
    for ln in txt.splitlines():
        if ln.startswith(">"):
            paragraphs.append(f'<p style="font-style: italic; color: grey">{ln}</p>')
        else:
            paragraphs.append(f"<p>{ln}</p>")
    return "".join(paragraphs)
def _regexp_casing(txt):
"""Since elasticsearch doesn't support case-insensitive searches, this is a
brute-force method to accomplish the same thing.
"""
def case_dupe(s):
if s in string.ascii_letters:
return "[" + s.upper() + s.lower() + "]"
elif s in '.?+*|{}[]()"\\)]}':
# if s in '.?+*|{}[]()"\\)]}':
return f"\\{s}"
return s
return "".join([case_dupe(char) for char in txt])
def _get_message(msg_num):
    """Fetch the single archived message with the given msg_num.

    Aborts with a 404 when no such message exists in the index.
    """
    msg_num = int(msg_num)
    query = {"body": {"query": {"match": {"msg_num": msg_num}}}}
    resp = es_client.search(index="email", **query)
    allrecs = _extract_records(resp, translate_to_db=False)
    if not allrecs:
        abort(404, "No message with id=%s exists" % msg_num)
    return allrecs[0]
def show_full_thread(msg_num):
    """Show every message in the thread containing *msg_num*, oldest first.

    Thread membership is determined by subject: the anchor message's
    subject is stripped of leading "Re:" prefixes and then matched
    (case-insensitively, tolerating "Re:" prefixes) against all messages.
    """
    g.msg_num = msg_num = int(msg_num)
    g.listname = session.get("listname")
    g.message = _get_message(g.msg_num)
    g.subject = g.message["subject"]
    # Strip any stack of leading "Re:" markers from the subject.
    pat = re.compile(r"^(re: ?)*", re.I)
    clean_subj = pat.sub("", g.subject)
    # Rebuild a pattern that tolerates "Re:" prefixes and ignores case
    # (elasticsearch regexp queries are case-sensitive).
    subj_regexp = "([Rr][Ee]: ?)*" + _regexp_casing(clean_subj)
    kwargs = {
        "body": {
            "query": {"regexp": {"subject": subj_regexp}},
            "sort": {"posted": "asc"},
        }
    }
    resp = es_client.search(index="email", **kwargs)
    allrecs = _extract_records(resp, translate_to_db=False)
    if not allrecs:
        abort(404, "No message with id=%s exists" % msg_num)
    g.messages = allrecs
    # Helpers exposed to the template.
    func_dict = {
        "fmt_author": _format_author,
        "wrap": _wrap_text,
        "fmt_short_date": _format_short_date,
    }
    return render_template("fullthread.html", **func_dict)
def show_message_by_msgid(msg_id):
    """Display a single message looked up by its email Message-ID.

    Also records the message's list in the session and computes prev/next
    navigation when the message is part of the session's current search
    results.
    """
    kwargs = {"body": {"query": {"match": {"message_id": msg_id}}}}
    resp = es_client.search(index="email", **kwargs)
    allrecs = _extract_records(resp, translate_to_db=False)
    if not allrecs:
        abort(404, "No message with id=%s exists" % msg_id)
    g.message = allrecs[0]
    g.msg_num = msg_num = g.message.get("msg_num")
    g.subject = g.message.get("subject")
    g.author = _format_author(g.message.get("from"))
    g.copy_year = g.message.get("posted").year
    g.posted = _format_date(g.message.get("posted"))
    g.body = _wrap_text(g.message.get("body"))
    g.session = session
    # Remember which list this message belongs to for subsequent views.
    list_abb = g.message.get("list_name")
    g.listname = session["listname"] = _listFromAbbreviation(list_abb)
    full_results = session.get("full_results", [])
    try:
        # Locate this message in the current search results to wire up
        # previous/next navigation links.
        pos = full_results.index(msg_num)
        g.prev_msg_num = full_results[pos - 1] if pos > 0 else ""
        g.next_msg_num = full_results[pos + 1] if pos + 1 < len(full_results) else ""
    except ValueError:
        # Not coming from a search
        g.prev_msg_num = g.next_msg_num = ""
    return render_template("message.html")
def show_message(msg_num):
    """Display a single message looked up by its msg_num.

    Computes prev/next navigation when the message is part of the session's
    current search results. (Largely parallel to show_message_by_msgid.)
    """
    g.msg_num = msg_num = int(msg_num)
    g.message = _get_message(msg_num)
    g.subject = g.message.get("subject")
    g.author = _format_author(g.message.get("from"))
    g.copy_year = g.message.get("posted").year
    g.posted = _format_date(g.message.get("posted"))
    g.body = _wrap_text(g.message.get("body"))
    g.session = session
    g.listname = session.get("listname")
    full_results = session.get("full_results", [])
    try:
        # Locate this message in the current search results to wire up
        # previous/next navigation links.
        pos = full_results.index(msg_num)
        g.prev_msg_num = full_results[pos - 1] if pos > 0 else ""
        g.next_msg_num = full_results[pos + 1] if pos + 1 < len(full_results) else ""
    except ValueError:
        # Not coming from a search
        g.prev_msg_num = g.next_msg_num = ""
    return render_template("message.html")
def archives_results_GET():
    """Render a page of archive search results from session-stored state.

    archives_results_POST stores the built query (kwargs) and all result
    metadata in the session; this handler re-runs that query at the offset
    implied by the requested ?page=N and renders that batch.
    """
    g.listname = session["listname"]
    g.elapsed = session["elapsed"]
    g.total_pages = session["total_pages"]
    g.limit_msg = session["limit_msg"]
    g.num_results = session["num_results"]
    g.formatted_results = session["formatted_results"]
    g.total_hits = session["total_hits"]
    g.full_results = session["full_results"]
    g.batch_size = session["batch_size"]
    g.page = int(request.args["page"])
    g.url = request.url
    g.remote_addr = request.remote_addr
    g.kwargs = session["kwargs"]
    g.offset = (g.page - 1) * g.batch_size
    # Make sure we don't exceed elasticsearch's limits
    g.kwargs["from_"] = min(g.offset, MAX_RECORDS - g.batch_size)
    resp = es_client.search(index="email", **g.kwargs)
    g.results = _extract_records(resp, translate_to_db=False)
    g.pager_text = _pager_text()
    g.session = session
    # Helpers exposed to the template.
    func_dict = {
        "enumerate": enumerate,
        "fmt_author": _format_author,
        "fmt_date": _format_date,
    }
    return render_template("archive_results.html", **func_dict)
def archives_results_POST():
    """Run an archive search from the posted form and render the first page.

    Builds the elasticsearch query from the form fields, stores everything
    the GET pager needs in the session, then executes the query and renders
    the requested batch of results.
    """
    # Clear any old session data
    for key in (
        "listname",
        "elapsed",
        "total_pages",
        "limit_msg",
        "num_results",
        "formatted_results",
        "total_hits",
        "full_results",
        "batch_size",
        "kwargs",
    ):
        session.pop(key, None)
    g.listname = session["listname"] = request.form.get("listname")
    body_terms = request.form.get("body_terms")
    subject = request.form.get("subject_phrase")
    author = request.form.get("author")
    start_date = request.form.get("start_date")
    end_date = request.form.get("end_date")
    # We want to include items on the end date, so extend the search to the
    # following date.
    end_date_dt = datetime.datetime.strptime(end_date, "%Y-%m-%d") + datetime.timedelta(days=1)
    end_date_plus = end_date_dt.strftime("%Y-%m-%d")
    sort_order = _get_sort_order(request.form.get("sort_order"))
    include_OT = bool(request.form.get("chk_OT"))
    include_NF = bool(request.form.get("chk_NF"))
    batch_size = int(request.form.get("batch_size"))
    kwargs = utils.search_term_query(body_terms, "body", start_date, end_date_plus)
    # Positive and negative boolean clause lists of the query.
    bqbm = kwargs["body"]["query"]["bool"]["must"]
    neg_bqbm = kwargs["body"]["query"]["bool"]["must_not"]
    listabb = _listAbbreviation(g.listname)
    if listabb:
        utils.add_match(bqbm, "list_name", listabb)
    if subject:
        utils.add_match_phrase(bqbm, "fulltext_subject", subject)
    if author:
        expr = "*%s*" % author
        bqbm.append({"wildcard": {"from": expr}})
    # ProFox only: optionally exclude [OT]/[NF] tagged threads.
    if listabb == "p":
        if not include_OT:
            utils.add_match(neg_bqbm, "subject", "[OT]")
        if not include_NF:
            utils.add_match(neg_bqbm, "subject", "[NF]")
    if sort_order:
        kwargs["sort"] = [sort_order]
    # Drop empty must/must_not clause lists.
    if not bqbm:
        del kwargs["body"]["query"]["bool"]["must"]
    if not neg_bqbm:
        del kwargs["body"]["query"]["bool"]["must_not"]
    session["query_body"] = kwargs["body"]
    # Get the total number of hits. This will return the total without
    # pulling all the data.
    kwargs["size"] = 10000
    kwargs["_source"] = ["msg_num"]
    startTime = time.time()
    utils.debugout("KWARGS", kwargs)
    resp = es_client.search(index="email", **kwargs)
    utils.debugout("RESP", resp)
    session["elapsed"] = g.elapsed = "%.4f" % (time.time() - startTime)
    g.full_results = [r["_source"]["msg_num"] for r in resp["hits"]["hits"]]
    session["full_results"] = g.full_results
    session["num_results"] = session["total_hits"] = g.num_results = g.total_hits = resp["hits"][
        "total"
    ]["value"]
    if g.num_results == MAX_RECORDS:
        # The hit count was capped; re-query with track_total_hits for the
        # true total (size=0 avoids pulling any documents).
        kwargs["size"] = 0
        tot = es_client.search(index="email", track_total_hits=True, **kwargs)
        session["total_hits"] = g.total_hits = tot["hits"]["total"]["value"]
    session["formatted_results"] = g.formatted_results = f"{g.num_results:,}"
    g.limit_msg = "" if g.total_hits <= MAX_RECORDS else LIMIT_MSG
    session["limit_msg"] = g.limit_msg
    # Now run the query for real
    kwargs.pop("_source")
    kwargs["size"] = batch_size
    g.offset = int(request.form.get("page", "0")) * batch_size
    # Make sure we don't exceed elasticsearch's limits
    kwargs["from_"] = min(g.offset, MAX_RECORDS - batch_size)
    session["batch_size"] = batch_size
    session["kwargs"] = g.kwargs = kwargs
    # BUG FIX: the closing tag was "</pre" (missing ">"), which emitted
    # broken HTML in the debug display.
    g.kwargs = f"<pre>{pprint.pformat(kwargs)}</pre>"
    resp = es_client.search(index="email", **kwargs)
    # (removed an unused local that formatted the hit total a second time)
    g.results = _extract_records(resp, translate_to_db=False)
    g.session = session
    # Set up environment vals
    g.url = request.url
    g.remote_addr = request.remote_addr
    g.from_admin = request.remote_addr == ADMIN_IP
    page = int(request.form.get("page", "1"))
    calc_pages = int(math.ceil(float(g.num_results) / batch_size))
    max_pages = int(MAX_RECORDS / batch_size)
    session["total_pages"] = g.total_pages = min(calc_pages, max_pages)
    page = min(page, g.total_pages)
    g.page = page
    g.pager_text = _pager_text()
    # Helpers exposed to the template.
    func_dict = {
        "enumerate": enumerate,
        "fmt_author": _format_author,
        "fmt_date": _format_date,
    }
    return render_template("archive_results.html", **func_dict)
# BATCH_SIZE = 250
# MAX_PAGES = int(MAX_RECORDS / BATCH_SIZE)
|
{"/threadize.py": ["/utils.py"], "/twitterthread.py": ["/threadize.py"], "/ircsearch.py": ["/utils.py"], "/zipcodes.py": ["/utils.py"], "/leafe.py": ["/archives.py", "/art.py", "/downloads.py", "/drstandup.py", "/galleries.py", "/ircsearch.py", "/twitterthread.py", "/zipcodes.py"], "/cloud_upload.py": ["/utils.py"], "/downloads.py": ["/utils.py"], "/galleries.py": ["/utils.py"], "/archives.py": ["/utils.py"]}
|
42,757,662
|
pomm5555/openami.amiserver
|
refs/heads/main
|
/PlugInsFolder/PowerSwitch.py
|
import os, re
from AmiTree import Container, SwitchContainer, TextfieldContainer
from PlugIn import PlugIn
from amiConfig import Config
class PowerSwitch(PlugIn):
    """Plugin exposing power-switch style controls: two radio on/off
    switches and a volume text field."""

    def __init__(self, token, configFile):
        PlugIn.__init__(self)
        self.architecture = "all"
        # The plugin's root container.
        self.content = Container("plugin", token, "This is a GEneric Interface")
        # Both switches share the same play/stop command targets.
        play_cmd = Config.jid + '/Defaults/audioplay'
        stop_cmd = Config.jid + '/Defaults/audiostop'
        for switch_name in ("MunichRadio", "MunichRadio2tet"):
            self.content.addChild(
                SwitchContainer("cmd", switch_name, "This is a Switch Interface",
                                on=play_cmd, off=stop_cmd)
            )
        self.content.addChild(
            TextfieldContainer("cmd", "Volume", "This is a Volume Textfield",
                               target=Config.jid + "/Defaults/setvol")
        )
|
{"/PlugInsFolder/Audio/iTunes.py": ["/AmiTree.py", "/PlugIn.py", "/amiConfig.py"], "/PlugInsFolder/Sensors/Temperature.py": ["/AmiTree.py", "/PlugIn.py"], "/PlugInsSupport/lcd.py": ["/PlugInsSupport/avrBridgePy.py"], "/amiLog.py": ["/amiConfig.py"], "/PlugInsFolder/PowerSwitch.py": ["/AmiTree.py", "/PlugIn.py", "/amiConfig.py"]}
|
42,763,415
|
Campagne8758/climatesite
|
refs/heads/master
|
/helpers.py
|
import xarray as xr
import json
import requests
def temp_anomaly(lat_in, lon_in):
    """Average temperature anomaly around (lat_in, lon_in).

    Returns the mean of all valid land data points in a window around the
    coordinates, rounded to 3 decimals, or the string 'N/A' when no land
    data exists in the window (callers must handle both return types).
    """
    radius = 5
    # SOURCE: http://berkeleyearth.org/data/
    tmps = xr.open_dataset("Data/Raw_TAVG_LatLong1.nc")
    # Shift from signed degrees to the dataset's 0-based grid indices —
    # assumes a 1-degree lat/lon grid; TODO confirm against the dataset.
    lat = lat_in + 90
    lon = lon_in + 180
    # Data points collected from a radius of 5x5 degrees on latitude and longitude in 1 degrees increments for the previous 24 months.
    # NOTE(review): the time slice 3242:3253 selects 11 entries, not 24 —
    # confirm against the dataset's time axis.
    tmp_values = tmps.temperature.values[3242:3253, lat - radius:lat + radius , lon - radius: lon + radius]
    total_temp = 0
    avg_temp = 0
    data_points = 0
    # sum of every valid data point (only land measurements considered;
    # ocean cells are NaN in this dataset — presumably, verify)
    for year in tmp_values:
        for month in year:
            for j in month:
                if str(j) != "nan":
                    total_temp += float(j)
                    data_points += 1
    # calculate average and return only if at least 1 data point was found
    if data_points > 0:
        avg_temp = total_temp / data_points
        return round(avg_temp,3)
    else:
        return 'N/A'
def calc_emissions(country):
    """Return [1990 emissions, 2018 emissions] for *country* from the local
    World Bank CO2 data file.

    Missing values fall back to the string '0'. "Russia" is translated to
    the World Bank's "Russian Federation" naming.
    """
    year_start = '1990'
    year_end = '2018'
    emissions1990 = '0'
    emissions2018 = '0'
    if country == "Russia":
        country = "Russian Federation"
    # Open json downloaded with world bank API
    with open('Data/co2.json') as co2:
        data = json.load(co2)
    for entry in data[1]:
        if entry['country']['value'] != country:
            continue
        if entry['date'] == year_start:
            emissions1990 = entry['value']
        elif entry['date'] == year_end:
            emissions2018 = entry['value']
    return [emissions1990, emissions2018]
def geocode(lat,lng):
    """Resolve (lat, lng) to a country name via the Google geocoding API.

    Returns the country's long name, or 'N/A' when the API finds nothing.
    """
    # Uses the google API to find country location based on coordinates
    # SECURITY NOTE(review): the API key is hard-coded into the source and
    # the request URL; it should live in configuration/environment instead.
    url = f'https://maps.googleapis.com/maps/api/geocode/json?latlng={lat},{lng}&result_type=country&key=AIzaSyDEBjX-IQaXy0dXtike6l2Sm4ileHItbDw'
    response = requests.get(url)
    # NOTE(review): response.json() is parsed twice; could be parsed once.
    if response.json()['status'] == 'ZERO_RESULTS':
        return 'N/A'
    else:
        country = response.json()['results'][0]['address_components'][0]['long_name']
        return country
|
{"/app.py": ["/helpers.py"]}
|
42,763,416
|
Campagne8758/climatesite
|
refs/heads/master
|
/app.py
|
from flask import Flask, render_template, request
from helpers import temp_anomaly, calc_emissions, geocode
import requests
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def index():
    """Main page.

    GET: geolocate the visitor by IP and show climate data for their area.
    POST: show climate data for the coordinates submitted via the form,
    falling back to a default location (Italy) when the form is empty.
    """
    if request.method == 'GET':
        # get ip address to get user's location
        ip_address = "24.48.0.1" #this will need to be: request.headers['X-Real-IP'] currently using test IP address - the line is specific for pythoneverywhere
        # Code to get initial user location:
        url = f"http://ip-api.com/json/{ip_address}?fields=country,countryCode,lat,lon"
        response = requests.get(url)
        location = response.json()
        country = location["country"]
        # variable to pass to helpers and JS with coordinates
        lat_in = int(location["lat"])
        lon_in = int(location["lon"])
        loc_in = {'lat' : lat_in, 'lon' : lon_in}
        # call to functions in helpers to derive arguments to pass to web page
        tmp_display = temp_anomaly(lat_in, lon_in)
        emissions = calc_emissions(country)
        emissions_1990 = emissions[0]
        emissions_2018 = emissions[1]
        return render_template("index.html", tmp_display=tmp_display, loc_in=loc_in, emissions_2018=emissions_2018, emissions_1990=emissions_1990, country=country)
    else:
        # POST method after submitting form
        lat_in = request.form.get("lat")
        lon_in = request.form.get("lon")
        # Handling empty form: fall back to fixed coordinates in Italy.
        if not lat_in or not lon_in:
            tmp_display = temp_anomaly(44,9)
            country = 'Italy'
            loc_in = {'lat' : 44, 'lon' : 9}
            emissions = calc_emissions(country)
            emissions_1990 = emissions[0]
            emissions_2018 = emissions[1]
            return render_template("coord.html", tmp_display=tmp_display, loc_in=loc_in, emissions_2018=emissions_2018, emissions_1990=emissions_1990, country=country)
        loc_in = {'lat' : lat_in, 'lon' : lon_in}
        tmp_display = temp_anomaly(int(lat_in), int(lon_in))
        country = geocode(lat_in,lon_in)
        emissions = calc_emissions(country)
        emissions_1990 = emissions[0]
        emissions_2018 = emissions[1]
        return render_template("coord.html", tmp_display=tmp_display, loc_in=loc_in, emissions_2018=emissions_2018, emissions_1990=emissions_1990, country=country)
@app.route("/explain")
def explain():
    """Explanation page: geolocate the visitor by IP so the page can show
    location-aware content."""
    ip_address = "24.48.0.1" #this will need to be: request.headers['X-Real-IP'] currently using test IP address - the line is specific for pythoneverywhere
    # Code to get initial user location:
    url = f"http://ip-api.com/json/{ip_address}?fields=country,countryCode,lat,lon"
    response = requests.get(url)
    location = response.json()
    country = location["country"]
    lat_in = int(location["lat"])
    lon_in = int(location["lon"])
    loc_in = {'lat' : lat_in, 'lon' : lon_in}
    return render_template("explain.html", loc_in=loc_in)
|
{"/app.py": ["/helpers.py"]}
|
42,874,214
|
Mawinnn/ids
|
refs/heads/master
|
/baseline.py
|
import pandas as pd
import numpy as np
import sys
import sklearn
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
import MSomte as ms
from imblearn.over_sampling import SMOTE
from collections import Counter
import processing
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import f1_score
if __name__ == '__main__':
    # NSL-KDD column names: 41 features plus the class label.
    col_names = ["duration", "protocol_type", "service", "flag", "src_bytes",
                 "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
                 "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
                 "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
                 "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
                 "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
                 "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
                 "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
                 "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
                 "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
    df = pd.read_csv("KDDTrain+.csv", header=None, names=col_names)
    df_test = pd.read_csv("KDDTest+_2.csv", header=None, names=col_names)
    df = processing.changetag(df)
    X = np.array(df)[:, :-1]  # All rows, omit last column
    y = np.ravel(np.array(df)[:, -1:])  # All rows, only the last column
    # Balance the training classes with SMOTE (training data only).
    sm = SMOTE(random_state=42)
    X_train, y_train = sm.fit_resample(X, y.astype('int'))
    # Scale features to [0, 1]; the scaler is fitted on the training set.
    std = MinMaxScaler()
    X_train = std.fit_transform(X_train)
    df_test = processing.changetag(df_test)
    X_test = np.array(df_test)[:, :-1]  # All rows, omit last column
    y_test = np.ravel(np.array(df_test)[:, -1:])  # All rows, only the last column
    # BUG FIX: the test set must be transformed with the scaler fitted on
    # the training data. The original called fit_transform here, which
    # refits on the test set — leaking test statistics and applying an
    # inconsistent scaling between train and test.
    X_test = std.transform(X_test)
    # Random forest baseline.
    rfc = RandomForestClassifier(n_estimators=10, random_state=42, max_depth=4)
    rfc.fit(X_train, y_train.astype('int'))
    print(rfc.score(X_test, y_test.astype('int')))
    # Reuse the already-fitted model instead of fitting an identical one a
    # second time (the original refit before predicting).
    y_pre = rfc.predict(X_test)
    print(f1_score(y_test, y_pre, average='macro'))
    # SVM baseline, kept for reference:
    # svm_model = SVC(kernel='rbf', C=1000, gamma=0.001)  # best model found
    # svm_model.fit(X_train, y_train.astype('int'))
    # print(svm_model.score(X_test, y_test.astype('int')))
|
{"/main.py": ["/MSomte.py", "/processing.py"], "/baseline.py": ["/MSomte.py", "/processing.py"], "/test.py": ["/MSomte.py", "/processing.py"]}
|
42,874,215
|
Mawinnn/ids
|
refs/heads/master
|
/MSomte.py
|
from sklearn.datasets import make_classification
from collections import Counter
from imblearn.over_sampling import SMOTE
import random
from sklearn.neighbors import NearestNeighbors
import numpy as np
import matplotlib.pyplot as plt
from numpy import *
class MSmote(object):
    """Modified SMOTE oversampler.

    Synthesizes new minority-class samples by interpolating between each
    sample and one of its k nearest neighbors, biasing the interpolation
    factor by each point's distance from the data's center (see
    __manhattan_distance — which, despite its name, uses the inverse
    covariance matrix, i.e. a Mahalanobis-style distance).
    """

    def __init__(self, N=50, k=5, r=2):
        # Initialize self.N, self.k, self.r, self.newindex.
        # N is the oversampling percentage; k the neighbor count.
        self.N = N
        self.k = k
        # self.r is the distance-determining factor (the Minkowski power
        # parameter p passed to NearestNeighbors).
        self.r = r
        # self.newindex counts how many synthetic samples have been created.
        self.newindex = 0

    # Build the training function
    def fit(self, samples):
        """Generate and return the synthetic samples for *samples*.

        NOTE(review): the code mixes numpy-style slicing with pandas .iloc
        below, so *samples* is presumably a pandas DataFrame — confirm.
        """
        # Initialize self.samples, self.T, self.numattrs
        self.samples = samples
        # self.T is the number of minority samples; self.numattrs the
        # number of features per sample.
        self.T, self.numattrs = self.samples.shape
        # Check whether N% is below 100%
        if (self.N < 100):
            # If so, randomly keep only N*T/100 samples as the new
            # minority set
            np.random.shuffle(self.samples)
            self.T = int(self.N * self.T / 100)
            self.samples = self.samples[0:self.T, :]
            # N% becomes 100%
            self.N = 100
        # Check whether T is not larger than the neighbor count k
        if (self.T <= self.k):
            # If so, shrink k to T-1
            self.k = self.T - 1
        # Express N as a multiple of 100
        N = int(self.N / 100)
        # Array that will hold the synthesized samples
        self.synthetic = np.zeros((self.T * N, self.numattrs))
        # Fit the k-nearest-neighbors searcher (k+1 because each point's
        # nearest neighbor is itself)
        neighbors = NearestNeighbors(n_neighbors=self.k + 1,
                                     algorithm='ball_tree',
                                     p=self.r).fit(self.samples)
        # Loop over all input samples
        for i in range(len(self.samples)):
            # NOTE(review): `mid` is never used, and the *parameter*
            # `samples` is used here rather than `self.samples`, which
            # differ after the N < 100 shuffle/truncate branch — confirm
            # this is intended.
            mid = samples.iloc[i]
            samples_np = np.array(samples.iloc[i])
            # Search the k nearest neighbors, dropping the point itself
            nnarray = neighbors.kneighbors(samples_np.reshape((1, -1)),
                                           return_distance=False)[0][1:]
            # Feed N, i, nnarray into the synthesis routine self._populate
            self.__populate(N, i, nnarray, samples)
        # Finally return the synthesized samples self.synthetic
        return self.synthetic

    # Synthetic-sample construction function
    def __populate(self, N, i, nnarray, samples):
        # Loop N times (the oversampling multiple)
        for j in range(N):
            # attrs was meant to hold the synthetic sample's features
            # NOTE(review): it is never used.
            attrs = []
            # Randomly pick one of the k nearest neighbors to synthesize
            # from
            nn = random.randint(0, self.k - 1)
            # Compute the difference vector (neighbor - current sample)
            diff = samples.iloc[nnarray[nn]] - samples.iloc[i]
            dis_nn = self.__manhattan_distance(samples, samples.iloc[nnarray[nn]])
            dis_i = self.__manhattan_distance(samples, samples.iloc[i])
            #print(dis_nn, dis_i)
            # Pick a random interpolation factor, biased toward whichever
            # point is closer to the data's center: 0.5-1 when the
            # neighbor is farther out, else 0-0.5.
            if (dis_nn > dis_i):
                gap = random.uniform(0.5, 1)
            else:
                gap = random.uniform(0, 0.5)
            # Store the new synthetic sample in self.synthetic
            self.synthetic[self.newindex] = self.samples.iloc[i] + gap * diff
            # Bump newindex: one more synthetic sample has been created
            self.newindex += 1

    def __manhattan_distance(self, x, narray):
        """Distance of *narray* from the feature-wise mean of *x*.

        NOTE(review): despite the name, this computes sqrt(d · invCov · dᵀ)
        — a Mahalanobis-style distance — not a Manhattan distance.
        """
        # Drop the non-continuous columns before computing statistics.
        x = x.drop(['protocol_type', 'land','wrong_fragment','urgent','num_outbound_cmds','is_host_login','label'], axis=1)
        x_mean = np.mean(np.array(x), axis=0)
        x = np.array(x)
        xT = x.T
        # Covariance matrix of the features and its inverse.
        D = np.cov(xT)
        invD = np.linalg.inv(D)
        narray_ch = narray.drop(['protocol_type', 'land','wrong_fragment','urgent','num_outbound_cmds','is_host_login','label'], axis=0)
        tp = narray_ch - x_mean
        # sqrt(tp · invD · tpᵀ)
        dis = np.sqrt(dot(dot(tp, invD), tp.T))
        return dis
|
{"/main.py": ["/MSomte.py", "/processing.py"], "/baseline.py": ["/MSomte.py", "/processing.py"], "/test.py": ["/MSomte.py", "/processing.py"]}
|
42,874,216
|
Mawinnn/ids
|
refs/heads/master
|
/main.py
|
import pandas as pd
import numpy as np
import sys
import sklearn
import matplotlib
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
import MSomte as ms
from imblearn.over_sampling import SMOTE
from collections import Counter
import processing
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
from sklearn.metrics import confusion_matrix
def heatmap(confusion_matrix,X_n,Y_n):
    """Render *confusion_matrix* as an annotated heatmap.

    confusion_matrix -- 2-D array of counts, indexable as cm[i, j].
    X_n              -- tick labels for the x axis.
    Y_n              -- tick labels for the y axis.
    """
    fig, ax = plt.subplots()
    ax.imshow(confusion_matrix)
    # One tick per label on each axis.
    ax.set_xticks(np.arange(len(X_n)))
    ax.set_yticks(np.arange(len(Y_n)))
    ax.set_xticklabels(X_n)
    ax.set_yticklabels(Y_n)
    # Rotate the x labels 45 degrees, right-aligned and anchored, so they stay readable.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Write each cell's value in white at the cell centre.
    for row in range(len(X_n)):
        for col in range(len(Y_n)):
            ax.text(col, row, confusion_matrix[row, col],
                    ha="center", va="center", color="w")
    ax.set_title("Confusion Matrix")
    fig.tight_layout()  # Auto-fit subplot parameters to the figure area.
    plt.show()          # Display the figure.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # NSL-KDD column schema: 41 features followed by the attack "label".
    col_names = ["duration", "protocol_type", "service", "flag", "src_bytes",
                 "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
                 "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
                 "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
                 "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
                 "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
                 "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
                 "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
                 "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
                 "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
    # for m in range(15,90,5):
    # n = m/100
    # print("---------%.2f---------"%n)
    # `a` encodes, in its 5 low bits (reversed below so index 0 is the MSB),
    # which of the five per-class detectors (Normal/Dos/Probe/R2L/U2R) use
    # the hand-picked feature-selection lists instead of all features.
    for a in range(8,9):
        print("---------------------%d--------------------"%a)
        # NOTE(review): `list` shadows the builtin of the same name.
        list =[a >>d & 1 for d in range(5)][::-1]
        print(list)
        df = pd.read_csv("KDDTrain+.csv", header=None, names=col_names)
        df_test = pd.read_csv("KDDTest+_2.csv", header=None, names=col_names)
        # Hold-out test features with multi-class labels stripped off.
        X_test_b = np.array(processing.changetag(df_test))[:, :-1]
        df_X = np.array(df)[:, :-1]
        df_y = np.ravel(np.array(df)[:, -1:])
        X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.45, random_state=42)
        df_training = pd.DataFrame(np.column_stack((X_train, y_train)), columns=col_names)
        df_testing = pd.DataFrame(np.column_stack((X_test, y_test)), columns=col_names)
        std = MinMaxScaler()
        # todo Normal
        # --- Binary "is normal traffic" detector: relabel, scale, optionally
        # --- feature-select, SMOTE-balance, then fit a random forest whose
        # --- class probabilities become meta-features for the final SVM.
        df_Normal = processing.changetag_Normal(df_training)
        df_Normal = std.fit_transform(df_Normal)
        # df_Normal = processing.Standardization(df_Normal)
        df_test_Normal = processing.changetag_Normal(df_testing)
        # NOTE(review): the scaler is re-fit on the test split (fit_transform)
        # instead of reusing the training statistics — confirm intended.
        df_test_Normal = std.fit_transform(df_test_Normal)
        if (list[0]==1):
            # Feature-selected variant: start from column 11 and stack the listed columns.
            df_Normal_fs, df_test_Normal_fs, X_test_a_Normalfs = df_Normal[:, 11], df_test_Normal[:, 11], X_test_b[:, 11]
            list_Normal = [3, 29, 25, 2, 36, 38, 28, 4]
            for i in list_Normal:
                df_Normal_0 = df_Normal[:, i]
                df_Normal_fs = np.column_stack((df_Normal_fs, df_Normal_0))
                df_test_Normal_0 = df_test_Normal[:, i]
                df_test_Normal_fs = np.column_stack((df_test_Normal_fs, df_test_Normal_0))
                X_test_a_0 = X_test_b[:, i]
                X_test_a_Normalfs = np.column_stack((X_test_a_Normalfs, X_test_a_0))
            # df_test_Normal = processing.Standardization(df_test_Normal)
            X = np.array(df_Normal_fs) # All rows, omit last column
            y = np.ravel(np.array(df_Normal)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            #smote
            sm = SMOTE(random_state=42)
            X_train_Normal, y_train_Normal = sm.fit_resample(X,y)
            #msmote
            # R2L_df = df[[each == 1 for each in df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=m)
            # samples = msmote.fit(R2L_df)
            # df = np.append(pd.DataFrame(df), samples, axis=0)
            # df = pd.DataFrame(df)
            # X_train = np.array(df)[:, :-1] # All rows, omit last column
            # y_train = np.ravel(np.array(df)[:, -1:]) # All rows, only the last column
            #normal
            # X_train_Normal = X
            # y_train_Normal = y
            # print(Counter(y))
            X_train_Normal = std.fit_transform(X_train_Normal)
            X_test_Normal = np.array(df_test_Normal_fs) # All rows, omit last column
            y_test_Normal = np.ravel(np.array(df_test_Normal)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_Normal))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=100, random_state=42, max_depth=9,)
            rfc.fit(X_train_Normal, y_train_Normal.astype('int'))
            y_pred_Normal = rfc.score(X_test_Normal, y_test_Normal.astype('int'))
            y_pred_Normal_t = rfc.fit(X_train_Normal, y_train_Normal.astype('int')).predict(X_test_Normal)
            # print("%d:%.10f"%(i,y_pred_Normal))
            # print(Counter(y_pred_Normal_t))
            #print(y_pred_t)
            # First block of meta-features for the stage-2 classifier.
            X_train_1 = rfc.predict_proba(X_test_Normal)
            X_test_a_Normalfs = std.fit_transform(X_test_a_Normalfs)
            X_test_NN = rfc.predict_proba(X_test_a_Normalfs)
        else:
            # All-features variant of the Normal detector.
            X = np.array(df_Normal)[:, :-1] # All rows, omit last column
            y = np.ravel(np.array(df_Normal)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_Normal, y_train_Normal = sm.fit_resample(X, y.astype('int'))
            # msmote
            # Normal_df =pd.DataFrame(df_Normal,columns=col_names)
            # Normal_df = Normal_df[[each == 1 for each in Normal_df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=200)
            # samples = msmote.fit(Normal_df)
            # df_Normal = pd.DataFrame(np.append(pd.DataFrame(df_Normal), samples, axis=0))
            # X_train_Normal = np.array(df_Normal)[:, :-1] # All rows, omit last column
            # y_train_Normal = np.ravel(np.array(df_Normal)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_Normal = X
            # y_train_Normal = y
            # print(Counter(y))
            X_train_Normal = std.fit_transform(X_train_Normal)
            X_test_Normal = np.array(df_test_Normal)[:, :-1] # All rows, omit last column
            y_test_Normal = np.ravel(np.array(df_test_Normal)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_Normal))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=130, random_state=42, max_depth=15, )
            rfc.fit(X_train_Normal, y_train_Normal.astype('int'))
            y_pred_Normal = rfc.score(X_test_Normal, y_test_Normal.astype('int'))
            y_pred_t_Normal = rfc.fit(X_train_Normal, y_train_Normal.astype('int')).predict(X_test_Normal)
            # print("%d:%.10f"%(i,y_pred_Normal))
            # print(Counter(y_test_Normal))
            # print(Counter(y_pred_t_Normal))
            # print(Counter(y_pred_t_Normal))
            # print(y_pred_t_Normal)
            X_train_1 = rfc.predict_proba(X_test_Normal)
            X_test_a_Normal = std.fit_transform(X_test_b)
            X_test_NN = rfc.predict_proba(X_test_a_Normal)
        # todo Dos
        # --- Binary Dos detector: same pipeline as the Normal detector,
        # --- its probabilities are appended to the meta-feature matrices.
        df_training = pd.DataFrame(np.column_stack((X_train, y_train)), columns=col_names)
        df_testing = pd.DataFrame(np.column_stack((X_test, y_test)), columns=col_names)
        df_Dos = processing.changetag_Dos(df_training)
        # NOTE(review): unlike the other detectors, df_Dos itself is never
        # min-max scaled (the line below is commented out) while df_test_Dos
        # is — confirm intended.
        # df_Dos = std.fit_transform(df_Dos)
        # df_Dos = processing.Standardization(df_Dos)
        df_test_Dos = processing.changetag_Dos(df_testing)
        df_test_Dos = std.fit_transform(df_test_Dos)
        if(list[1] == 1):
            # Feature-selected variant.
            df_Dos_fs, df_test_Dos_fs, X_test_a_Dosfs = np.array(df_Dos)[:, 3], df_test_Dos[:, 3], X_test_b[:, 3]
            list_Dos = [25, 11, 29, 38, 36, 24, 4, 37 ,30 ,28 ,2]
            for i in list_Dos:
                df_Dos_0 = np.array(df_Dos)[:, i]
                df_Dos_fs = np.column_stack((df_Dos_fs, df_Dos_0))
                df_test_Dos_0 = df_test_Dos[:, i]
                df_test_Dos_fs = np.column_stack((df_test_Dos_fs, df_test_Dos_0))
                X_test_a_1 = X_test_b[:, i]
                X_test_a_Dosfs = np.column_stack((X_test_a_Dosfs, X_test_a_1))
            # df_test_Dos = processing.Standardization(df_test_Dos)
            X = np.array(df_Dos_fs) # All rows, omit last column
            y = np.ravel(np.array(df_Dos)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            #smote
            sm = SMOTE(random_state=42)
            X_train_Dos, y_train_Dos = sm.fit_resample(X,y.astype('int'))
            #msmote
            # R2L_df = df[[each == 1 for each in df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=m)
            # samples = msmote.fit(R2L_df)
            # df = np.append(pd.DataFrame(df), samples, axis=0)
            # df = pd.DataFrame(df)
            # X_train = np.array(df)[:, :-1] # All rows, omit last column
            # y_train = np.ravel(np.array(df)[:, -1:]) # All rows, only the last column
            #normal
            # X_train_Dos = X
            # y_train_Dos = y
            # print(Counter(y))
            X_train_Dos = std.fit_transform(X_train_Dos)
            X_test_Dos = np.array(df_test_Dos_fs) # All rows, omit last column
            y_test_Dos = np.ravel(np.array(df_test_Dos)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_Dos))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=110, random_state=42, max_depth=15,)
            rfc.fit(X_train_Dos, y_train_Dos.astype('int'))
            y_pred_Dos = rfc.score(X_test_Dos, y_test_Dos.astype('int'))
            y_pred_t_Dos = rfc.fit(X_train_Dos, y_train_Dos.astype('int')).predict(X_test_Dos)
            # print("%d:%.10f"%(i,y_pred_Dos))
            # print(Counter(y_pred_t_Dos))
            #print(y_pred_t)
            # Append this detector's probabilities to the meta-feature matrices.
            X_train_1 = np.column_stack((X_train_1, rfc.predict_proba(X_test_Dos)))
            X_test_a_Dosfs = std.fit_transform(X_test_a_Dosfs)
            X_test_NN = np.column_stack((X_test_NN,rfc.predict_proba(X_test_a_Dosfs)))
        else:
            # All-features variant.
            X = np.array(df_Dos)[:, :-1] # All rows, omit last column
            y = np.ravel(np.array(df_Dos)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_Dos, y_train_Dos = sm.fit_resample(X, y.astype('int'))
            # msmote
            # Dos_df =pd.DataFrame(df_Dos,columns=col_names)
            # Dos_df = Dos_df[[each == 1 for each in Dos_df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=200)
            # samples = msmote.fit(Dos_df)
            # df_Dos = pd.DataFrame(np.append(pd.DataFrame(df_Dos), samples, axis=0))
            # X_train_Dos = np.array(df_Dos)[:, :-1] # All rows, omit last column
            # y_train_Dos = np.ravel(np.array(df_Dos)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_Dos = X
            # y_train_Dos = y
            # print(Counter(y))
            X_train_Dos = std.fit_transform(X_train_Dos)
            X_test_Dos = np.array(df_test_Dos)[:, :-1] # All rows, omit last column
            y_test_Dos = np.ravel(np.array(df_test_Dos)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_Dos))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=40, random_state=42, max_depth=13, )
            rfc.fit(X_train_Dos, y_train_Dos.astype('int'))
            y_pred_Dos = rfc.score(X_test_Dos, y_test_Dos.astype('int'))
            y_pred_t_Dos = rfc.fit(X_train_Dos, y_train_Dos.astype('int')).predict(X_test_Dos)
            # print("%d:%.10f"%(i,y_pred_Dos))
            # print(Counter(y_test_Dos))
            # print(Counter(y_pred_t_Dos))
            # print(Counter(y_pred_t_Dos))
            # print(y_pred_t_Dos)
            X_train_1 = np.column_stack((X_train_1, rfc.predict_proba(X_test_Dos)))
            X_test_a_Dos = std.fit_transform(X_test_b)
            X_test_NN = np.column_stack((X_test_NN, rfc.predict_proba(X_test_a_Dos)))
        # todo Probe
        # --- Binary Probe detector: same pipeline, probabilities appended
        # --- to the meta-feature matrices.
        df_training = pd.DataFrame(np.column_stack((X_train, y_train)), columns=col_names)
        df_testing = pd.DataFrame(np.column_stack((X_test, y_test)), columns=col_names)
        df_Probe = processing.changetag_Probe(df_training)
        # df_Probe = std.fit_transform(df_Probe)
        # df_Probe = processing.Standardization(df_Probe)
        df_test_Probe = processing.changetag_Probe(df_testing)
        df_test_Probe = std.fit_transform(df_test_Probe)
        # df_test_Probe = processing.Standardization(df_test_Probe)
        if (list[2]== 1):
            # Feature-selected variant.
            df_Probe_fs, df_test_Probe_fs, X_test_a_Probefs = np.array(df_Probe)[:, 11], df_test_Probe[:, 11], X_test_b[:, 11]
            list_Probe = [2, 3, 36, 35, 38, 4, 30, 40, 29, 23, 5, 37]
            for i in list_Probe:
                df_Probe_0 = np.array(df_Probe)[:, i]
                df_Probe_fs = np.column_stack((df_Probe_fs, df_Probe_0))
                df_test_Probe_0 = df_test_Probe[:, i]
                df_test_Probe_fs = np.column_stack((df_test_Probe_fs, df_test_Probe_0))
                X_test_a_1 = X_test_b[:, i]
                X_test_a_Probefs = np.column_stack((X_test_a_Probefs, X_test_a_1))
            X = np.array(df_Probe_fs) # All rows, omit last column
            y = np.ravel(np.array(df_Probe)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_Probe, y_train_Probe = sm.fit_resample(X, y.astype('int'))
            # msmote
            # R2L_df = df[[each == 1 for each in df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=m)
            # samples = msmote.fit(R2L_df)
            # df = np.append(pd.DataFrame(df), samples, axis=0)
            # df = pd.DataFrame(df)
            # X_train = np.array(df)[:, :-1] # All rows, omit last column
            # y_train = np.ravel(np.array(df)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_Probe = X
            # y_train_Probe = y
            # print(Counter(y))
            X_train_Probe = std.fit_transform(X_train_Probe)
            X_test_Probe = np.array(df_test_Probe_fs) # All rows, omit last column
            y_test_Probe = np.ravel(np.array(df_test_Probe)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_Probe))
            # for i in range(60,80):
            rfc = RandomForestClassifier(n_estimators=70, random_state=42, max_depth=19, )
            rfc.fit(X_train_Probe, y_train_Probe.astype('int'))
            y_pred_Probe = rfc.score(X_test_Probe, y_test_Probe.astype('int'))
            y_pred_t_Probe = rfc.fit(X_train_Probe, y_train_Probe.astype('int')).predict(X_test_Probe)
            # print("%d:%.10f"%(i,y_pred_Probe))
            # print(Counter(y_pred_t_Probe))
            # print(y_pred_t)
            X_train_1 = np.column_stack((X_train_1, rfc.predict_proba(X_test_Probe)))
            X_test_a_Probefs = std.fit_transform(X_test_a_Probefs)
            X_test_NN = np.column_stack((X_test_NN,rfc.predict_proba(X_test_a_Probefs)))
        else :
            # All-features variant.
            X = np.array(df_Probe)[:, :-1] # All rows, omit last column
            y = np.ravel(np.array(df_Probe)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_Probe, y_train_Probe = sm.fit_resample(X, y.astype('int'))
            # msmote
            # Probe_df =pd.DataFrame(df_Probe,columns=col_names)
            # Probe_df = Probe_df[[each == 1 for each in Probe_df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=200)
            # samples = msmote.fit(Probe_df)
            # df_Probe = pd.DataFrame(np.append(pd.DataFrame(df_Probe), samples, axis=0))
            # X_train_Probe = np.array(df_Probe)[:, :-1] # All rows, omit last column
            # y_train_Probe = np.ravel(np.array(df_Probe)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_Probe = X
            # y_train_Probe = y
            # print(Counter(y))
            X_train_Probe = std.fit_transform(X_train_Probe)
            X_test_Probe = np.array(df_test_Probe)[:, :-1] # All rows, omit last column
            y_test_Probe = np.ravel(np.array(df_test_Probe)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_Probe))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=90, random_state=42, max_depth=14, )
            rfc.fit(X_train_Probe, y_train_Probe.astype('int'))
            y_pred_Probe = rfc.score(X_test_Probe, y_test_Probe.astype('int'))
            y_pred_t_Probe = rfc.fit(X_train_Probe, y_train_Probe.astype('int')).predict(X_test_Probe)
            # print("%d:%.10f"%(i,y_pred_Probe))
            # print(Counter(y_test_Probe))
            # print(Counter(y_pred_t_Probe))
            # print(Counter(y_pred_t_Probe))
            # print(y_pred_t_Probe)
            X_train_1 = np.column_stack((X_train_1, rfc.predict_proba(X_test_Probe)))
            X_test_a_Probe = std.fit_transform(X_test_b)
            X_test_NN = np.column_stack((X_test_NN, rfc.predict_proba(X_test_a_Probe)))
        # todo R2L
        # --- Binary R2L detector: same pipeline, probabilities appended
        # --- to the meta-feature matrices.
        df_training = pd.DataFrame(np.column_stack((X_train, y_train)), columns=col_names)
        df_testing = pd.DataFrame(np.column_stack((X_test, y_test)), columns=col_names)
        df_R2L = processing.changetag_R2L(df_training)
        # df_R2L = std.fit_transform(df_R2L)
        # df_R2L = processing.Standardization(df_R2L)
        df_test_R2L = processing.changetag_R2L(df_testing)
        df_test_R2L = std.fit_transform(df_test_R2L)
        # df_test_R2L = processing.Standardization(df_test_R2L)
        if (list[3] == 1):
            # Feature-selected variant.
            df_R2L_fs, df_test_R2L_fs, X_test_a_R2Lfs = np.array(df_R2L)[:, 2], df_test_R2L[:, 2], X_test_b[:, 2]
            list_R2L = [21, 11, 29, 23, 25, 1, 4, 40, 9, 28, 35]
            for i in list_R2L:
                df_R2L_0 = np.array(df_R2L)[:, i]
                df_R2L_fs = np.column_stack((df_R2L_fs, df_R2L_0))
                df_test_R2L_0 = df_test_R2L[:, i]
                df_test_R2L_fs = np.column_stack((df_test_R2L_fs, df_test_R2L_0))
                X_test_a_1 = X_test_b[:, i]
                X_test_a_R2Lfs = np.column_stack((X_test_a_R2Lfs, X_test_a_1))
            X = np.array(df_R2L_fs) # All rows, omit last column
            y = np.ravel(np.array(df_R2L)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_R2L, y_train_R2L = sm.fit_resample(X, y.astype('int'))
            # msmote
            # R2L_df =pd.DataFrame(df_R2L,columns=col_names)
            # R2L_df = R2L_df[[each == 1 for each in R2L_df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=200)
            # samples = msmote.fit(R2L_df)
            # df_R2L = pd.DataFrame(np.append(pd.DataFrame(df_R2L), samples, axis=0))
            # X_train_R2L = np.array(df_R2L)[:, :-1] # All rows, omit last column
            # y_train_R2L = np.ravel(np.array(df_R2L)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_R2L = X
            # y_train_R2L = y
            # print(Counter(y))
            X_train_R2L = std.fit_transform(X_train_R2L)
            X_test_R2L = np.array(df_test_R2L_fs) # All rows, omit last column
            y_test_R2L = np.ravel(np.array(df_test_R2L)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_R2L))
            # for i in range(121,140):
            rfc = RandomForestClassifier(n_estimators=121, random_state=42, max_depth=23, )
            rfc.fit(X_train_R2L, y_train_R2L.astype('int'))
            y_pred_R2L = rfc.score(X_test_R2L, y_test_R2L.astype('int'))
            y_pred_t_R2L = rfc.fit(X_train_R2L, y_train_R2L.astype('int')).predict(X_test_R2L)
            # print("%d:%.10f"%(i,y_pred_R2L))
            # print(Counter(y_pred_t_R2L))
            #print(y_pred_t_R2L)
            X_train_1 = np.column_stack((X_train_1, rfc.predict_proba(X_test_R2L)))
            X_test_a_R2Lfs = std.fit_transform(X_test_a_R2Lfs)
            X_test_NN = np.column_stack((X_test_NN,rfc.predict_proba(X_test_a_R2Lfs)))
        else:
            # All-features variant.
            X = np.array(df_R2L)[:, :-1] # All rows, omit last column
            y = np.ravel(np.array(df_R2L)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_R2L, y_train_R2L = sm.fit_resample(X, y.astype('int'))
            # msmote
            # R2L_df =pd.DataFrame(df_R2L,columns=col_names)
            # R2L_df = R2L_df[[each == 1 for each in R2L_df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=200)
            # samples = msmote.fit(R2L_df)
            # df_R2L = pd.DataFrame(np.append(pd.DataFrame(df_R2L), samples, axis=0))
            # X_train_R2L = np.array(df_R2L)[:, :-1] # All rows, omit last column
            # y_train_R2L = np.ravel(np.array(df_R2L)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_R2L = X
            # y_train_R2L = y
            # print(Counter(y))
            X_train_R2L = std.fit_transform(X_train_R2L)
            X_test_R2L = np.array(df_test_R2L)[:, :-1] # All rows, omit last column
            y_test_R2L = np.ravel(np.array(df_test_R2L)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_R2L))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=50, random_state=42, max_depth=18, )
            rfc.fit(X_train_R2L, y_train_R2L.astype('int'))
            y_pred_R2L = rfc.score(X_test_R2L, y_test_R2L.astype('int'))
            y_pred_t_R2L = rfc.fit(X_train_R2L, y_train_R2L.astype('int')).predict(X_test_R2L)
            # print("%d:%.10f"%(i,y_pred_R2L))
            # print(Counter(y_test_R2L))
            # print(Counter(y_pred_t_R2L))
            # print(Counter(y_pred_t_R2L))
            # print(y_pred_t_R2L)
            X_train_1 = np.column_stack((X_train_1, rfc.predict_proba(X_test_R2L)))
            X_test_a_R2L = std.fit_transform(X_test_b)
            X_test_NN = np.column_stack((X_test_NN, rfc.predict_proba(X_test_a_R2L)))
        # todo U2R
        # --- Binary U2R detector: last of the five detectors; its
        # --- probabilities complete the meta-feature matrix X_train_NN.
        df_training = pd.DataFrame(np.column_stack((X_train, y_train)), columns=col_names)
        df_testing = pd.DataFrame(np.column_stack((X_test, y_test)), columns=col_names)
        df_U2R = processing.changetag_U2R(df_training)
        # df_U2R = processing.Standardization(df_U2R)
        # NOTE(review): fit_transform is applied twice in a row to the U2R
        # test split (here and on the next line) — confirm intended.
        df_test_U2R = std.fit_transform(processing.changetag_U2R(df_testing))
        df_test_U2R = std.fit_transform(df_test_U2R)
        if (list[4] == 1):
            # Feature-selected variant.
            df_U2R_fs, df_test_U2R_fs, X_test_a_U2Rfs = np.array(df_U2R)[:, 3], df_test_U2R[:, 3], X_test_b[:, 3]
            list_U2R = [13, 2, 23, 11, 35, 34, 25, 37, 30, 36, 27]
            for i in list_U2R:
                df_U2R_0 = np.array(df_U2R)[:, i]
                df_U2R_fs = np.column_stack((df_U2R_fs, df_U2R_0))
                df_test_U2R_0 = df_test_U2R[:, i]
                df_test_U2R_fs = np.column_stack((df_test_U2R_fs, df_test_U2R_0))
                X_test_a_1 = X_test_b[:, i]
                X_test_a_U2Rfs = np.column_stack((X_test_a_U2Rfs, X_test_a_1))
            # df_test_U2R = processing.Standardization(df_test_U2R)
            X = np.array(df_U2R_fs) # All rows, omit last column
            y = np.ravel(np.array(df_U2R)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_U2R, y_train_U2R = sm.fit_resample(X, y.astype('int'))
            # msmote
            # U2R_df = df[[each == 1 for each in df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=m)
            # samples = msmote.fit(U2R_df)
            # df = np.append(pd.DataFrame(df), samples, axis=0)
            # df = pd.DataFrame(df)
            # X_train = np.array(df)[:, :-1] # All rows, omit last column
            # y_train = np.ravel(np.array(df)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_U2R = X
            # y_train_U2R = y
            # print(Counter(y))
            X_train_U2R = std.fit_transform(X_train_U2R)
            X_test_U2R = np.array(df_test_U2R_fs) # All rows, omit last column
            y_test_U2R = np.ravel(np.array(df_test_U2R)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_U2R))
            # for i in range(19,30):
            rfc = RandomForestClassifier(n_estimators=20, random_state=42, max_depth=18, )
            rfc.fit(X_train_U2R, y_train_U2R.astype('int'))
            y_pred_U2R = rfc.score(X_test_U2R, y_test_U2R.astype('int'))
            y_pred_t_U2R = rfc.fit(X_train_U2R, y_train_U2R.astype('int')).predict(X_test_U2R)
            # print("%d:%.10f"%(i,y_pred_U2R))
            # print(Counter(y_pred_t_U2R))
            # print(y_pred_t)
            # Final meta-feature matrix for the stage-2 classifier.
            X_train_NN = np.column_stack((X_train_1, rfc.predict_proba(X_test_U2R)))
            X_test_a_U2Rfs = std.fit_transform(X_test_a_U2Rfs)
            X_test_NN = np.column_stack((X_test_NN,rfc.predict_proba(X_test_a_U2Rfs)))
        else:
            # All-features variant.
            X = np.array(df_U2R)[:, :-1] # All rows, omit last column
            y = np.ravel(np.array(df_U2R)[:, -1:]) # All rows, only the last column
            # print(Counter(y))
            # smote
            sm = SMOTE(random_state=42)
            X_train_U2R, y_train_U2R = sm.fit_resample(X, y.astype('int'))
            # msmote
            # R2L_df =pd.DataFrame(df_R2L,columns=col_names)
            # R2L_df = R2L_df[[each == 1 for each in R2L_df['label']]]
            # #for m in range(10000,15000,100):
            # msmote = ms.MSmote(N=200)
            # samples = msmote.fit(R2L_df)
            # df_R2L = pd.DataFrame(np.append(pd.DataFrame(df_R2L), samples, axis=0))
            # X_train_R2L = np.array(df_R2L)[:, :-1] # All rows, omit last column
            # y_train_R2L = np.ravel(np.array(df_R2L)[:, -1:]) # All rows, only the last column
            # normal
            # X_train_R2L = X
            # y_train_R2L = y
            # print(Counter(y))
            X_train_U2R = std.fit_transform(X_train_U2R)
            X_test_U2R = np.array(df_test_U2R)[:, :-1] # All rows, omit last column
            y_test_U2R = np.ravel(np.array(df_test_U2R)[:, -1:]) # All rows, only the last column
            # print(Counter(y_test_R2L))
            # for i in range(1,20):
            rfc = RandomForestClassifier(n_estimators=100, random_state=42, max_depth=8, )
            rfc.fit(X_train_U2R, y_train_U2R.astype('int'))
            y_pred_U2R = rfc.score(X_test_U2R, y_test_U2R.astype('int'))
            y_pred_t_U2R = rfc.fit(X_train_U2R, y_train_U2R.astype('int')).predict(X_test_U2R)
            # print("%d:%.10f"%(i,y_pred_U2R))
            # print(Counter(y_test_U2R))
            # print(Counter(y_pred_t_U2R))
            # print(y_pred_t)
            X_train_NN = np.column_stack((X_train_1, rfc.predict_proba(X_test_U2R)))
            X_test_a_U2R = std.fit_transform(X_test_b)
            X_test_NN = np.column_stack((X_test_NN, rfc.predict_proba(X_test_a_U2R)))
# todo NN
df_testing = pd.DataFrame(np.column_stack((X_test, y_test)), columns=col_names)
df_train_NN = processing.changetag(df_testing)
y_train_NN = np.ravel(np.array(df_train_NN)[:, -1:]) # All rows, omit last column
# print(Counter(y_train_NN))
X_train_NN, y_train_NN = sm.fit_resample(X_train_NN, y_train_NN.astype('int'))
# print(Counter(y_train_NN))
y = np.ravel(np.array(df_test)[:, -1:]) # All rows, only the last column
print(Counter(y))
# model = SVC(kernel='rbf', probability=True)
# model.fit(X_train_NN,y_train_NN.astype('int'))
# y_pred_NN = model.score(X_test_NN,y.astype('int'))
# svc_model = SVC(kernel='rbf')
# param_grid = {'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000], 'gamma': [0.001,0.0001]} # param_grid:我们要调参数的列表(带有参数名称作为键的字典),此处共有14种超参数的组合来进行网格搜索,进而选择一个拟合分数最好的超平面系数。
# grid_search = GridSearchCV(svc_model, param_grid, n_jobs=-1,verbose=1) # n_jobs:并行数,int类型。(-1:跟CPU核数一致;1:默认值);verbose:日志冗长度。默认为0:不输出训练过程;1:偶尔输出;>1:对每个子模型都输出。
# grid_search.fit(X_train_NN,y_train_NN.astype('int')) # 训练,默认使用5折交叉验证
# s = grid_search.score
# best_parameters = grid_search.best_estimator_.get_params() # 获取最佳模型中的最佳参数
# print("cv results are" % grid_search.best_params_, grid_search.cv_results_) # grid_search.cv_results_:给出不同参数情况下的评价结果。
# print("best parameters are" % grid_search.best_params_,grid_search.best_params_) # grid_search.best_params_:已取得最佳结果的参数的组合;
# print("best score are" % grid_search.best_params_, grid_search.best_score_) # grid_search.best_score_:优化过程期间观察到的最好的评分。
# # for para, val in list(best_parameters.items()):
# # print(para, val)
svm_model = SVC(kernel='rbf', C=1000, gamma=0.001) # 最佳模型
# svm_model.fit(X_train_NN, y_train_NN.astype('int'))
y_pred_t = svm_model.fit(X_train_NN, y_train_NN.astype('int')).predict(X_test_NN)
print(Counter(y_pred_t))
# print(metrics.precision_score(y.astype('int'), y_pred_t, labels=[5], average='macro'))
# print(metrics.precision_score(y.astype('int'), y_pred_t, labels=[4], average='macro'))
# print(metrics.precision_score(y.astype('int'), y_pred_t, labels=[3], average='macro'))
# print(metrics.f1_score(y.astype('int'), y_pred_t, average='macro'))
y_pred_NN = svm_model.score(X_test_NN,y.astype('int'))
#
# print(y_pred_NN)
print ("%.10f:%.10f"%(y_pred_NN,metrics.f1_score(y.astype('int'), y_pred_t, average='macro')))
print(metrics.classification_report(y, y_pred_t))
cfm = confusion_matrix(y,y_pred_t)
print(cfm)
Ve = ["Normal","Dos","Probe","R2L","U2R"]
heatmap(cfm,Ve,Ve)
# a =1
|
{"/main.py": ["/MSomte.py", "/processing.py"], "/baseline.py": ["/MSomte.py", "/processing.py"], "/test.py": ["/MSomte.py", "/processing.py"]}
|
42,874,217
|
Mawinnn/ids
|
refs/heads/master
|
/test.py
|
import pandas as pd
import numpy as np
import sys
import sklearn
import matplotlib
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
import MSomte as ms
from imblearn.over_sampling import SMOTE
from collections import Counter
import processing
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Scratch driver: sweeps the random-forest size for the binary
    # "Normal" detector on a hand-picked feature subset and prints the
    # accuracy for each setting.
    # NSL-KDD column schema: 41 features followed by the attack "label".
    col_names = ["duration", "protocol_type", "service", "flag", "src_bytes",
                 "dst_bytes", "land", "wrong_fragment", "urgent", "hot", "num_failed_logins",
                 "logged_in", "num_compromised", "root_shell", "su_attempted", "num_root",
                 "num_file_creations", "num_shells", "num_access_files", "num_outbound_cmds",
                 "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
                 "srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate",
                 "diff_srv_rate", "srv_diff_host_rate", "dst_host_count", "dst_host_srv_count",
                 "dst_host_same_srv_rate", "dst_host_diff_srv_rate", "dst_host_same_src_port_rate",
                 "dst_host_srv_diff_host_rate", "dst_host_serror_rate", "dst_host_srv_serror_rate",
                 "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
    df = pd.read_csv("KDDTrain+.csv", header=None, names=col_names)
    df_test = pd.read_csv("KDDTest+_2.csv", header=None, names=col_names)
    # processing.change expands the categorical columns, so column indices
    # below (e.g. 120) refer to the expanded feature space.
    df,df_test = processing.change(df,df_test)
    X_test_a = np.array(processing.changetag(df_test))[:, :-1]
    df_X = np.array(df)[:, :-1]
    df_y = np.ravel(np.array(df)[:, -1:])
    X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.45,random_state= 42)
    df_training = pd.DataFrame(np.column_stack((X_train, y_train)),columns = df.columns.values.tolist())
    df_testing = pd.DataFrame(np.column_stack((X_test, y_test)),columns = df.columns.values.tolist())
    std = MinMaxScaler()
    # todo Normal
    df_Normal = processing.changetag_Normal(df_training)
    df_Normal = std.fit_transform(df_Normal)
    # df_Normal = processing.Standardization(df_Normal)
    df_test_Normal = processing.changetag_Normal(df_testing)
    # NOTE(review): the scaler is re-fit on the test split (fit_transform)
    # instead of reusing training statistics — confirm intended.
    df_test_Normal = std.fit_transform(df_test_Normal)
    # Hand-picked feature subset: start from column 120 and stack the rest.
    df_Normal_fs ,df_test_Normal_fs,X_test_a_Normalfs = df_Normal[:, 120] , df_test_Normal[:,120],X_test_a[:,120]
    list_Normal = [8, 116, 65, 26, 90, 35, 1, 33 ,25 ,22, 53]
    for i in list_Normal:
        df_Normal_0 = df_Normal[:, i]
        df_Normal_fs = np.column_stack((df_Normal_fs, df_Normal_0))
        df_test_Normal_0 = df_test_Normal[:,i]
        df_test_Normal_fs = np.column_stack((df_test_Normal_fs, df_test_Normal_0))
        X_test_a_0 = X_test_a[:, i]
        X_test_a_Normalfs = np.column_stack((X_test_a_Normalfs, X_test_a_0))
    # df_test_Normal = processing.Standardization(df_test_Normal)
    X = df_Normal_fs # All rows, omit last column
    y = np.ravel(np.array(df_Normal)[:, -1:]) # All rows, only the last column
    # print(Counter(y))
    # Balance the binary classes with SMOTE before training.
    sm = SMOTE(random_state=42)
    X_train_Normal, y_train_Normal = sm.fit_resample(X,y)
    # X_train_Normal = std.fit_transform(X_train_Normal)
    X_test_Normal = df_test_Normal_fs # All rows, omit last column
    y_test_Normal = np.ravel(np.array(df_test_Normal)[:, -1:]) # All rows, only the last column
    # print(Counter(y_test_Normal))
    # Sweep the forest size and report accuracy for each setting.
    for i in range(10,210,10):
        rfc = RandomForestClassifier(n_estimators=i, random_state=42, max_depth=4,)
        rfc.fit(X_train_Normal, y_train_Normal.astype('int'))
        y_pred_Normal = rfc.score(X_test_Normal, y_test_Normal.astype('int'))
        y_pred_Normal_t = rfc.fit(X_train_Normal, y_train_Normal.astype('int')).predict(X_test_Normal)
        print("%d:%.10f"%(i,y_pred_Normal))
        # Bug fix: the original printed Counter(y_test_U2R) and
        # Counter(y_pred_t_U2R) — names that are never defined in this file
        # (copied from main.py's U2R section) and raised NameError at
        # runtime.  Print the Normal-detector counters actually computed.
        print(Counter(y_test_Normal))
        print(Counter(y_pred_Normal_t))
|
{"/main.py": ["/MSomte.py", "/processing.py"], "/baseline.py": ["/MSomte.py", "/processing.py"], "/test.py": ["/MSomte.py", "/processing.py"]}
|
42,874,218
|
Mawinnn/ids
|
refs/heads/master
|
/processing.py
|
import pandas as pd
def changetag_Normal(df):
    """Integer-encode the categorical features and binarize the label column
    in place: 1 for normal traffic, 0 for every attack class.  Returns df."""
    # Cast the three symbolic columns to pandas categoricals, then replace
    # every categorical column with its integer codes.
    for name in ('protocol_type', 'service', 'flag'):
        df[name] = df[name].astype('category')
    encoded = df.select_dtypes(['category']).columns
    df[encoded] = df[encoded].apply(lambda s: s.cat.codes)
    # Every attack name maps to 0; 'normal' alone maps to 1.
    attacks = ['neptune', 'back', 'land', 'pod', 'smurf', 'teardrop', 'mailbomb', 'apache2',
               'processtable', 'udpstorm', 'worm',
               'ipsweep', 'nmap', 'portsweep', 'satan', 'mscan', 'saint',
               'ftp_write', 'guess_passwd', 'imap', 'multihop', 'phf', 'spy', 'warezclient',
               'warezmaster', 'sendmail', 'named', 'snmpgetattack', 'snmpguess', 'xlock',
               'xsnoop', 'httptunnel',
               'buffer_overflow', 'loadmodule', 'perl', 'rootkit', 'ps', 'sqlattack', 'xterm']
    mapping = dict.fromkeys(attacks, 0)
    mapping['normal'] = 1
    df['label'] = df['label'].replace(mapping)
    return df
def changetag_Dos(df):
    """Integer-encode the categorical features and binarize the label column
    in place: 1 for DoS attacks, 0 for everything else.  Returns df."""
    for name in ('protocol_type', 'service', 'flag'):
        df[name] = df[name].astype('category')
    encoded = df.select_dtypes(['category']).columns
    df[encoded] = df[encoded].apply(lambda s: s.cat.codes)
    # DoS attack names map to 1; normal and all other attack classes to 0.
    dos = ['neptune', 'back', 'land', 'pod', 'smurf', 'teardrop', 'mailbomb', 'apache2',
           'processtable', 'udpstorm', 'worm']
    non_dos = ['normal',
               'ipsweep', 'nmap', 'portsweep', 'satan', 'mscan', 'saint',
               'ftp_write', 'guess_passwd', 'imap', 'multihop', 'phf', 'spy', 'warezclient',
               'warezmaster', 'sendmail', 'named', 'snmpgetattack', 'snmpguess', 'xlock',
               'xsnoop', 'httptunnel',
               'buffer_overflow', 'loadmodule', 'perl', 'rootkit', 'ps', 'sqlattack', 'xterm']
    mapping = dict.fromkeys(dos, 1)
    mapping.update(dict.fromkeys(non_dos, 0))
    df['label'] = df['label'].replace(mapping)
    return df
def changetag_Probe(df):
df['protocol_type'] = df['protocol_type'].astype('category')
df['service'] = df['service'].astype('category')
df['flag'] = df['flag'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
labeldf = df['label']
newlabeldf = labeldf.replace(
{'normal': 0,
'neptune': 0, 'back': 0, 'land': 0, 'pod': 0, 'smurf': 0, 'teardrop': 0, 'mailbomb': 0,'apache2': 0,'processtable': 0, 'udpstorm': 0, 'worm': 0,
'ipsweep': 1, 'nmap': 1, 'portsweep': 1, 'satan': 1, 'mscan': 1, 'saint': 1,
'ftp_write': 0, 'guess_passwd': 0, 'imap': 0, 'multihop': 0, 'phf': 0, 'spy': 0, 'warezclient': 0,'warezmaster': 0, 'sendmail': 0, 'named': 0, 'snmpgetattack': 0, 'snmpguess': 0, 'xlock': 0, 'xsnoop': 0,'httptunnel': 0,
'buffer_overflow': 0, 'loadmodule': 0, 'perl': 0, 'rootkit': 0, 'ps': 0, 'sqlattack': 0, 'xterm': 0})
df['label'] = newlabeldf
return df
def changetag_R2L(df):
df['protocol_type'] = df['protocol_type'].astype('category')
df['service'] = df['service'].astype('category')
df['flag'] = df['flag'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
labeldf = df['label']
newlabeldf = labeldf.replace(
{'normal': 0, 'neptune': 0, 'back': 0, 'land': 0, 'pod': 0, 'smurf': 0, 'teardrop': 0, 'mailbomb': 0,
'apache2': 0,
'processtable': 0, 'udpstorm': 0, 'worm': 0,
'ipsweep': 0, 'nmap': 0, 'portsweep': 0, 'satan': 0, 'mscan': 0, 'saint': 0
, 'ftp_write': 1, 'guess_passwd': 1, 'imap': 1, 'multihop': 1, 'phf': 1, 'spy': 1, 'warezclient': 1,
'warezmaster': 1, 'sendmail': 1, 'named': 1, 'snmpgetattack': 1, 'snmpguess': 1, 'xlock': 1, 'xsnoop': 1,
'httptunnel': 1,
'buffer_overflow': 0, 'loadmodule': 0, 'perl': 0, 'rootkit': 0, 'ps': 0, 'sqlattack': 0, 'xterm': 0})
df['label'] = newlabeldf
return df
def changetag_U2R(df):
df['protocol_type'] = df['protocol_type'].astype('category')
df['service'] = df['service'].astype('category')
df['flag'] = df['flag'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
labeldf = df['label']
newlabeldf = labeldf.replace(
{'normal': 0,
'neptune': 0, 'back': 0, 'land': 0, 'pod': 0, 'smurf': 0, 'teardrop': 0, 'mailbomb': 0,'apache2': 0,'processtable': 0, 'udpstorm': 0, 'worm': 0,
'ipsweep': 0, 'nmap': 0, 'portsweep': 0, 'satan': 0, 'mscan': 0, 'saint': 0,
'ftp_write': 0, 'guess_passwd': 0, 'imap': 0, 'multihop': 0, 'phf': 0, 'spy': 0, 'warezclient': 0,'warezmaster': 0, 'sendmail': 0, 'named': 0, 'snmpgetattack': 0, 'snmpguess': 0, 'xlock': 0, 'xsnoop': 0,'httptunnel': 0,
'buffer_overflow': 1, 'loadmodule': 1, 'perl': 1, 'rootkit': 1, 'ps': 1, 'sqlattack': 1, 'xterm': 1})
df['label'] = newlabeldf
return df
def changetag(df):
df['protocol_type'] = df['protocol_type'].astype('category')
df['service'] = df['service'].astype('category')
df['flag'] = df['flag'].astype('category')
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
labeldf = df['label']
newlabeldf = labeldf.replace(
{'normal': 1,
'neptune': 2, 'back': 2, 'land': 2, 'pod': 2, 'smurf': 2, 'teardrop': 2, 'mailbomb': 2,'apache2': 2,'processtable': 2, 'udpstorm': 2, 'worm': 2,
'ipsweep': 3, 'nmap': 3, 'portsweep': 3, 'satan': 3, 'mscan': 3, 'saint': 3,
'ftp_write': 4, 'guess_passwd': 4, 'imap': 4, 'multihop': 4, 'phf': 4, 'spy': 4, 'warezclient': 4,'warezmaster': 4, 'sendmail': 4, 'named': 4, 'snmpgetattack': 4, 'snmpguess': 4, 'xlock': 4, 'xsnoop': 4,'httptunnel': 4,
'buffer_overflow': 5, 'loadmodule': 5, 'perl': 5, 'rootkit': 5, 'ps': 5, 'sqlattack': 5, 'xterm': 5})
df['label'] = newlabeldf
return df
|
{"/main.py": ["/MSomte.py", "/processing.py"], "/baseline.py": ["/MSomte.py", "/processing.py"], "/test.py": ["/MSomte.py", "/processing.py"]}
|
42,891,894
|
Helenalyh/pytest-POM
|
refs/heads/master
|
/test/test_cases/test_training_ground_page.py
|
from pytest import mark
from test.page_objects.training_ground_page import TrainingGroundPage
@mark.xfail(reason="Fails on purpose.")
def test_training_page(chrome_driver):
    """Deliberately failing check: button1 is compared against the wrong
    expected label, so the xfail marker documents the known failure."""
    page = TrainingGroundPage(driver=chrome_driver)
    page.go()
    assert page.button1.text == 'Button2', "Unexpected button1 text!"
|
{"/test/test_cases/test_trial_page.py": ["/test/page_objects/wiki_page.py", "/test/page_objects/amazon_page.py", "/test/page_objects/imdb_page.py", "/test/page_objects/google_page.py"], "/test/test_cases/test_training_ground_page.py": ["/test/page_objects/google_page.py"]}
|
42,891,895
|
Helenalyh/pytest-POM
|
refs/heads/master
|
/test/test_cases/test_trial_page.py
|
from pytest import mark
from test.page_objects.trial_page import TrialPage
@mark.smoke
def test_trial_page(chrome_driver):
    """Smoke test: open the trial page, type into the stone input, click."""
    page = TrialPage(driver=chrome_driver)
    page.go()
    page.stone_input.input_text("rock")
    page.stone_button.click()
|
{"/test/test_cases/test_trial_page.py": ["/test/page_objects/wiki_page.py", "/test/page_objects/amazon_page.py", "/test/page_objects/imdb_page.py", "/test/page_objects/google_page.py"], "/test/test_cases/test_training_ground_page.py": ["/test/page_objects/google_page.py"]}
|
42,962,365
|
kauroy1994/Virtual-Health-Assistant
|
refs/heads/main
|
/VHA.py
|
from Patient import Patient
from Prover import Prover
class VHA(object):
    """Virtual Health Assistant.

    Holds a small knowledge graph (facts + schema), abstractions that map
    concrete facts to guideline-level concepts, guidelines expressed as
    horn clauses, decoding instructions for prescriptions, and an optional
    natural-language-generation module.
    """

    def __init__(self,
                 facts=None,
                 schema=None,
                 abstractions=None,
                 guidelines=None,
                 decode_instructions=None,
                 nlg_module=None):
        '''constructor that stores knowledge graph
        abstraction to abstract concepts to map to guideline
        guidelines that contain mapping from abstract concepts to prescription
        prescription decoder for execution by VHA
        natural language generation module
        '''
        # Bug fix: the original used mutable default arguments ([]) that
        # were shared by every instance constructed with the defaults.
        self.facts = [] if facts is None else facts
        self.schema = [] if schema is None else schema
        self.abstractions = [] if abstractions is None else abstractions
        self.guidelines = [] if guidelines is None else guidelines
        self.decode_instructions = ([] if decode_instructions is None
                                    else decode_instructions)
        self.nlg_module = nlg_module

    def set_guidelines(self, guidelines):
        '''sets the list of guidelines - KG init'''
        self.guidelines = guidelines

    def add_guideline(self, guideline):
        '''adds a guideline (horn clause) to the list of guidelines'''
        self.guidelines.append(guideline)

    def add_facts(self, fact):
        '''adds a single fact (predicate) to the list of facts -
        KG update/init'''
        self.facts.append(fact)

    def set_schema(self, schema):
        '''sets schema for the predicates in the fact file - KG init'''
        self.schema = schema

    def update_schema(self, line, add=True):
        '''updates the schema - KG update; *line* is appended when *add*
        is True and removed otherwise'''
        if add:
            self.schema.append(line)
        else:
            self.schema.remove(line)

    def prove_examples(self, patient_objects):
        '''proves all consequents in a loop from patient facts using
        guidelines; returns the list of examples that could be proven.

        Fix: removed the leftover print()/input() debug calls that made
        this method block on interactive input.
        '''
        examples = []
        true_examples = []
        symptoms = [x.split(',')[1][:-1] for x in Patient.possible_facts]
        patients = ['p' + str(i) for i in range(Patient.count)]
        # Enumerate every candidate consequent for every known patient.
        for patient in patients:
            examples.append("depression(" + patient + ')')
            for symptom in symptoms:
                examples.append("significant(" + patient + ',' + symptom + ')')
        for example in examples:
            before_count = 0
            # Repeat until a pass over the guidelines proves nothing new.
            while True:
                after_count = before_count
                for guideline in self.guidelines:
                    # Match the example's predicate against the clause head.
                    if example.split('(')[0] in guideline.split(":-")[0]:
                        Prover.rule = guideline
                        # NOTE(review): this extraction only matches for
                        # single-argument examples like depression(p0); for
                        # significant(p0,s) it yields "0,s" and never equals
                        # a patient id, so Prover.facts keeps its previous
                        # value -- confirm intent.
                        patient_id = example.split('(')[1][1:-1]
                        for patient in patient_objects:
                            if patient.id == patient_id:
                                Prover.facts = patient.facts
                                break
                        if Prover.prove_rule(example):
                            true_examples.append(example)
                            after_count += 1
                            break
                if after_count == before_count:
                    break
                before_count = after_count
        return true_examples
#============TESTER FUNCTIONS===============
def main():
    """Smoke-test driver: build a toy VHA with two guidelines, generate one
    random patient, and run the prover over every candidate example."""
    my_vha = VHA()
    my_vha.set_guidelines(["depression(X):-significant(X,S)",
                           "significant(X,S):-depsymptom(X,S);freq(X,S,high)"])
    patients = []
    patient = Patient()
    patients.append(patient)
    facts = my_vha.prove_examples(patients)
    return facts


# Guarded entry point so importing this module no longer runs the demo.
if __name__ == "__main__":
    main()
|
{"/VHA.py": ["/Patient.py"]}
|
42,962,366
|
kauroy1994/Virtual-Health-Assistant
|
refs/heads/main
|
/Patient.py
|
from random import random
class Patient(object):
    """Synthetic patient holding a random subset of depression-related facts.

    Each constructed patient receives a sequential id ('0', '1', ...) and a
    fact list where every symptom template is included with probability 0.5
    and, independently, marked high-frequency with probability 0.5.
    """

    # Class-wide instance counter; also used to mint patient ids.
    count = 0
    # Grounding templates: 'X' is replaced by the patient tag (e.g. 'p0').
    possible_facts = ["depsymptom(X,feelings_of_sadness)",
                      "depsymptom(X,tearfulness)",
                      "depsymptom(X,emptiness)",
                      "depsymptom(X,hopelessness)",
                      "depsymptom(X,angry_outbursts)",
                      "depsymptom(X,irritability)",
                      "depsymptom(X,frustration)",
                      "depsymptom(X,loss_of_interest)",
                      "depsymptom(X,loss_of_pleasure)",
                      "depsymptom(X,loss_of_hobbies)",
                      "depsymptom(X,sleep_disturbances)",
                      "depsymptom(X,insomnia)",
                      "depsymptom(X,sleeping_too_much)",
                      "depsymptom(X,tiredness)",
                      "depsymptom(X,lack_of_energy)",
                      "depsymptom(X,reduced_appetite)",
                      "depsymptom(X,weight_loss)",
                      "depsymptom(X,increased_cravings)",
                      "depsymptom(X,weight_gain)",
                      "depsymptom(X,anxiety)",
                      "depsymptom(X,agitation)",
                      "depsymptom(X,restlessness)",
                      "depsymptom(X,slowed_thinking)",
                      "depsymptom(X,slow_speaking)",
                      "depsymptom(X,slow_body_movements)",
                      "depsymptom(X,feelings_of_worthlessness)",
                      "depsymptom(X,feelings_of_guilt)",
                      "depsymptom(X,feelings_of_failure)",
                      "depsymptom(X,feelings_of_blame)",
                      "depsymptom(X,trouble_thinking)",
                      "depsymptom(X,trouble_concentrating)",
                      "depsymptom(X,trouble_with_decisions)",
                      "depsymptom(X,trouble_remembering_things)",
                      "depsymptom(X,frequent_thoughts_of_death)",
                      "depsymptom(X,suicidal_thoughts)",
                      "depsymptom(X,suicide_attempts)",
                      "depsymptom(X,suicide)",
                      "depsymptom(X,unexplained_problems)"]

    @staticmethod
    def get_id():
        """Return the id the next constructed patient will receive."""
        return Patient.count

    def __init__(self):
        self.facts = []
        self.id = str(Patient.get_id())
        tag = 'p' + self.id
        for template in Patient.possible_facts:
            # Symptom present with probability 0.5 ...
            if random() < 0.5:
                self.facts.append(template.replace('X', tag))
            # ... and (independently) flagged high-frequency with p = 0.5.
            if random() < 0.5:
                symptom = template.split(',')[1][:-1]
                self.facts.append('freq(' + tag + ',' + symptom + ',high)')
        Patient.count += 1
|
{"/VHA.py": ["/Patient.py"]}
|
43,024,294
|
hjabbot/WiFeS-Spectrum-Extractor
|
refs/heads/main
|
/extract.py
|
from class_Line import *
from class_Spectrum import *
from fn_spaxelSelection import *
#Numpy
import numpy as np
import pandas as pd
#Plotting tools
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib as mpl
#FITS manipulation
from astropy.io import fits
#Create copy of class instances
from copy import deepcopy
#Ordered dictionary
import collections as coll
#Region around line to analyse in Angstrom
window = 200
#How much to raise each line up by for plots
PLT_SHIFT = 0.7
# PLT_SHIFT = 0
#Spectral lines to check for
#HAVEN'T IMPLEMENTED
LINE_H_I = [6564.61, 4862.68, 4341.68, 4102.89]
LINE_He_I = [5875.624,7065.196,7281.349,7816.136, ]
LINE_He_II = [8236.790]
LINE_C_II = [7236.420]
LINE_C_III = [4647.420,4650.250,4651.470,5695.920,]
LINE_C_IV = [5801.330, 5811.980]
LINE_N_II = [6549.86,6585.27]
LINE_O_II = [3727.092,3729.875]
LINE_O_III = [1665.85,4364.436,4932.603,4960.295,5008.240]
LINE_S_II = [4072.3,6718.29,6732.67]
LINE_Si = []
#Load in data to manipulate
def loadFITS(filename):
    '''
    Open a finalised WiFeS P11 FITS file and return its three HDUs.

    Works for any data cube laid out as:
        HDU 0 -> science data
        HDU 1 -> variance data
        HDU 2 -> data quality flags

    Parameters
    ----------
    filename : (str) File to read in, inclusive of file path

    Returns
    -------
    sci : (np.array) Science data cube
    var : (np.array) Variance data cube
    dq  : (np.array) Data quality data cube
    '''
    hdul = fits.open(filename)
    return hdul[0], hdul[1], hdul[2]
#Average the image data across both WiFeS arms
def aveImage(*datacubes):
    '''
    Collapse each data cube along its wavelength axis and average the
    resulting 2D images over all cubes supplied.

    Parameters
    ----------
    *datacubes : (List of 3D np.array)
        Science spaxels to image

    Returns
    -------
    ave : (2D np.array)
        Image of science data
    '''
    # Accumulator sized to the spatial footprint of the first cube.
    total = np.zeros_like(datacubes[0][0])
    for cube in datacubes:
        # All cubes must share the same spatial dimensions.
        assert(cube.shape[1] == total.shape[0])
        assert(cube.shape[2] == total.shape[1])
        # Mean flux along the wavelength axis, accumulated per cube.
        total = np.add(total, np.mean(cube, axis=0))
    # Convert the running total into the mean over all cubes.
    return np.divide(total, len(datacubes))
#Calculate the flux values at each wavelength step, weighted by variance
def calcFlux(sci, var, save_x, save_y, sub_x, sub_y):
    '''
    Average the flux per wavelength over a user-selected "save" region,
    subtract the averaged flux of a "sub" (sky/host) region, weighting every
    spaxel by the reciprocal of its variance (higher variance = less weight).

    Parameters
    ----------
    sci : (3D np.array)
        Science data cube
    var : (3D np.array)
        Variance data cube
    save_x, : (dict){'start':(int), 'end':(int)}
    save_y    Coordinates to average saved spaxels across
    sub_x,  : (dict){'start':(int), 'end':(int)}
    sub_y     Coordinates to average subtracted spaxels across

    Returns
    -------
    fl : (list)
        Spectrum of selected range (one value per wavelength)
    '''
    def _cut(cube, xs, ys):
        # Slice a spatial window out of a cube, keeping the full
        # wavelength axis.
        return cube.data[:, ys['start']:ys['end'], xs['start']:xs['end']]

    save_sci = _cut(sci, save_x, save_y)
    save_var = _cut(var, save_x, save_y)
    sub_sci = _cut(sci, sub_x, sub_y)
    sub_var = _cut(var, sub_x, sub_y)

    # Inverse-variance-weighted mean of the saved region minus that of the
    # subtracted region, per wavelength slice.
    fl = []
    for i in range(save_sci.shape[0]):
        kept = np.average(save_sci[i], weights=np.reciprocal(save_var[i]))
        removed = np.average(sub_sci[i], weights=np.reciprocal(sub_var[i]))
        fl.append(kept - removed)
    return fl
#Calculate the variance for each flux value
def calcVar(var, save_x, save_y, sub_x, sub_y):
    '''
    Propagate the variance of the weighted mean for both selected regions.

    For each region the variance of the inverse-variance-weighted mean is
    1 / sum(1/var) over the spatial axes; the two regions' errors are added.

    Parameters
    ----------
    var : (3D np.array)
        Variance data cube
    save_x, : (dict){'start':(int), 'end':(int)}
    save_y    Coordinates to average saved spaxels across
    sub_x,  : (dict){'start':(int), 'end':(int)}
    sub_y     Coordinates to average subtracted spaxels across

    Returns
    -------
    err : (1D np.array)
        Added error of spaxels in the selected ranges, per wavelength.
    '''
    per_region = []
    for xs, ys in ((save_x, save_y), (sub_x, sub_y)):
        cut = var.data[:, ys['start']:ys['end'], xs['start']:xs['end']]
        # Standard error of the weighted mean across the spatial axes.
        per_region.append(np.reciprocal(np.sum(np.reciprocal(cut), axis=(1, 2))))
    return per_region[0] + per_region[1]
#Calculate an array of each wavelength
def calcWavelength(sci):
    '''
    Calculates an array of wavelengths for each flux value to correspond to,
    since not included by default in the data.

    The axis is rebuilt from the FITS WCS keywords CRVAL3 (starting
    wavelength) and CDELT3 (step).  It is computed as
    ``initial + step * arange(num)`` so the result always has exactly
    ``len(sci.data)`` entries; the previous ``np.arange(initial, final,
    step)`` form could gain or lose a sample to floating-point rounding of
    the endpoint, desynchronising wavelength and flux arrays.

    Parameters
    ----------
    sci : (3D np.array)
        Science data cube (with .header and .data attributes)

    Returns
    -------
    wl : (1D np.array)
        Wavelength for each slice in the data cube
    '''
    initial = sci.header['CRVAL3']
    step = sci.header['CDELT3']
    num = len(sci.data)
    return initial + step * np.arange(num)
#Combine two spectra (e.g. red and blue arms of WiFeS)
def combineSpectra(b_fl, b_var, b_wl, r_fl, r_var, r_wl):
    '''
    Merge the blue and red WiFeS arms into one wavelength-sorted spectrum.

    The overlap statistics are still computed, but the level-matching
    offsets are deliberately zeroed, so the arms are concatenated without
    shifting.

    Parameters
    ----------
    b_fl, r_fl   : (1D np.array) Flux values for each arm
    b_var, r_var : (1D np.array) Variance values for each arm
    b_wl, r_wl   : (1D np.array) Wavelength values for each arm

    Returns
    -------
    comb_fl  : (list) Combined flux array
    comb_var : (list) Combined variance array
    comb_wl  : (list) Combined wavelength array
    '''
    # Extremes of each arm's coverage.
    blue_top = max(b_wl)
    red_bottom = min(r_wl)
    # Number of samples each arm contributes to the overlap region.
    n_blue = len([w for w in b_wl if w > red_bottom])
    n_red = len([w for w in r_wl if w < blue_top])
    # Average flux of each arm inside the overlap (diagnostic only; the
    # offsets below are forced to zero).
    blue_ave = np.mean(b_fl[-n_blue:])
    red_ave = np.mean(r_fl[:n_red])
    r_offset = 0
    b_offset = 0
    # Apply the (zero) shifts, then concatenate the arms.
    shifted_r = [f + r_offset for f in r_fl]
    shifted_b = [f + b_offset for f in b_fl]
    comb_wl = np.concatenate((b_wl, r_wl))
    comb_var = np.concatenate((b_var, r_var))
    comb_fl = np.concatenate((shifted_b, shifted_r))
    # Sort all three arrays together by wavelength (stable sort).
    rows = sorted(zip(comb_wl, comb_fl, comb_var), key=lambda row: row[0])
    comb_wl, comb_fl, comb_var = (list(col) for col in zip(*rows))
    return comb_fl, comb_var, comb_wl
#Takes in blue and red filenames, outputs Spectrum object
def processData(blue_file, red_file, z=0, c='black', mjd_t_peak=0, instrument="WiFeS", obj="SN", offset=0, day=''):
'''
Imports data cubes, extracts important info out of FITS headers,
extracts spectra out of user selected region in data cube,
creates a Spectrum object
Parameters
----------
blue_file, : (str)
red_file File names of WiFeS data cubes to combine and extract info from
z=0 : (float)
Redshift of object
c='black' : (str)
Colour to plot spectrum in
mjd_t_peak=0 : (int)
MJD of time of SN max
instrument="wifes" : (str)
Name of instrument
obj="SN" : (str)
Name of object
offset=0 : (int)
How much to shift spectrum up in plotting so not all spectra are overlaid
day='' : (str)
Date of observations
Returns
-------
processed_spectrum : (Spectrum object)
Filled out Spectrum object from class_Spectrum.py
'''
#Load in data
b_sci, b_var, _ = loadFITS(blue_file)
r_sci, r_var, _ = loadFITS(red_file)
#Extract MJD out of header
mjd = b_sci.header['MJD-OBS']
date = b_sci.header['DATE-OBS']
bad_obj_names = ["", "TOO"]
if b_sci.header['NOTES'].upper() not in bad_obj_names:
obj = b_sci.header['NOTES']
#Generate image for user to see
ave_image = aveImage(r_sci.data, b_sci.data)
#Get user selection of pixels to analyse/reduce
save_x, save_y = select_spaxel(ave_image, date=date,)
sub_x, sub_y = select_spaxel(ave_image, date=date,
rect = (save_x['start'], save_y['start']),
width = save_x['end']-save_x['start'],
height= save_y['end']-save_y['start'],
)
#Reset
start = [None, None]
end = [None, None]
#Calculate spectrum for selected values
b_fl = calcFlux(b_sci, b_var, save_x, save_y, sub_x, sub_y)
b_var= calcVar(b_var, save_x, save_y, sub_x, sub_y)
b_wl = calcWavelength(b_sci)
r_fl = calcFlux(r_sci, r_var, save_x, save_y, sub_x, sub_y)
r_var= calcVar(r_var, save_x, save_y, sub_x, sub_y)
r_wl = calcWavelength(r_sci)
#Combine spectra into single array
fl, var, wl = combineSpectra(b_fl, b_var, b_wl, r_fl, r_var, r_wl)
std = np.sqrt(var)
#Create spectrum object
processed_spectrum = Spectrum(wavelength=wl,
flux=fl,
var=var,
std=std,
date=date,
mjd=mjd,
mjd_t_peak=mjd_t_peak,
instrument=instrument,
obj=obj,
z=z,
offset=offset,
c=c,
)
return processed_spectrum
#Returns order of magnitude of a value
def magnitude(value):
    """Return the order of magnitude of *value* as an int (log10 of the
    absolute value, truncated toward zero)."""
    exponent = np.log10(np.abs(value))
    return int(exponent)
if __name__ == "__main__":
    # ---- Observation catalogue: one entry per epoch to extract ----
    # Each entry maps a label to its blue/red cube paths and plot colour.
    days = {}
    #2019com
    # days['20190401_G-S'] = {'b_file': 'cubes/T2m3wb-20190401.174404-0052.p11.fits', 'r_file': 'cubes/T2m3wr-20190401.180513-0053.p11.fits', 'colour': 'red'}
    # days['20190401_SN-G'] = {'b_file': 'cubes/T2m3wb-20190401.174404-0052.p11.fits', 'r_file': 'cubes/T2m3wr-20190401.180513-0053.p11.fits', 'colour': 'green'}
    # days['20190401_SN-S'] = {'b_file': 'cubes/T2m3wb-20190401.174404-0052.p11.fits', 'r_file': 'cubes/T2m3wr-20190401.180513-0053.p11.fits', 'colour': 'black'}
    # days['20190401'] = {'b_file': 'cubes/T2m3wb-20190401.174404-0052.p11.fits', 'r_file': 'cubes/T2m3wr-20190401.180513-0053.p11.fits', 'colour': 'green'}
    # days['20190402'] = {'b_file': 'cubes/T2m3wb-20190402.174823-0059.p11.fits', 'r_file': 'cubes/T2m3wr-20190402.174823-0059.p11.fits', 'colour': 'blue'}
    # days['20190403'] = {'b_file': 'cubes/T2m3wb-20190403.173753-0066.p11.fits', 'r_file': 'cubes/T2m3wr-20190403.173753-0066.p11.fits', 'colour': 'purple'}
    # days['20190404'] = {'b_file': 'cubes/T2m3wb-20190404.152853-0001.p11.fits', 'r_file': 'cubes/T2m3wr-20190404.152853-0001.p11.fits', 'colour': 'cyan'}
    # days['20190423'] = {'b_file': 'cubes/T2m3wb-20190423.180348-0022.p11.fits', 'r_file': 'cubes/T2m3wr-20190423.173728-0020.p11.fits', 'colour': 'brown'}
    # days['20190426'] = {'b_file': 'cubes/T2m3wb-20190426.174216-0510.p11.fits', 'r_file': 'cubes/T2m3wr-20190426.172117-0509.p11.fits', 'colour': 'orange'}
    days['20190427'] = {'b_file': 'cubes/T2m3wb-20190427.131154-0104.p11.fits', 'r_file': 'cubes/T2m3wr-20190427.135355-0106.p11.fits', 'colour': 'red'}
    # days['20190919'] = {'b_file': 'cubes/IC_4712_b_20190919.fits', 'r_file': 'cubes/IC_4712_r_20190919.fits', 'colour':'black'}
    # days['20190922'] = {'b_file': 'cubes/IC_4712_b_20190922.fits', 'r_file': 'cubes/IC_4712_r_20190922.fits', 'colour':'blue'}
    # days['20191109'] = {'b_file': 'cubes/IC_4712_b_20191109.fits', 'r_file': 'cubes/IC_4712_r_20191109.fits', 'colour':'black'}
    # days['Centre'] = {'b_file': 'cubes/IC_4712_b_20190922.fits', 'r_file': 'cubes/IC_4712_r_20190922.fits', 'colour':'red'} #20190922
    # days['Local'] = {'b_file': 'cubes/IC_4712_b_20190922.fits', 'r_file': 'cubes/IC_4712_r_20190922.fits', 'colour':'blue'} #20190922
    #2019qiz
    # days['19_small'] = {'b_file': '2019qiz/19/T2m3wb-20190919.165814-0100.p11.fits', 'r_file': '2019qiz/19/T2m3wr-20190919.165814-0100.p11.fits', 'colour': 'green'}
    # days['19_large'] = {'b_file': '2019qiz/19/T2m3wb-20190919.165814-0100.p11.fits', 'r_file': '2019qiz/19/T2m3wr-20190919.165814-0100.p11.fits', 'colour': 'green'}
    # days['19_small_std1'] = {'b_file': '2019qiz/19/stds/T2m3wb-20190919.154037-0009.p11.fits', 'r_file': '2019qiz/19/stds/T2m3wr-20190919.154037-0009.p11.fits', 'colour': 'green'}
    # days['19_large_std1'] = {'b_file': '2019qiz/19/stds/T2m3wb-20190919.154037-0009.p11.fits', 'r_file': '2019qiz/19/stds/T2m3wr-20190919.154037-0009.p11.fits', 'colour': 'green'}
    # days['19_small_std2'] = {'b_file': '2019qiz/19/stds/T2m3wb-20190919.155924-0012.p11.fits', 'r_file': '2019qiz/19/stds/T2m3wr-20190919.155924-0012.p11.fits', 'colour': 'green'}
    # days['19_large_std2'] = {'b_file': '2019qiz/19/stds/T2m3wb-20190919.155924-0012.p11.fits', 'r_file': '2019qiz/19/stds/T2m3wr-20190919.155924-0012.p11.fits', 'colour': 'green'}
    # days['22'] = {'b_file': '2019qiz/22/T2m3wb-20190922.164224-0058.p11.fits', 'r_file': '2019qiz/22/T2m3wr-20190922.164224-0058.p11.fits', 'colour': 'blue'}
    # days['22_std1'] = {'b_file': '2019qiz/22/stds/T2m3wb-20190922.084757-0034.p11.fits', 'r_file': '2019qiz/22/stds/T2m3wr-20190922.084757-0034.p11.fits', 'colour': 'blue'}
    # days['22_small_std1'] = {'b_file': '2019qiz/22/stds/T2m3wb-20190922.084757-0034.p11.fits', 'r_file': '2019qiz/22/stds/T2m3wr-20190922.084757-0034.p11.fits', 'colour': 'blue'}
    # days['22_large_std2'] = {'b_file': '2019qiz/22/stds/T2m3wb-20190922.085450-0037.p11.fits', 'r_file': '2019qiz/22/stds/T2m3wr-20190922.085450-0037.p11.fits', 'colour': 'blue'}
    SN2019com = {}
    # For each day to analyse: extract the spectrum interactively (the user
    # picks the save/subtract regions) and merge the blue and red arms.
    for i, key in enumerate(sorted(days.keys())):
        info = days[key]
        print(key)
        # Extract spectrum, merge blue and red arms; each later epoch is
        # shifted down by PLT_SHIFT so the plotted spectra do not overlap.
        SN2019com[key] = processData(info['b_file'], info['r_file'], c=info['colour'], mjd_t_peak=58590, obj="SN2019com", offset=-i*PLT_SHIFT)
        # SN2019com[key].DetermineRedshift(lam_rest=LINE_H_I[0],
        #                                  initial_z=0.0124,
        #                                  window=window,
        #                                  deredshift=True,
        #                                  )
    # files = [
    #     # 'spectra/-7.0.dat',
    #     # 'spectra/+17.0.dat'
    #     # 'spectra/+180.1.dat'
    # ]
    # SN2009ip = {}
    # for file in files:
    #     day = file[8:-6]
    #     wl, fl = np.loadtxt(file, unpack=True)
    #     SN2009ip[day] = Spectrum(wavelength=wl,
    #                              flux=fl,
    #                              date=r't_max {}'.format(day),
    #                              mjd=0,
    #                              mjd_t_peak=0,
    #                              instrument='unknown',
    #                              obj='SN2009ip',
    #                              z=0.005944,
    #                              offset=1,
    #                              c='black',
    #                              )
    #     SN2009ip[day].DetermineRedshift(lam_rest=LINE_H_I[0],
    #                                     initial_z=0.005944,
    #                                     window=window,
    #                                     deredshift=True,
    #                                     )
    # gaussians = {}
    # gaussians['G1'] = {'amp': 0.10, 'fwhm': FWHM2sigma(10400), 'mean':0}
    # gaussians['G2'] = {'amp': 0.90, 'fwhm': FWHM2sigma(230) , 'mean':0}
    # gaussians['G3'] = {'amp':-0.10, 'fwhm': FWHM2sigma(200) , 'mean':800}
    # gaussians['G4'] = {'amp': 0.40, 'fwhm': FWHM2sigma(1500) , 'mean':0}
    tempSN2 = {}
    fig, ax = plt.subplots()
    # ---- Per-epoch trimming, normalisation, saving and plotting ----
    # Find H_alpha
    for i, day in enumerate(days):
        # Create a deepcopy so the stored spectrum is not mutated each loop.
        tempSN = deepcopy(SN2019com[day])
        # Trim around spectral line, convert to velocity space
        # tempSN.TrimWL(min_wl=LINE_H_I[0]-window/2, max_wl=LINE_H_I[0]+window/2)
        tempSN.TrimWL(min_wl=3800, max_wl=7500)
        # tempSN.wl2vel(centre=LINE_H_I[0])
        tempSN.Normalise(ignore_range=[[5300, 5600], [8000,10000]])
        tempSN.SaveSpectrum("2019com/spectra/{}.csv".format(day))
        # Normalise flux
        # tempSN.Scale(factor=1E16)
        # # Create a line object
        # SN2019com_Ha = SNLine(wl=tempSN.wl,
        #                       fl=tempSN.fl,
        #                       vel=tempSN.vel,
        #                       var=tempSN.var,
        #                       std=tempSN.std,
        #                       colour=tempSN.c,
        #                       date=day
        #                       )
        # # Fit gaussians to it
        # SN2019com_Ha.fitCurve(gaussians=gaussians,
        #                       amp_percent_range=100, #Bounds (percent)
        #                       fwhm_percent_range=20, #Bounds (percent)
        #                       continuum_offset_percent_range=1, #Bounds (percent)
        #                       mean_range=200, #Bounds (km/s)
        #                       )
        # tempSN.Scale(factor=1E-16)
        # SN2019com_Ha.Scale(factor=1E-16)
        # Trim plot to show just Halpha
        # tempSN.TrimVel(min_vel=-2000, max_vel=2000)
        # SN2019com_Ha.TrimVel(min_vel=-2000, max_vel=2000)
        # Plot original data
        tempSN.PlotSpectrum(ax, sigma=1, vel=False, alpha=0.8, name=day)
        # tempSN.SaveSpectrum("2019qiz/spectra/NORMALISED_{}_centre-galaxy.csv".format(day))
        # Plot fit data
        # y = np.add(SN2019com_Ha.fl_fit, tempSN.offset)
        # plt.plot(SN2019com_Ha.wl, y, color=SN2019com_Ha.colour, linestyle='-')
        # SN2019com_Ha.printInfo()
        # tempSN2[day] = deepcopy(tempSN)
    # for i, day in enumerate(SN2009ip):
    #     print(SN2009ip[day].fl)
    #     # Create a deepcopy so can overwrite each loop
    #     tempSN = deepcopy(SN2009ip[day])
    #     # Trim around spectral line, convert to velocity space
    #     tempSN.TrimWL(min_wl=3900, max_wl=7000)
    #     # Normalise flux
    #     tempSN.Normalise(ignore_range=[[5300, 5600], [8000,10000]])
    #     # Plot original data
    #     tempSN.PlotSpectrum(ax, sigma=1, vel=False, alpha=0.8, name=day, error=False)
    # Write extracted spectrum to file
    # for day in days:
    #     data = np.transpose([SN2019com[day].wl, SN2019com[day].fl])
    #     np.savetxt('spectra/2019com_201904{}.dat'.format(day), data)
    # ---- Axis labels, line markers and tick formatting ----
    ax.set_xlabel(r'Wavelength ($\AA$)',fontsize=16,family='serif')
    # ax.set_xlabel(r'Velocity (km $\mathrm{s}^{-1}$)',fontsize=16,family='serif')
    ax.set_ylabel(r'Normalised Flux + Offset',fontsize=16,family='serif')
    # Add legend
    # data = mlines.Line2D([],[],color='black',marker='.', linestyle='none', label='Data')
    # fit = mlines.Line2D([],[],color='black', linestyle='-', label='Fit')
    # ax.legend(handles=[data, fit],loc=1)
    # Add emission markers: offsets position the marker text relative to the
    # current axis limits.
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    top_offset = np.subtract(ylim[1], ylim[0])*0.1
    top = ylim[1] - top_offset
    side_offset = np.subtract(xlim[1], xlim[0])*0.03
    # ax.axvline(x=LINE_H_I[0], color='black', linestyle='--', alpha=0.8)
    # ax.text(LINE_H_I[0] + side_offset, top, r'H$\alpha$')
    # ax.axvline(x=LINE_N_II[0], color='black', linestyle='--', alpha=0.8)
    # ax.text(LINE_N_II[0] + side_offset, top, 'N II')
    # ax.axvline(x=LINE_N_II[1], color='black', linestyle='--', alpha=0.8)
    # ax.text(LINE_N_II[1] + side_offset, top, 'N II')
    # Format ticks
    ax.minorticks_on()
    ax.tick_params(axis='both',
                   which='major',
                   direction='in',
                   length=5,
                   width=1,
                   color='black',
                   top=True,
                   right=True,
                   labelsize=12,
                   )
    ax.tick_params(axis='both',
                   which='minor',
                   direction='in',
                   length=2.5,
                   color='black',
                   top=True,
                   right=True,
                   labelsize=12,
                   )
    # NOTE: usetex requires a working LaTeX installation at runtime.
    plt.rc('text', usetex=True)
    plt.rc('font', family='serif')
    plt.show()
    plt.close()
|
{"/extract.py": ["/class_Line.py", "/class_Spectrum.py", "/fn_spaxelSelection.py"]}
|
43,173,189
|
parth-shastri/FaceNet_siamesenn
|
refs/heads/master
|
/embed_clustering.py
|
import numpy as np
import os
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from facenet_my import model
from keras.utils import Progbar
from tensorflow.keras.applications import resnet
from facenet_my import train_data
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage
import config
# Collect anchor/positive/negative embeddings from 50 training batches and
# visualise their 2D t-SNE projection coloured by k-means cluster.
cluster_x = []
prog_bar = Progbar(target=50)
for n, (anchor, pos, neg) in enumerate(train_data.take(50)):
    batch_anchor_embedding = model.embedding(resnet.preprocess_input(anchor))
    batch_pos_embedding = model.embedding(resnet.preprocess_input(pos))
    batch_neg_embedding = model.embedding(resnet.preprocess_input(neg))
    cluster_x.append(batch_anchor_embedding.numpy())
    # Bug fix: the negative batch was appended twice here while the positive
    # embeddings were silently dropped.
    cluster_x.append(batch_pos_embedding.numpy())
    cluster_x.append(batch_neg_embedding.numpy())
    prog_bar.update(n + 1)  # n is 0-based; report completed steps
cluster_x = np.array(cluster_x).reshape((-1, config.EMBED_DIM))
print("\n", cluster_x.shape)
# Standard pipeline: PCA to 50 dims before t-SNE, then cluster in 2D.
pca = PCA(n_components=50).fit_transform(cluster_x)
tsne = TSNE(n_components=2).fit_transform(pca)
kmeans = KMeans().fit_predict(tsne)
fig, ax = plt.subplots()  # fix: plt.subplots() returns (figure, axes)
plt.scatter(tsne[:, 0], tsne[:, 1], c=kmeans)
plt.xlabel("features")
plt.ylabel("features")
plt.title("Visualization of the data")
plt.show()
|
{"/embed_clustering.py": ["/facenet_my.py", "/config.py"], "/facenet_my.py": ["/config.py"]}
|
43,173,190
|
parth-shastri/FaceNet_siamesenn
|
refs/heads/master
|
/config.py
|
# Project-wide constants shared by the FaceNet siamese-network scripts.
IMAGE_SHAPE = (200, 200)  # target size every input image is resized to
BATCH_SIZE = 32  # triplets per training batch
CKPT_DIR = "face_net-ckpt/model_ckpt"  # model checkpoint location
ANCHOR_DIR = "left/left"  # directory of anchor images
POS_DIR = "right/right"  # directory of positive (matching) images
EMBED_DIM = 256  # dimensionality of the face embedding vector
MARGIN = 0.5  # triplet-loss margin
ARCHITECTURE_PATH = "model_architecture.json"  # serialized architecture file
|
{"/embed_clustering.py": ["/facenet_my.py", "/config.py"], "/facenet_my.py": ["/config.py"]}
|
43,173,191
|
parth-shastri/FaceNet_siamesenn
|
refs/heads/master
|
/facenet_my.py
|
# TODO: DON'T MESS UP NAMES OF VARIABLES
# TODO: prefer the SavedModel method for saving instead of saving in the hdf5 format if custom objects are present
import tensorflow as tf
from keras.applications import resnet
import numpy as np
import matplotlib.pyplot as plt
from keras import applications
from tensorflow.keras import layers
from tensorflow.keras import metrics, Model, optimizers, losses
import os
import json
import config
def preprocess(image_path):
    """Load the JPEG at *image_path* and return a float32 tensor resized to
    config.IMAGE_SHAPE."""
    raw = tf.io.read_file(image_path)
    decoded = tf.io.decode_jpeg(raw, channels=3)
    as_float = tf.image.convert_image_dtype(decoded, tf.float32)
    return tf.image.resize(as_float, size=config.IMAGE_SHAPE)
def preprocess_triplets(anchor, pos, neg):
    """Apply preprocess() to each path of an (anchor, positive, negative)
    triplet, returning the three decoded tensors in the same order."""
    return tuple(preprocess(path) for path in (anchor, pos, neg))
# ---- Triplet dataset construction (runs at module import time) ----
# Anchor and positive images are paired by identical sorted filename order.
anchor_images = sorted([os.path.join(config.ANCHOR_DIR, p) for p in os.listdir(config.ANCHOR_DIR)])
pos_images = sorted([os.path.join(config.POS_DIR, p) for p in os.listdir(config.POS_DIR)])
image_count = len(anchor_images)
anchor_dataset = tf.data.Dataset.from_tensor_slices(anchor_images)
pos_dataset = tf.data.Dataset.from_tensor_slices(pos_images)
# The shuffles below happen AFTER the anchor/pos datasets were created, so
# the anchor/positive pairing above is preserved; the shuffled lists only
# feed the negative pool.
rng = np.random.default_rng(232)  # fixed seed for a reproducible negative pool
rng.shuffle(anchor_images)
rng.shuffle(pos_images)
neg_images = anchor_images + pos_images
neg_dataset = tf.data.Dataset.from_tensor_slices(neg_images)
neg_dataset = neg_dataset.shuffle(buffer_size=4096)
dataset = tf.data.Dataset.zip((anchor_dataset, pos_dataset, neg_dataset))
dataset = dataset.map(preprocess_triplets).shuffle(buffer_size=1024)
# NOTE(review): take/skip applied after shuffle means the train/val split is
# not guaranteed to stay disjoint across epochs -- confirm intended.
train_data = dataset.take(round(image_count * 0.8)).batch(config.BATCH_SIZE).prefetch(8)
val_data = dataset.skip(round(image_count * 0.8)).batch(config.BATCH_SIZE).prefetch(8)
def visualize(anchor, pos, neg):
    """Show the first three triplets in a 3x3 grid: one row per triplet,
    columns ordered anchor / positive / negative."""
    fig, axs = plt.subplots(3, 3)
    for row in range(3):
        for col, batch in enumerate((anchor, pos, neg)):
            axs[row, col].imshow(batch[row])
    plt.show()
def get_base_extractor():
    """Return a ResNet50 feature extractor with ImageNet weights.

    Layers before "conv5_block1_out" are frozen; that layer and everything
    after it remain trainable for fine-tuning.

    Bug fixed: the original loop set ``layer.trainable = False``
    unconditionally after the name check, so every layer -- including the
    block meant to be fine-tuned -- ended up frozen.
    """
    be = resnet.ResNet50(include_top=False,
                         weights='imagenet',
                         input_shape=config.IMAGE_SHAPE + (3,))
    trainable = False
    for layer in be.layers:
        if layer.name == "conv5_block1_out":
            trainable = True  # unfreeze from this block onward
        layer.trainable = trainable
    return be
class Embedding(layers.Layer):
    """Embedding head: ResNet50 backbone (see get_base_extractor) followed
    by a small dense stack projecting down to ``embed_dim`` features."""

    def __init__(self, embed_dim=256):
        # embed_dim: size of the final embedding vector.
        super(Embedding, self).__init__()
        self.embed_dim = embed_dim
        self.base_feature_extractor = get_base_extractor()
        self.flatten = layers.Flatten()
        self.dense1 = layers.Dense(512, activation='relu')
        self.bn1 = layers.BatchNormalization()
        self.dense2 = layers.Dense(256, activation='relu')
        self.bn2 = layers.BatchNormalization()
        self.out = layers.Dense(self.embed_dim, name='output_dense')

    def call(self, inputs, **kwargs):
        # Pipeline: ResNet features -> flatten -> 512 + BN -> 256 + BN ->
        # embed_dim linear projection (no final activation).
        x = self.base_feature_extractor(inputs)
        x = self.flatten(x)
        x = self.dense1(x)
        x = self.bn1(x)
        x = self.dense2(x)
        x = self.bn2(x)
        out = self.out(x)
        return out

    """ good practice to define get_config and from_config in case of custom subclassing"""
    def get_config(self):  # needed so the layer can be serialized (e.g. .h5)
        config = super(Embedding, self).get_config()
        config.update({"embed_dim": self.embed_dim})
        return config

    @classmethod  # matches the default implementation; kept for clarity
    def from_config(cls, config):
        return cls(**config)
class DistanceLayer(layers.Layer):
    """Computes squared-L2 distances (anchor-positive, anchor-negative)."""

    def __init__(self, **kwargs):
        super(DistanceLayer, self).__init__(**kwargs)

    def call(self, anchor, positive, negative):
        """Return (||a-p||^2, ||a-n||^2), summed over the embedding axis."""
        sq_diff_pos = tf.square(anchor - positive)
        sq_diff_neg = tf.square(anchor - negative)
        return (
            tf.reduce_sum(sq_diff_pos, -1),
            tf.reduce_sum(sq_diff_neg, -1),
        )

    # get_config/from_config are overridden so the layer round-trips through
    # serialized (.h5 / JSON) model formats.
    def get_config(self):
        return super(DistanceLayer, self).get_config()

    @classmethod
    def from_config(cls, config):
        return cls(**config)
class FaceNet(Model):
    """Triplet-loss Siamese model wrapping the Embedding network.

    Training minimizes max(d(a,p) - d(a,n) + margin, 0) over
    (anchor, positive, negative) triplets, where d is squared L2 distance.
    """
    def __init__(self, margin=0.5, embed_dim=256):
        super(FaceNet, self).__init__()
        self.margin = margin          # triplet-loss margin
        self.embed_dim = embed_dim    # embedding vector size
        self.embedding = Embedding(embed_dim=self.embed_dim)
        self.distance = DistanceLayer()
        self.loss_tracker = tf.metrics.Mean(name='loss')
    def train_step(self, data):
        """One optimizer step on a batch of triplets; returns the tracked loss."""
        # NOTE(review): persistent=True looks unnecessary — tape.gradient is
        # called only once below; confirm before removing.
        with tf.GradientTape(persistent=True) as tape:
            loss = self._compute_loss(data)
        # Only the embedding's weights are updated (the distance layer has none).
        gradients = tape.gradient(loss, self.embedding.trainable_weights)
        self.optimizer.apply_gradients(
            zip(gradients, self.embedding.trainable_weights)
        )
        self.loss_tracker.update_state(loss)
        return {"loss": self.loss_tracker.result()}
    def test_step(self, data):
        """Validation step: compute and track the triplet loss, no weight updates."""
        loss = self._compute_loss(data)
        self.loss_tracker.update_state(loss)
        return {"loss": self.loss_tracker.result()}
    def call(self, inputs, training=None, mask=None):
        """Embed each element of an (anchor, positive, negative) triple and
        return the (anchor-positive, anchor-negative) squared distances."""
        anchor_in, positive_in, negative_in = inputs
        distances = self.distance(
            self.embedding(resnet.preprocess_input(anchor_in)),
            self.embedding(resnet.preprocess_input(positive_in)),
            self.embedding(resnet.preprocess_input(negative_in)),
        )
        return distances
    def _compute_loss(self, data):
        """Per-sample triplet loss: max(d(a,p) - d(a,n) + margin, 0)."""
        ap_dist, an_dist = self.call(data)
        loss = ap_dist - an_dist
        loss = tf.maximum(loss + self.margin, 0)
        return loss
    @property
    def metrics(self):
        # Listing the tracker here lets Keras reset it between epochs.
        return [self.loss_tracker]
    """ good practice to define get_config and from config in case of custom subclassing"""
    def get_config(self): # to save this model in serialized .h5 format we have to override the get_config method
        config = {"margin": self.margin, "embed_dim": self.embed_dim}
        return config
    @classmethod # No need to call this as this is in the same format by default (just for understanding purpose
    def from_config(cls, config, custom_objects=None):
        return cls(**config)
# Build and compile the trainable model. The loss is computed inside
# train_step, so compile() only needs the optimizer.
tr_model = FaceNet(margin=config.MARGIN, embed_dim=config.EMBED_DIM)
tr_model.compile(optimizer=optimizers.Adam(0.0001))
ckpt = tf.keras.callbacks.ModelCheckpoint(config.CKPT_DIR, save_freq='epoch') # saves after each epoch
# model.fit(train_data, epochs=10, validation_data=val_data, callbacks=[ckpt])
# Persist the architecture. NOTE(review): to_json() already returns a JSON
# string, so json.dump stores a double-encoded (quoted) string; the in-memory
# reload below is unaffected, but confirm readers of the file expect that.
json_config = tr_model.to_json()
with open("model_architecture.json", "w") as fp:
    json.dump(json_config, fp)
# must be provided in order to deserialize
# TODO: Don't mess up names while assigning them in the overridden 'get_config' method in custom Model
custom_objects = {"FaceNet": FaceNet, "DistanceLayer": DistanceLayer, "resnet": resnet}
# Rebuild the model from its JSON architecture and restore trained weights.
model = tf.keras.models.model_from_json(json_config, custom_objects=custom_objects)
model.load_weights(config.CKPT_DIR)
if __name__ == "__main__":
    '''calculation of the similarity (Cosine Similarity)'''
    # Sanity check on one validation batch: the anchor must be more similar
    # to its positive than to its negative under the trained embedding.
    samples = next(iter(val_data))
    anchor, pos, neg = samples
    anchor_embedding = model.embedding(resnet.preprocess_input(anchor))
    pos_embedding = model.embedding(resnet.preprocess_input(pos))
    neg_embedding = model.embedding(resnet.preprocess_input(neg))
    print(f"The shape of embeddings :{anchor_embedding.shape, pos_embedding.shape, neg_embedding.shape}")
    similarity = metrics.CosineSimilarity()
    pos_sim = similarity(anchor_embedding, pos_embedding)
    neg_sim = similarity(anchor_embedding, neg_embedding)
    # Fails loudly if the embedding has not learned a useful metric.
    assert pos_sim.numpy() > neg_sim.numpy()
    print(f"The similarity between positive image and anchor:{pos_sim}")
    print(f"The similarity between negative image and anchor:{neg_sim}")
|
{"/embed_clustering.py": ["/facenet_my.py", "/config.py"], "/facenet_my.py": ["/config.py"]}
|
43,181,176
|
oleksdev1/lab002_git
|
refs/heads/main
|
/main.py
|
from first.nums import plus
if __name__ == '__main__':
    # Demo entry point: print the sum of 1 and 2 via the project's plus() helper.
    total = plus(1, 2)
    print(total)
|
{"/main.py": ["/first/nums.py"]}
|
43,211,365
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/urls.py
|
from django.urls import path
from . import views
# URL routes for the visual app: a class-based landing page plus three
# function views (FFT plot page, CSV upload form, sampling-frequency form).
urlpatterns =[
    path('', views.Home.as_view(), name = "Home"),
    path('index/', views.index, name = "Index"),
    path('upload/', views.upload, name = "Upload"),
    path('value/', views.sample, name = "Sampling freq"),
]
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,211,366
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/validators.py
|
from django.core.exceptions import ValidationError
import os
import pandas as pd
# from django.contrib import messages
from requests.api import request
from django.shortcuts import redirect, render
def validate_file(value):
    """Django field validator for an uploaded CSV file.

    Accepts only .csv files whose columns are exactly ['time', 'amplitude'];
    raises ValidationError with a message describing what is missing otherwise.
    """
    ext = os.path.splitext(value.name)[1]
    valid_extentions = ['.csv']
    # Guard clause: reject non-CSV uploads before trying to parse them.
    if ext.lower() not in valid_extentions:
        raise ValidationError(u'Unsupported file extention. please upload a .csv file. ')
    df = pd.read_csv(value)
    col = df.columns.tolist()
    n = len(col)
    if n == 0:
        raise ValidationError(u'No Column to parse ')
    if n != 2:
        # BUG FIX: the original had an unreachable `return redirect(...)` after
        # a raise, and indexed col[1] on single-column files (IndexError).
        if col[0] != 'time':
            raise ValidationError('File misses time value, give samplingfrequency value by clicking the button below')
        if n == 1 or col[1] != 'amplitude':
            raise ValidationError('File misses amplitude value')
        raise ValidationError(u'File doesnt have either amplitude or time')
    # Exactly two columns: the file is acceptable.
    return value
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,211,367
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/forms.py
|
from django import forms
from .models import SourceFile, SampleField
from .validators import validate_file
from . import views
from rest_framework import settings
import pandas as pd
class UploadFileForm(forms.ModelForm):
    """ModelForm for uploading a SourceFile CSV.

    NOTE(review): `settings` is imported from rest_framework at the top of
    this file; FILE_UPLOAD_TYPE looks like a django.conf settings constant —
    confirm the intended import.
    """

    class Meta:
        model = SourceFile
        fields = ('file',)

    def clean_csv_file(self):
        """Validate that the uploaded CSV has exactly the columns time/amplitude.

        NOTE(review): Django only auto-invokes clean_<fieldname>() (here that
        would be clean_file); under its current name this method is not called
        automatically — verify callers.
        """
        upload = self.cleaned_data['file']
        if upload:
            filename = upload.name
            if filename.endswith(settings.FILE_UPLOAD_TYPE):
                df = pd.read_csv(upload)
                cols = df.columns.tolist()
                n = len(cols)
                if n == 0:
                    raise forms.ValidationError(u'No Column to parse ')
                elif n == 1:
                    # Report which of the two expected columns is missing.
                    # BUG FIX: the original compared the column *list* to the
                    # string 'time' (always False) and then re-wrapped its own
                    # ValidationError in a generic "Unable to upload" error.
                    if cols[0] == 'time':
                        raise forms.ValidationError(u'missing amplitude value in the file')
                    raise forms.ValidationError(u'missing Time value in the file')
                # BUG FIX: two columns is the *valid* shape; the original
                # raised "Missing amplitude value" here.
                return upload
            else:
                raise forms.ValidationError("Please upload a .csv extention files only")
        return upload

    def clean(self):
        # BUG FIX: the original called super(UploadFileForm, self, clean),
        # which is a TypeError; clean() must also return cleaned_data.
        cleaned_data = super(UploadFileForm, self).clean()
        return cleaned_data
class SampleForm(forms.Form):
    """Plain form collecting a time-series file plus its sampling frequency.

    NOTE(review): the inner Meta with model/fields has no effect on a
    forms.Form (only ModelForm reads it) — this was probably meant to be a
    ModelForm over SampleField.
    """
    timefile = forms.FileField(widget=forms.FileInput)
    sampling_frequency = forms.IntegerField(widget=forms.NumberInput)
    class Meta:
        model = SampleField
        fields = ('timefile',)
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,211,368
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/views.py
|
from django.shortcuts import redirect, render
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, request
from django.views.generic import TemplateView
from django.core.files.storage import FileSystemStorage
from django.views.decorators.csrf import csrf_exempt
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core.exceptions import ValidationError
from rest_framework import status
import pandas as pd
import numpy as np
import csv
import io
import urllib, base64
import matplotlib.pyplot as plt
from .models import SourceFile
from .forms import UploadFileForm, SampleForm
from . validators import validate_file
from django.contrib import messages
from rest_framework.reverse import reverse
import logging
import os
# Create your views here.
api_url = "http://127.0.0.1:8000/visual/"
class Home(TemplateView):
    # Static landing page; simply renders the home template.
    template_name = "visual/home.html"
def index(request):
    """Render the FFT of an uploaded CSV with 'time'/'amplitude' columns.

    POST: reads the uploaded file and the user-supplied sampling frequency,
    computes the single-sided amplitude spectrum, and renders the plot
    (base64-encoded PNG) into value.html. GET: renders the empty index page.
    """
    if request.method == "POST":
        # BUG FIX: sampfreq1 was undefined (its assignment was commented out),
        # so every POST raised NameError.
        sampfreq1 = request.POST.get("sampling_frequency")
        csv_file = request.FILES['file']
        # Renamed from `csv` — the original shadowed the imported csv module.
        data = pd.read_csv(csv_file)
        samplingFrequency = float(sampfreq1)
        samplingInterval = 1 / samplingFrequency
        time = data['time']
        amplitude = data['amplitude']
        fourierTransform = np.fft.fft(amplitude)/len(amplitude)             # Normalize amplitude
        fourierTransform = fourierTransform[range(int(len(amplitude)/2))]   # Exclude sampling frequency
        tpCount = len(amplitude)
        values = np.arange(int(tpCount/2))
        timePeriod = tpCount/samplingFrequency
        frequencies = values/timePeriod
        plt.title('Fourier transform depicting the frequency components')
        plt.plot(frequencies, abs(fourierTransform))
        plt.xlabel('Frequency')
        plt.ylabel('Amplitude')
        # BUG FIX: plt.fft() does not exist — grab the current figure instead.
        # plt.show() is dropped: it blocks the server process.
        fig = plt.gcf()
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)
        string = base64.b64encode(buf.read())
        uri = urllib.parse.quote(string)
        # BUG FIX: render() accepts a single context dict; the original passed
        # two positional dicts (the second landed in content_type).
        context = {"something": True, "frequency": frequencies, "amplitude": amplitude, "data": uri}
        return render(request, 'visual/value.html', context)
    else:
        return render(request, 'visual/index.html')
def upload(request):
    """Handle SourceFile uploads through UploadFileForm.

    POST with a valid form saves the file and shows the index page; an
    invalid form is re-rendered with its errors. GET (or any other method)
    shows an empty form.
    """
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return render(request, "visual/index.html")
        # BUG FIX: the original returned None (HTTP 500) when the form was
        # invalid; re-render the bound form so errors are displayed.
        return render(request, 'visual/upload.html', {'form': form})
    form = UploadFileForm()
    return render(request, 'visual/upload.html', {'form': form})
def sample(request):
    """Collect a time-series file and its sampling frequency via SampleForm.

    POST with a valid form persists the data on SampleField; GET (or an
    invalid form) renders the form page.
    """
    if request.method == 'POST':
        form = SampleForm(request.POST, request.FILES)
        if form.is_valid():
            # BUG FIX: SampleForm is a plain forms.Form, so form.save() raised
            # AttributeError; persist the cleaned data on the model explicitly.
            from .models import SampleField
            SampleField.objects.create(
                timefile=form.cleaned_data['timefile'],
                sampling_frequency=form.cleaned_data['sampling_frequency'],
            )
            # BUG FIX: redirect() expects a URL/view name, not an HTML string.
            return HttpResponse("<h1>Data saved Successfully<h1>")
        return render(request, 'visual/value.html', {'form': form})
    form = SampleForm()
    return render(request, 'visual/value.html', {'form': form})
# context = {}
# if request.method == 'POST':
# uploaded_file = request.FILES['file']
# fs = FileSystemStorage()
# name = fs.save(uploaded_file.name, uploaded_file)
# url = fs.url(name)
# context['url'] = fs.url(name)
# return render (request, 'visual/upload.html', context)
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,211,369
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/fft.py
|
# # Python example - Fourier transform using numpy.fft method
# import numpy as np
# import pandas as pd
# import matplotlib.pyplot as plt
# df = pd.read_csv('E:\\Django_proj\\mysite\\media\\Acc_time.csv')
# length = 40960
# # How many time points are needed i,e., Sampling Frequency
# samplingFrequency = length;
# # At what intervals time points are sampled
# samplingInterval = 1 / samplingFrequency;
# # # Create subplot
# # figure, axis = plotter.subplots(4, 1)
# # plotter.subplots_adjust(hspace=1)
# # Time points
# time = df['time']
# amplitude = df['amplitude']
# # Frequency domain representation
# fourierTransform = np.fft.fft(amplitude)/len(amplitude) # Normalize amplitude
# fourierTransform = fourierTransform[range(int(len(amplitude)/2))] # Exclude sampling frequency
# tpCount = len(amplitude)
# values = np.arange(int(tpCount/2))
# timePeriod = tpCount/samplingFrequency
# frequencies = values/timePeriod
# # Frequency domain representation
# plt.title('Fourier transform depicting the frequency components')
# plt.plot(frequencies, abs(fourierTransform))
# plt.xlabel('Frequency')
# plt.ylabel('Amplitude')
# plt.show()
import csv
import pandas as pd

# Scratch script: inspect the header row of a sample CSV export and report
# whether it matches the expected ('time', 'amplitude') schema.
file = ("E:\\Django_proj\\restapi\\media\\Acc_time_ext.csv")

df = pd.read_csv(file)
col = df.columns.tolist()
n = len(col)
print(n)
# BUG FIX: both branches printed the identical string 'column', which made
# the check useless; the messages are now distinguishable.
if col[0] != 'time' or col[1] != 'amplitude':
    print('column headers are wrong')
else:
    print('column headers are correct')
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,211,370
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/models.py
|
from django.db import models
from . validators import validate_file
# Create your models here.
class SourceFile(models.Model):
    """An uploaded CSV file (checked by validate_file) with a display title."""
    file = models.FileField(upload_to="media/", validators=[validate_file])
    title = models.CharField(max_length=255)
    def __str__(self):
        # BUG FIX: the original referenced self.name / self.filepath, neither
        # of which exists on this model (AttributeError); use the real fields.
        return self.title + ": " + str(self.file)
class SampleField(models.Model):
    """A time-series file paired with a user-supplied sampling frequency (Hz)."""
    timefile = models.FileField(upload_to="media/")
    sampling_frequency = models.IntegerField(default=0, null=False)
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,211,371
|
paranormman/TEAL_project
|
refs/heads/main
|
/visual/migrations/0001_initial.py
|
# Generated by Django 3.1.7 on 2021-04-29 05:27
from django.db import migrations, models
import visual.validators
class Migration(migrations.Migration):
    """Initial auto-generated schema: creates SampleField and SourceFile.

    Generated by Django; mirror of visual/models.py — do not hand-edit.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='SampleField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timefile', models.FileField(upload_to='media/')),
                ('sampling_frequency', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='SourceFile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='media/', validators=[visual.validators.validate_file])),
                ('title', models.CharField(max_length=255)),
            ],
        ),
    ]
|
{"/api/urls.py": ["/api/views.py"], "/api/models.py": ["/api/validation.py"], "/api/serializers.py": ["/api/models.py"], "/api/views.py": ["/api/models.py", "/api/serializers.py"], "/visual/forms.py": ["/visual/models.py", "/visual/validators.py"], "/visual/views.py": ["/visual/models.py", "/visual/forms.py", "/visual/validators.py"], "/visual/models.py": ["/visual/validators.py"], "/visual/migrations/0001_initial.py": ["/visual/validators.py"]}
|
43,258,833
|
quaternionmedia/kpl
|
refs/heads/main
|
/kpl/config.py
|
from os import environ

# Paths served by the web app (overridable via environment variables).
STATIC_FILES = environ.get('STATIC_FILES', 'web/static')
DIST_DIR = environ.get('DIST_DIR', 'web/dist')

# kRPC server connection settings. Ports are int()-coerced because environ
# values are strings; defaults are unchanged, and the ports are now
# env-overridable like the addresses already were.
KRPC_ADDRESS = environ.get('KRPC_ADDRESS', '127.0.0.1')
KRPC_PORT = int(environ.get('KRPC_PORT', 50000))
KRPC_STREAM_PORT = int(environ.get('KRPC_STREAM_PORT', 50001))

# Crossbar (WAMP router) settings.
# BUG FIX: environ.get returned a *str* port whenever CROSSBAR_PORT was set,
# while the default was an int — coerce so the type is consistent.
CROSSBAR_ADDRESS = environ.get('CROSSBAR_ADDRESS', '127.0.0.1')
CROSSBAR_PORT = int(environ.get('CROSSBAR_PORT', 8080))
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,258,834
|
quaternionmedia/kpl
|
refs/heads/main
|
/setup.py
|
from setuptools import setup, find_packages

# Packaging metadata for kpl; the `kpl` console script runs kpl.main:main.
setup(
    name='kpl',
    install_requires=[
        'pluggy>=0.3,<1.0',
        'krpc>=0.4.8',
        'fastapi>=0.63.0',
        'aiofiles>=0.5.0',
        'orjson>=3.5.1',
        'uvicorn>=0.13.3',
        'autobahn>=12.3.1'
    ],
    entry_points={'console_scripts': ['kpl=kpl.main:main']},
    packages=find_packages(),
)
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,258,835
|
quaternionmedia/kpl
|
refs/heads/main
|
/kpl/api.py
|
import krpc
from fastapi import FastAPI
from starlette.staticfiles import StaticFiles
from kpl.responses import ORJSONResponse
from kpl.constants import flight_chars
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
from json import dumps
from .config import KRPC_ADDRESS, KRPC_PORT, KRPC_STREAM_PORT, STATIC_FILES, DIST_DIR
# FastAPI application: /static serves raw static assets; the front-end build
# output (DIST_DIR) is mounted at the site root.
app = FastAPI()
app.mount('/static', StaticFiles(directory=STATIC_FILES, html=True), name='static')
# NOTE(review): a root mount catches all remaining paths — any API routes
# must be registered before this line to be reachable; confirm intent.
app.mount('/', StaticFiles(directory=DIST_DIR, html=True), name='dist')
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,258,836
|
quaternionmedia/kpl
|
refs/heads/main
|
/kpl/responses.py
|
from fastapi.responses import JSONResponse
from typing import Any
from orjson import dumps
class ORJSONResponse(JSONResponse):
    """JSONResponse variant that serializes the body with orjson."""
    media_type = "application/json"
    def render(self, content: Any) -> bytes:
        # orjson.dumps returns bytes directly, satisfying render()'s contract.
        return dumps(content)
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,258,837
|
quaternionmedia/kpl
|
refs/heads/main
|
/kpl/main.py
|
from kpl.api import app
def main():
    """Serve the FastAPI app on all interfaces, port 8888."""
    from uvicorn import run as serve
    serve(app, host='0.0.0.0', port=8888)


if __name__ == '__main__':
    main()
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,258,838
|
quaternionmedia/kpl
|
refs/heads/main
|
/kpl/__init__.py
|
# Package version, bumped manually on release.
VERSION = '0.0.2'
from pluggy import HookimplMarker
# Decorator used by plugins to mark implementations of 'kpl' hooks.
kpl = HookimplMarker('kpl')
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,258,839
|
quaternionmedia/kpl
|
refs/heads/main
|
/kpl/ckpl.py
|
import krpc
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
from os import environ
import asyncio
from constants import flight_chars
from config import KRPC_ADDRESS, KRPC_PORT, KRPC_STREAM_PORT
class Ckpl(ApplicationSession):
    """WAMP session that streams kRPC flight telemetry to the crossbar router.

    On join it connects to the Kerbal Space Program kRPC server and publishes
    each flight characteristic on the 'local.ksp.<name>' topic as it updates.
    """
    async def onJoin(self, details):
        """Connect to kRPC and start one publishing stream per flight characteristic."""
        print('kerbal session started!')
        self.conn = krpc.connect(name='ckpl',
                        address=KRPC_ADDRESS,
                        rpc_port=KRPC_PORT,
                        stream_port=KRPC_STREAM_PORT,
                        )
        print('connected!')
        self.vessel = self.conn.space_center.active_vessel
        self.refframe = self.vessel.orbit.body.reference_frame
        self.flight = self.vessel.flight()
        self.streams = []
        # One kRPC stream per attribute in flight_chars; each pushes updates
        # into a WAMP publish callback created below.
        for char in flight_chars:
            stream = self.conn.add_stream(getattr, self.flight, char)
            stream.add_callback(self._publish(char))
            self.streams.append(stream)
        [stream.start() for stream in self.streams]
    def _publish(self, channel):
        """Return a callback that publishes stream values on 'local.ksp.<channel>'."""
        print('setting up ', channel)
        def pub(value):
            # print('publishing', channel, value)
            self.publish('local.ksp.' + channel, value)
        return pub
    async def onDisconnect(self):
        # Stop the asyncio loop so the process exits when the router drops us.
        asyncio.get_event_loop().stop()
if __name__ == '__main__':
    from config import CROSSBAR_ADDRESS, CROSSBAR_PORT
    # AUTOBAHN_DEMO_ROUTER (env) overrides the default crossbar websocket URL.
    runner = ApplicationRunner(environ.get('AUTOBAHN_DEMO_ROUTER', f'ws://{CROSSBAR_ADDRESS}:{CROSSBAR_PORT}/ws'), u'realm1',)
    runner.run(Ckpl)
|
{"/kmap.py": ["/kpl.py", "/kerbal.py"], "/index.py": ["/kpl.py", "/kdash.py", "/kmap.py"], "/plottr.py": ["/kdash.py"], "/kdash.py": ["/kpl.py"], "/kerbal.py": ["/kpl.py"], "/kpl/api.py": ["/kpl/responses.py", "/kpl/config.py"], "/kpl/main.py": ["/kpl/api.py"]}
|
43,270,295
|
arslanmurat06/AutoTweetWithAutoCreatedImage
|
refs/heads/master
|
/imageDownloader.py
|
## Importing Necessary Modules
import shutil    # to save it locally
import requests  # to get image from the web

## Set up the image URL and filename
image_url = "https://source.unsplash.com/user/erondu/600x600"
filename = image_url.split("/")[-1] + ".jpeg"

# Stream the response so the whole image never has to sit in memory.
response = requests.get(image_url, stream=True)

# Check if the image was retrieved successfully
if response.status_code == 200:
    # Without decode_content the compressed raw stream would be copied as-is,
    # leaving a zero-size file on disk.
    response.raw.decode_content = True
    # Binary-mode file handle, closed automatically by the context manager.
    with open(filename, 'wb') as f:
        shutil.copyfileobj(response.raw, f)
    print('Image sucessfully Downloaded: ', filename)
else:
    print('Image Couldn\'t be retreived')
|
{"/imageDownloader.py": ["/jsonImporter.py"], "/imageCreater.py": ["/jsonImporter.py"]}
|
43,270,296
|
arslanmurat06/AutoTweetWithAutoCreatedImage
|
refs/heads/master
|
/autoTweet.py
|
from PIL import Image, ImageDraw, ImageFont,ImageFilter
import textwrap
# pip install textwrap
def _draw_centered_lines(draw, font, lines, start_y, image_width, fill, stroke_width):
    """Draw each text line horizontally centered, stacking downward from start_y."""
    y = start_y
    for line in lines:
        line_width, line_height = font.getsize(line)
        x = (image_width - line_width) / 2
        draw.text((x, y), line, fill=fill, font=font, stroke_width=stroke_width, stroke_fill='black')
        y += line_height
    return y

def add_whatsapp_number_and_name(image_path, top_text, bottom_text,center_text):
    """Blur the image at image_path and overlay three wrapped captions.

    top_text and bottom_text are upper-cased; top/center captions are white,
    the bottom caption pink, all with a black stroke. The result is shown
    with Image.show().

    NOTE(review): ImageFont.getsize was removed in Pillow 10 — switch to
    font.getbbox/getlength before upgrading Pillow.
    """
    font = ImageFont.truetype('assets/Roboto-Medium.ttf', 50)
    im1 = Image.open(image_path)
    im2 = im1.filter(ImageFilter.GaussianBlur(radius = 4))
    draw = ImageDraw.Draw(im2)
    image_width, image_height = im1.size
    stroke_width = 5
    top_text = top_text.upper()
    bottom_text = bottom_text.upper()
    # Wrap each caption to the number of characters that fit across the image.
    char_width, char_height = font.getsize('A')
    chars_per_line = image_width // char_width
    top_lines = textwrap.wrap(top_text, width=chars_per_line)
    center_lines = textwrap.wrap(center_text, width=chars_per_line)
    bottom_lines = textwrap.wrap(bottom_text, width=chars_per_line)
    # The three near-identical drawing loops are factored into one helper.
    _draw_centered_lines(draw, font, top_lines, 80, image_width, 'white', stroke_width)
    _draw_centered_lines(draw, font, center_lines, 150, image_width, 'white', stroke_width)
    bottom_y = image_height - char_height * len(bottom_lines) - 250
    _draw_centered_lines(draw, font, bottom_lines, bottom_y, image_width, 'pink', stroke_width)
    im2.show()
if __name__ == '__main__':
    # Demo run: overlay a station name, a WhatsApp caption and a phone number
    # on the previously downloaded 600x600 image.
    top_text = "Show TV"
    center_text="Whatsapp İhbar Hattı"
    bottom_text = "05327433256"
    add_whatsapp_number_and_name('600x600.jpeg', top_text=top_text, bottom_text=bottom_text,center_text=center_text)
|
{"/imageDownloader.py": ["/jsonImporter.py"], "/imageCreater.py": ["/jsonImporter.py"]}
|
43,322,713
|
Rezolventa/autobattle_demo
|
refs/heads/master
|
/frame.py
|
import pygame
from const import WIN_WIDTH, WIN_HEIGHT
from rendering import get_scaled_image, center_coords_to_left_up
class BattleFrame:
    """Window entity inside which the auto-battle is drawn and simulated."""

    # Sprites are shared, immutable surfaces, so class-level storage is fine.
    zombie_1 = get_scaled_image('sprites/zombie_1.png', 4)
    zombie_1_attack = get_scaled_image('sprites/zombie_1_attack.png', 4)

    def __init__(self):
        # BUG FIX: teamA/teamB were mutable *class* attributes, so every
        # BattleFrame instance shared (and accumulated) the same unit lists.
        self.teamA = []   # player's team
        self.teamB = []   # AI team
        self.win = None   # pygame surface to draw onto; assigned by the caller

    def start(self):
        """Give every unit an initial target before the battle begins."""
        for obj in self.teamA + self.teamB:
            obj.set_target()

    def render_all(self):
        """Draw all living units; attacking team-B units use the attack sprite."""
        i = 0
        # Only living units are displayed.
        for obj in self.teamA:
            i += 1
            if obj.hp > 0:
                pygame.draw.circle(self.win, (255, 155, 0), self.get_spot_coords('A', i), 35)
        i = 0
        for obj in self.teamB:
            i += 1
            if obj.hp > 0:
                if obj.status == obj.STATUS_ATTACK:
                    coords = center_coords_to_left_up(self.get_spot_coords('B', i), self.zombie_1_attack)
                    self.win.blit(self.zombie_1_attack, coords)
                else:
                    coords = center_coords_to_left_up(self.get_spot_coords('B', i), self.zombie_1)
                    self.win.blit(self.zombie_1, coords)

    # coordinate helpers
    def get_spot_coords(self, team, number):
        """Return the center coordinates of a unit's sprite slot.

        Slot 1 is the middle, slot 2 above, slot 3 below; team A is drawn to
        the left of the screen center, team B to the right.
        """
        sign_x = -1 if team == 'A' else 1
        x = WIN_WIDTH // 2 + sign_x * 150
        sign_y = {1: 0, 2: -1, 3: 1}.get(number)
        y = WIN_HEIGHT // 2 + sign_y * 150
        return (x, y)

    def add_unit(self, team, unit):
        """Register a unit on team 'A' or 'B' and link it back to this frame."""
        if team == 'A':
            self.teamA.append(unit)
        elif team == 'B':
            self.teamB.append(unit)
        unit.frame = self

    def handle_tick(self):
        """Advance one simulation tick for both teams."""
        self.handle_team(self.teamA, self.teamB)
        self.handle_team(self.teamB, self.teamA)

    def handle_team(self, creature_list_1, creature_list_2):
        """Process attacks and cooldowns of creature_list_1 against creature_list_2."""
        for obj in creature_list_1:
            if obj.busy == 0:
                # Attack cooldown expired: rewind it and flag the attack animation.
                obj.set_busy()
                obj.set_status(obj.STATUS_ATTACK)
                # If the previous target exists and is alive, keep attacking it.
                if obj.target:
                    obj.target.hp -= obj.attack  # TODO: obj.apply_damage handler?
                    print(obj.name, 'attacks', obj.target.name, 'for', obj.attack, '({} left)'.format(obj.target.hp))
                    # Remove a dead target and pick a new one.
                    if not obj.target.hp > 0:
                        creature_list_2.remove(obj.target)
                        obj.set_target()
                # Otherwise pick a new target.
                else:
                    obj.set_target()
            else:
                # Tick down the cooldown and the attack-animation countdown.
                obj.busy -= 1
                obj.animation_countdown -= 1
                if obj.animation_countdown == 0:
                    obj.set_status(obj.STATUS_IDLE)
|
{"/frame.py": ["/rendering.py", "/const.py"], "/rendering.py": ["/const.py"], "/units.py": ["/const.py", "/rendering.py"], "/main.py": ["/const.py", "/frame.py", "/units.py"]}
|
43,322,714
|
Rezolventa/autobattle_demo
|
refs/heads/master
|
/main.py
|
from random import randint
import pygame
from const import WIN_WIDTH, WIN_HEIGHT, FRAME_RATE
from frame import BattleFrame

# Initialize pygame, the frame-rate clock and the main window.
pygame.init()
clock = pygame.time.Clock()
win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
pygame.display.set_caption('auto battle')
class Unit:
    """A combat unit: stats, current target, and animation state."""

    # Class-level defaults; real values are set in __init__ / by BattleFrame.
    name = None
    hp = None
    attack = None            # damage per hit (a number, NOT a method — see below)
    attack_delay = None      # seconds between attacks
    hit_chance = None
    crit_chance = None
    busy = None              # ticks remaining until the next attack
    target = None
    frame = None             # owning BattleFrame, set by BattleFrame.add_unit()
    status = None
    animation_countdown = 0
    filter_countdown = 0
    STATUS_IDLE = 'idle'
    STATUS_ATTACK = 'attack'

    def __init__(self, name, hp, attack , attack_delay, hit_chance, crit_chance):
        self.name = name
        self.hp = hp
        self.attack = attack
        self.attack_delay = attack_delay
        self.hit_chance = hit_chance
        self.crit_chance = crit_chance
        self.status = self.STATUS_IDLE
        self.set_busy()

    def set_busy(self):
        """Reset the attack cooldown, converting attack_delay (seconds) to ticks."""
        self.busy = round(FRAME_RATE * self.attack_delay)

    def set_target(self):
        """Pick a random living unit from the opposing team (None when empty)."""
        if self in self.frame.teamA:
            enemies = self.frame.teamB
        else:
            enemies = self.frame.teamA
        if len(enemies):
            self.target = enemies[randint(0, len(enemies) - 1)]
            print(self.name, 'targets', self.target.name)
        else:
            self.target = None

    def set_status(self, status):
        """Switch the animation status; the attack pose shows for half a second."""
        self.status = status
        if status == self.STATUS_ATTACK:
            self.animation_countdown = FRAME_RATE * 0.5
        else:
            self.animation_countdown = 0

    def take_hit(self, damage):
        # Placeholder for future hit/crit-chance handling.
        pass

    # BUG FIX: the original also defined a method named `attack`, permanently
    # shadowed by the numeric self.attack assigned in __init__ and therefore
    # unreachable on every instance; the dead method has been removed.
# Assemble the demo battle: one strong player unit versus three AI units.
# Unit(name, hp, attack, attack_delay, hit_chance, crit_chance)
player = Unit('player', 450, 15, 1.40, 0.80, 0.15)
enemy1 = Unit('AI-1', 100, 25, 2, 0.80, 0.05)
enemy2 = Unit('AI-2', 100, 23, 2.2, 0.80, 0.05)
enemy3 = Unit('AI-3', 100, 45, 4, 0.80, 0.05)
frame = BattleFrame()
frame.add_unit('A', player)
frame.add_unit('B', enemy1)
frame.add_unit('B', enemy2)
frame.add_unit('B', enemy3)
# Hand the frame the window surface, then give every unit an initial target.
frame.win = win
frame.start()
def main_loop():
    """Run the game loop: tick the simulation and redraw at FRAME_RATE fps."""
    run = True
    while run:
        clock.tick(FRAME_RATE)
        win.fill((0, 0, 0))
        frame.render_all()
        frame.handle_tick()
        # Window close requests end the loop.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        pygame.display.flip()
if __name__ == '__main__':
    main_loop()
|
{"/frame.py": ["/rendering.py", "/const.py"], "/rendering.py": ["/const.py"], "/units.py": ["/const.py", "/rendering.py"], "/main.py": ["/const.py", "/frame.py", "/units.py"]}
|
43,322,715
|
Rezolventa/autobattle_demo
|
refs/heads/master
|
/rendering.py
|
import pygame
def get_scaled_image(image, k):
    """Load *image* from disk and return it scaled uniformly by factor *k*."""
    surface = pygame.image.load(image)
    width, height = surface.get_size()
    scaled_size = (int(width * k), int(height * k))
    return pygame.transform.scale(surface, scaled_size)
def center_coords_to_left_up(coord_tuple, image):
    """Convert center coordinates into the top-left corner for blitting *image*."""
    width, height = image.get_size()
    left = coord_tuple[0] - width // 2
    up = coord_tuple[1] - height // 2
    return (left, up)
|
{"/frame.py": ["/rendering.py", "/const.py"], "/rendering.py": ["/const.py"], "/units.py": ["/const.py", "/rendering.py"], "/main.py": ["/const.py", "/frame.py", "/units.py"]}
|
43,322,716
|
Rezolventa/autobattle_demo
|
refs/heads/master
|
/const.py
|
# Window dimensions in pixels.
WIN_WIDTH = 800
WIN_HEIGHT = 600
# Target frames per second; also used to convert seconds into frame counts.
FRAME_RATE = 30
|
{"/frame.py": ["/rendering.py", "/const.py"], "/rendering.py": ["/const.py"], "/units.py": ["/const.py", "/rendering.py"], "/main.py": ["/const.py", "/frame.py", "/units.py"]}
|
43,410,815
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/attributes/gender.py
|
from attributes import weighted_attr
class Gender:
    """Weighted-random gender identity and expression for an NPC."""

    def __init__(self, name=None, expression=None):
        # Explicit overrides are validated by the picker; otherwise values
        # are drawn at random using the weights below.
        pick_name = weighted_attr.generate(
            ['non-binary', 'female', 'male'],
            [0.2, 49.8, 49.8])
        pick_expression = weighted_attr.generate(
            ['cis', 'trans'],
            [99.4, 0.6])
        self.name = pick_name(name)
        self.expression = pick_expression(expression)

    @property
    def pronoun(self):
        """Subject pronoun matching the gender name ('they' for non-binary)."""
        return {'male': 'he', 'female': 'she'}.get(self.name, 'they')

    @property
    def possessive_pronoun(self):
        """Possessive pronoun matching the gender name ('their' for non-binary)."""
        return {'male': 'his', 'female': 'her'}.get(self.name, 'their')
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,816
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/beliefs/classes.py
|
# Beliefs are clamped to the symmetric range [-5, 5].
max_belief_val = 5
min_belief_val = -max_belief_val
class Belief:
    """A single named belief with a strength clamped to [min_belief_val, max_belief_val]."""

    def __init__(self, name, value):
        # Clamp so the grouping below only has to handle the [-5, 5] range.
        self.value = max(min_belief_val, min(value, max_belief_val))
        self.name = name

    @property
    def group(self):
        """Verbal intensity bucket for the belief's value."""
        # value is clamped, so >= 5 is equivalent to the old == 5 check but
        # robust to float representation noise.
        if self.value >= 5:
            return 'adores'
        if self.value >= 3:
            return 'loves'
        if self.value >= 1:
            return 'likes'
        if self.value > -1:
            return 'is neutral to'
        if self.value > -3:
            return 'dislikes'
        if self.value > -5:
            return 'hates'
        return 'abhors'

    # TODO In the future, enable custom descriptions for each belief
    @property
    def description(self):
        """Human-readable fragment, e.g. 'loves family'.

        Bug fix: the old f-string contained a literal '%s' left over from a
        %-format string, producing output like '%s loves family'.
        """
        return f"{self.group} {self.name}"
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,817
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/npc.py
|
import datetime
import random
import attributes
import beliefs
from relationship import Relationship
# TODO fix this quirk. Create class Campaign? World?
campaignDate = datetime.date(1934, 12, 31)
# NOTE(review): campaignDate is always truthy here, so the `or` fallback to
# today's date can only ever trigger if campaignDate is changed to None.
currentDate = campaignDate or datetime.date.today()
class NPC:
    """ Procedurally generates a NPC.
    Should add config to everything in the future. """

    def __init__(self,
                 gender_name=None,
                 gender_expression=None,
                 sexuality=None,
                 max_age=60,
                 min_age=12):
        # Define sexual traits and name
        self.gender = attributes.Gender(name=gender_name, expression=gender_expression)
        self.sexuality = attributes.Sexuality(sexuality)
        self.name = attributes.generate_name(self.gender.name)
        # Define age in relationship with campaign setting: pick a birthday
        # between min_age and max_age years before the campaign date.
        days_per_year = 365.25
        ageRange = random.randrange(
            round(days_per_year * min_age),
            round(days_per_year * (max_age + 1)))
        self.birthday = currentDate - datetime.timedelta(days=ageRange)
        # Belief system
        self.beliefSystem = beliefs.BeliefSystem()
        # Relationships starts as an empty list
        self.relationships = []

    # Get the age for the current campaignDate
    @property
    def age(self):
        """Age in whole years as of campaignDate (falls back to today)."""
        dob = self.birthday
        current = campaignDate or datetime.date.today()
        years = current.year - dob.year
        if (current.month < dob.month
                or current.month == dob.month
                and current.day < dob.day):
            # The date of birth is later during the year, decrease the age by 1
            years -= 1
        return years

    def generate_family_at_birth(self):
        """Create parent NPCs and link them bidirectionally via Relationship."""
        # For now I will settle for traditional families
        # TODO Monoparental? Homosexual with adopted kids?
        mother = NPC(gender_name='female', min_age=self.age+16, max_age=self.age+44)
        mother.name.other[0] = self.name.other[1]
        self.relationships.append(Relationship(mother, 'parent'))
        mother.relationships.append(Relationship(self, 'offspring'))
        father = NPC(gender_name='male', min_age=self.age+16, max_age=self.age+44)
        father.name.other[0] = self.name.other[0]
        self.relationships.append(Relationship(father, 'parent'))
        father.relationships.append(Relationship(self, 'offspring'))

    def get_relationships(self, type):
        """Return an iterator over relationships whose type equals *type*.

        Bug fix: the old predicate was `lambda x: x.type`, which is truthy
        for every non-empty type string and therefore returned ALL
        relationships regardless of the requested *type*.
        """
        return filter(lambda x: x.type == type, self.relationships)
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,818
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/attributes/sexuality.py
|
from attributes import weighted_attr
class Sexuality:
    """Weighted-random sexuality attribute for an NPC."""

    def __init__(self, name=None):
        # An explicit *name* is validated against the option list by the
        # picker; otherwise one is drawn at random using the weights.
        chooser = weighted_attr.generate(
            ['heterosexual', 'homosexual', 'bisexual'],
            [90, 8, 2])
        self.name = chooser(name)
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,819
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/npcgen.py
|
from npc import NPC
from itertools import groupby
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--gender', nargs='?',
help='Female, male or non-binary')
parser.add_argument('--expression', nargs='?',
help='Gender expression, accepted values are: cis or trans')
parser.add_argument('--sexuality', nargs='?',
help='Heterosexual, homosexual or bisexual')
parser.add_argument('--age', nargs='?', help='In years, defaults to a 12 to 60 range')
args = parser.parse_args()
npc = NPC(
gender_name=args.gender,
gender_expression=args.expression,
sexuality=args.sexuality,
max_age=int(args.age or 60),
min_age=int(args.age or 12)
)
print(f"{npc.name.full} is a {npc.sexuality.name} {npc.gender.name}")
print(f"{npc.gender.pronoun.capitalize()} was born on "\
f"{npc.birthday.strftime('%b %d %Y')}")
print(f"{npc.gender.pronoun.capitalize()} is now {npc.age} years old")
def groupBeliefs(group):
    """Join the belief names in *group* into an English list ('a, b and c')."""
    names = [belief.name for belief in group]
    if len(names) == 1:
        return names[0]
    return ' and '.join([', '.join(names[:-1]), names[-1]])
print(f"{npc.gender.pronoun.capitalize()} has the following beliefs:")
for key, group in groupby(npc.beliefSystem.beliefs, lambda x: x.group):
print(f"* {key} {groupBeliefs(group)}")
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,820
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/attributes/data/names.py
|
names = {
'male': [
'Antonio',
'Manuel',
'José',
'Francisco',
'David',
'Juan',
'José Antonio',
'Javier',
'Daniel',
'José Luis',
'Francisco Javier',
'Carlos',
'Jesús',
'Alejandro',
'Miguel',
'José Manuel',
'Rafael',
'Miguel Ángel',
'Pedro',
'Pablo',
'Ángel',
'Sergio',
'José María',
'Fernando',
'Jorge',
'Luis',
'Alberto',
'Juan Carlos',
'Ávaro',
'Adrián',
'Juan José',
'Diego',
'Raúl',
'Iván',
'Juan Antonio',
'Rubén',
'Enrique',
'Óscar',
'Ramón',
'Vicente',
'Andrés',
'Juan Manuel',
'Joaquín',
'Santiago',
'Víctor',
'Eduardo',
'Mario',
'Roberto',
'Jaime',
'Francisco José',
'Marcos',
'Ignacio',
'Alfonso',
'Jordi',
'Hugo',
'Ricardo',
'Salvador',
'Guillermo',
'Emilio',
'Gabriel',
'Marc',
'Gonzalo',
'Julio',
'Julián',
'Mohamed',
'José Miguel',
'Tomás',
'Martín',
'Agustín',
'José Ramón',
'Nicolás',
'Ismael',
'Joan',
'Félix',
'Samuel',
'Cristian',
'Aitor',
'Lucas',
'Héctor',
'Juan Francisco',
'Iker',
'Josép',
'José Carlos',
'Álex',
'Mariano',
'Domingo',
'Sebastian',
'Alfredo',
'César',
'José Ángel',
'Felipe',
'José Ignacio',
'Victor Manuel',
'Rodrigo',
'Luis Miguel',
'Mateo',
'José Francisco',
'Juan Luis',
'Xavier',
'Albert'
],
'female': [
'María Carmen',
'María',
'Carmen',
'Ana María',
'Josefa',
'Isabel',
'María Pilar',
'María Dolores',
'Laura',
'María Teresa',
'Ana',
'Cristina',
'Marta',
'María Angeles',
'Francisca',
'Lucía',
'María Isabel',
'María Jose',
'Antonia',
'Dolores',
'Sara',
'Paula',
'Elena',
'María Luisa',
'Raquel',
'Rosa María',
'Pilar',
'Concepción',
'Manuela',
'María Jesus',
'Mercedes',
'Julia',
'Beatriz',
'Nuria',
'Silvia',
'Rosario',
'Juana',
'Alba',
'Irene',
'Teresa',
'Encarnación',
'Patricia',
'Montserrat',
'Andrea',
'Rocío',
'Mónica',
'Rosa',
'Alicia',
'María Mar',
'Sonia',
'Sandra',
'Ángela',
'Marina',
'Susana',
'Natalia',
'Yolanda',
'Margarita',
'María Josefa',
'Claudia',
'Eva',
'María Rosario',
'Inmaculada',
'Sofía',
'María Mercedes',
'Carla',
'Ana Isabel',
'Esther',
'Noelia',
'Verónica',
'Ángeles',
'Nerea',
'Carolina',
'María Victoria',
'Eva María',
'Inés',
'Míriam',
'María Rosa',
'Daniela',
'Lorena',
'Ana Belen',
'María Elena',
'María Concepción',
'Victoria',
'Amparo',
'María Antonia',
'Catalina',
'Martina',
'Lidia',
'Alejandra',
'Celia',
'María Nieves',
'Consuelo',
'Olga',
'Ainhoa',
'Fátima',
'Gloria',
'Emilia',
'María Soledad',
'Clara',
'María Cristina'
]
}
surnames = [
'García',
'Rodríguez',
'González',
'Fernández',
'López',
'Martínez',
'Sánchez',
'Pérez',
'Gémez',
'Martín',
'Jiménez',
'Ruiz',
'Hernández',
'Díaz',
'Moreno',
'Muñoz',
'Álvarez',
'Romero',
'Alonso',
'Gutiérrez',
'Navarro',
'Torres',
'Domínguez',
'Vázquez',
'Ramos',
'Gil',
'Ramírez',
'Serrano',
'Blanco',
'Molina',
'Morales',
'Suárez',
'Ortega',
'Delgado',
'Castro',
'Ortiz',
'Marín',
'Rubio',
'Sanz',
'Núñez',
'Medina',
'Iglesias',
'Cortes',
'Castillo',
'Garrido',
'Santos',
'Lozano',
'Guerrero',
'Cano',
'Prieto',
'Mendez',
'Cruz',
'Flores',
'Herrera',
'Gallego',
'Márquez',
'León',
'Peña',
'Calvo',
'Cabrera',
'Vidal',
'Campos',
'Vega',
'Fuentes',
'Carrasco',
'Reyes',
'Díez',
'Caballero',
'Nieto',
'Aguilar',
'Santana',
'Pascual',
'Herrero',
'Montero',
'Hidalgo',
'Giménez',
'Lorenzo',
'Ibáñez',
'Vargas',
'Santiago',
'Durán',
'Ferrer',
'Benítez',
'Mora',
'Arias',
'Vicente',
'Carmona',
'Crespo',
'Roman',
'Soto',
'Pastor',
'Velasco',
'Sáez',
'Rojas',
'Moya',
'Parra',
'Soler',
'Bravo',
'Gallardo',
'Esteban'
]
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,821
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/main.py
|
from npc import NPC
from itertools import groupby
npc = NPC()
print(f"{npc.name.full} is a {npc.sexuality.name} {npc.gender.name}")
print(f"{npc.gender.pronoun.capitalize()} was born on "\
f"{npc.birthday.strftime('%b %d %Y')}")
print(f"{npc.gender.pronoun.capitalize()} is now {npc.age} years old")
print()
# Family
npc.generate_family_at_birth()
print(f"{npc.gender.possessive_pronoun.capitalize()} parents are:")
for rel in npc.get_relationships('parent'):
print(f"{rel.person.name.full}, {rel.person.age}")
print()
# Beliefs display
def groupBeliefs(group):
    """Join the belief names in *group* into an English list ('a, b and c')."""
    names = [belief.name for belief in group]
    if len(names) == 1:
        return names[0]
    return ' and '.join([', '.join(names[:-1]), names[-1]])
print(f"{npc.gender.pronoun.capitalize()} has the following beliefs:")
for key, group in groupby(npc.beliefSystem.beliefs, lambda x: x.group):
print(f"* {key} {groupBeliefs(group)}")
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,822
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/relationship.py
|
class Relationship:
    """Directed link from one NPC to another, labelled with a relation type."""

    def __init__(self, char, type):
        # Attribute names (.person / .type) are relied on by callers.
        self.type = type
        self.person = char
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,823
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/attributes/weighted_attr.py
|
import random
# Returns a function to select an attribute from a list based on weights
def generate(options, w):
    """Return a picker callable for *options* weighted by *w*.

    The returned callable takes an optional override ``name``: if ``None``,
    an option is drawn at random using the weights; otherwise the override
    is validated against *options* and returned unchanged.

    Raises:
        ValueError: if the weight list length does not match *options*, or
            if an override is not a valid option.  (ValueError subclasses
            Exception, so existing ``except Exception`` callers still work.)
    """
    if len(options) != len(w):
        raise ValueError("The number of weights does not match the population")

    def callback(name=None):
        if name is None:
            name = random.choices(options, weights=w)[0]
        elif name not in options:
            # If passing an override, it must still be amongst the valid values
            raise ValueError("Invalid option")
        return name

    return callback
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,824
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/attributes/name.py
|
import random
import math
from attributes.data.names import names, surnames
def generate_name(genderName):
    """Pick a weighted-random first name for *genderName* plus two surnames.

    Gender names without a dedicated list (e.g. 'non-binary') draw from the
    male and female lists interleaved.
    """
    # Bug fix: the old code did names[genderName] (KeyError for genders not
    # in the dict, so the fallback branch was unreachable) and then
    # names.male / names.female (attribute access on a plain dict).
    nameList = names.get(genderName)
    if nameList is None:
        zippedList = zip(names['male'], names['female'])
        nameList = [val for pair in zippedList for val in pair]
    # Weights will range from .5 to 1.5 times the name list length,
    # which will make the first names in the list 3 times more likely
    min_weight = math.floor(len(nameList) / 2)
    max_weight = len(nameList) + min_weight
    first = random.choices(nameList, range(max_weight, min_weight, -1))[0]
    return Name(first, random.choice(surnames), random.choice(surnames))
class Name:
    """A first name plus any number of additional name parts (surnames)."""

    def __init__(self, first, *argv):
        self.first = first
        self.other = list(argv)

    @property
    def full(self):
        """The complete name with parts separated by spaces.

        Note: with no extra parts this yields a trailing space, matching
        the established behaviour.
        """
        rest = ' '.join(self.other)
        return self.first + ' ' + rest
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,825
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/beliefs/belief_system.py
|
from beliefs.classes import Belief
import numpy
import math
# Topics an NPC can hold a belief about; each becomes one Belief instance.
belief_list = [
    "religion",
    "tradition",
    "wealth",
    "power",
    "knowledge",
    "homeland",
    "peace",
    "justice",
    "law",
    "sex",
    "romance",
    "family",
    "friendship",
    "honesty",
    "independence"
]
# Make -5.5 and +5.5 three standard deviations away from the mean.
# This way 99.73% of sampled values will fall between those two bounds.
mean = 0
stdev = 5.5/3
class BeliefSystem:
    """A set of randomly-weighted beliefs, kept sorted by strength."""

    def __init__(self):
        # Draw each belief's strength from a normal distribution, then put
        # the strongest beliefs first.
        self.beliefs = [
            Belief(topic, numpy.random.normal(mean, stdev))
            for topic in belief_list
        ]
        self.sort(True)

    def sort(self, rev=False):
        """Sort beliefs in place by value; rev=True puts strongest first."""
        self.beliefs.sort(key=lambda belief: belief.value, reverse=rev)
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,826
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/beliefs/__init__.py
|
from beliefs.belief_system import BeliefSystem
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,410,827
|
Ramonacus/cthulhu-npc
|
refs/heads/main
|
/sample/attributes/__init__.py
|
from attributes.sexuality import Sexuality
from attributes.gender import Gender
from attributes.name import Name, generate_name
|
{"/main.py": ["/attributes.py", "/name_generator.py"], "/sample/attributes/gender.py": ["/attributes.py"], "/sample/npc.py": ["/attributes.py"], "/sample/attributes/sexuality.py": ["/attributes.py"]}
|
43,479,352
|
hackit90/django-invoices
|
refs/heads/main
|
/pvs_suban/migrations/serializers.py
|
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer
from .models import Contact, InvoicePosition, Address, Invoice, Country
#Adresse integrieren
class AddressNestedSerializer(ModelSerializer):
class Meta:
model = Address
fields = ['street', 'zip', 'city', 'country', 'id']
class ContactSerializer(ModelSerializer):
addresses = AddressNestedSerializer(many=True)
class Meta:
model = Contact
fields = ['type', 'salutation', 'name', 'email','addresses', 'id']
read_only_fields = ['addresses']
#InvoicePosition integrieren
class InvoicePositionNestedSerializer(ModelSerializer):
class Meta:
model = InvoicePosition
fields = '__all__'
class InvoiceSerializer(ModelSerializer):
InvoicePositions = InvoicePositionNestedSerializer(many=True)
total_amount = serializers.FloatField(source='total')
class Meta:
model = Invoice
fields = ['title', 'body', 'date', 'due', 'condition', 'InvoicePositions', 'total_amount', 'id']
read_only_fields = ['InvoicePositions']
class InvoicePositionSerializer(ModelSerializer):
class Meta:
model = InvoicePosition
fields = '__all__'
class AddressSerializer(ModelSerializer):
    """Serializes Address rows, flattening the country and contact relations."""
    # NOTE(review): SerializerMethodField takes `method_name`, not `source` —
    # DRF overrides `source` internally, so these kwargs appear to be ignored
    # and the fields resolve to get_country_name / get_contact by naming
    # convention anyway. Confirm against the installed DRF version.
    country_name = serializers.SerializerMethodField(source='get_country_name')
    contact = serializers.SerializerMethodField(source='get_contact')
    class Meta:
        model = Address
        fields = ['street', 'zip', 'city', 'invoices', 'contact', 'country_name', 'id']
    def get_country_name(self, obj):
        # Display value of the related Country row.
        return obj.country.value
    def get_contact(self, obj):
        # Name of the owning Contact.
        return obj.contact.name
class CountrySerializer(ModelSerializer):
class Meta:
model = Country
fields = '__all__'
|
{"/pvs_suban/urls.py": ["/pvs_suban/views.py"], "/pvs_suban/views.py": ["/pvs_suban/models.py"], "/pvs_suban/admin.py": ["/pvs_suban/models.py"]}
|
43,479,353
|
hackit90/django-invoices
|
refs/heads/main
|
/pvs_suban/admin.py
|
from django.contrib import admin
from django.urls import reverse
from django.db.models import Count
from .models import Contact, Address, Country, Invoice, InvoicePosition
# Register your models here.
class AddressInline(admin.StackedInline):
model = Address
extra = 0
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    """Admin for Contact with inline address editing and an address counter."""
    inlines = [AddressInline]
    list_display = ['name', 'type', 'address_count']
    search_fields = ['name']
    list_filter = ['type']
    def address_count(self, obj):
        # Number of related Address rows, shown as a changelist column.
        count = obj.addresses.count()
        return count
@admin.register(Country)
class CountryAdmin(admin.ModelAdmin):
list_display = ['key', 'value']
search_fields = ['value', 'key']
class InvoposInline(admin.StackedInline):
model = InvoicePosition
extra = 0
@admin.register(Invoice)
class InvoiceAdmin(admin.ModelAdmin):
list_display = ['title', 'date','due', 'total', 'contact_name']
inlines = [InvoposInline]
search_fields = ['title', 'address__contact__name']
def contact_name(self, obj):
return obj.address.contact.name
contact_name.short_description = 'contact name'
|
{"/pvs_suban/urls.py": ["/pvs_suban/views.py"], "/pvs_suban/views.py": ["/pvs_suban/models.py"], "/pvs_suban/admin.py": ["/pvs_suban/models.py"]}
|
43,593,131
|
slimanemd/ud_cnfsp
|
refs/heads/main
|
/app.py
|
# ==========================================================
# imports
# python
from os.path import abspath, join as join_path, pardir
# 3rd party:
from flask import Flask, redirect, render_template, request
# my libs
from git_webhook import webhook #import git #import os
# ==========================================================
# varaibles
instance_path = abspath(join_path(abspath(__file__), pardir))
#process_comments = False
# application
app = Flask(
__name__,
instance_path=instance_path,
template_folder='templates'
)
# ==========================================================
# jinja templates
# process_comments = True
# remove comments
@app.template_filter()
def remove_comments(code):
    """Jinja filter: strip comment and blank lines from *code*, unless the
    current request carries a 'wc' ("with comments") query argument."""
    process_comments = False
    if 'wc' in request.args.keys(): process_comments = True
    output = code
    if process_comments == False:
        lines = code.splitlines()
        # NOTE(review): any line containing '#' anywhere is dropped, even if
        # the '#' sits inside a string literal — presumably acceptable for
        # display purposes; confirm.
        lines_without_comments = [line for line in lines if not (line.__contains__("#") or line=="")]
        output = "\n" + "\n".join(lines_without_comments) # "" + ("\n".join(lines_without_comments)) + ""
    return output
# randomis
@app.template_filter()
def randomis(code):
import random
output = str(random.randint(0,100))
return output
# ==================================================================================================
# Endpoints
# home
@app.route("/", methods=['GET'])
def index():
return render_template("index.html", data='cnfsp') #getItem()
# getItem
@app.route("/sec/<item>", methods=['GET'])
def getItem(item='cnfsp'):
    """Render the index page for a known section; redirect home otherwise."""
    known_sections = ['drupal', 'cnfsp', 'git', 'divers', 'others']
    if item not in known_sections:
        return redirect("/", code=302)
    return render_template("index.html", data=item)
# webhook
@app.route("/update_server", methods=['POST'])
def update_server_from_gitwh():
return webhook()
# ==================================================================================================
# main
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True, port=5001)
|
{"/app.py": ["/git_webhook.py"]}
|
43,593,132
|
slimanemd/ud_cnfsp
|
refs/heads/main
|
/git_webhook.py
|
# ===========================================================================
#
import os
import git
# ===========================================================================
#
# ===========================================================================
# webhook
def webhook():
    """Pull the latest 'main' branch into the deployed checkout and touch the
    WSGI loader so PythonAnywhere reloads the web app with the new code."""
    repo = git.Repo('/home/slimanemd/udcnf')
    repo.remotes.origin.pull('main')
    # Touching the WSGI file is PythonAnywhere's reload trigger.
    app_loader = "/var/www/slimanemd_pythonanywhere_com_wsgi.py"
    os.system("touch " + app_loader)
    msg = "Updated site version successfully oh"
    return msg, 200
def webhook2():
def callGitOriginPul(): #import os
repo = git.Repo('/home/slimanemd/udcnf')
origin = repo.remotes.origin
origin.pull('main') #rigin.pull() #os.system('/var/www/aliben_pythonanywhere_com_wsgi.py')
return "Updated site version successfully"
msg = callGitOriginPul()
print(msg)
def touch2():
import os
app_loader = "/var/www/slimanemd_pythonanywhere_com_wsgi.py"
os.system("touch " + app_loader)
#webhook()
#touch2()
|
{"/app.py": ["/git_webhook.py"]}
|
43,695,658
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/selfdrive/debug/filter_log_message.py
|
#!/usr/bin/env python3
import os
import argparse
import json
import cereal.messaging as messaging
LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"CRITICAL": 50,
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--level', default='DEBUG')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
sock = messaging.sub_sock("logMessage", poller, addr=args.addr)
min_level = LEVELS[args.level]
while True:
polld = poller.poll(1000)
for sock in polld:
evt = messaging.recv_one(sock)
log = json.loads(evt.logMessage)
if log['levelnum'] >= min_level:
print(f"{log['filename']}:{log.get('lineno', '')} - {log.get('funcname', '')}: {log['msg']}")
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,695,659
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/panda/tests/safety/test_nissan.py
|
#!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
from panda.tests.safety.common import StdTest, make_msg
ANGLE_MAX_BP = [1.3, 10., 30.]
ANGLE_MAX_V = [540., 120., 23.]
ANGLE_DELTA_BP = [0., 5., 15.]
ANGLE_DELTA_V = [5., .8, .15] # windup limit
ANGLE_DELTA_VU = [5., 3.5, 0.4] # unwind limit
TX_MSGS = [[0x169, 0], [0x2b1, 0], [0x4cc, 0], [0x20b, 2]]
def twos_comp(val, bits):
    """Return the unsigned two's-complement encoding of *val* in *bits* bits."""
    return val if val >= 0 else (1 << bits) + val
def sign(a):
    """Return 1 for positive *a*, otherwise -1 (note: sign(0) == -1)."""
    return 1 if a > 0 else -1
class TestNissanSafety(unittest.TestCase):
@classmethod
def setUp(cls):
cls.safety = libpandasafety_py.libpandasafety
cls.safety.set_safety_hooks(Panda.SAFETY_NISSAN, 0)
cls.safety.init_tests_nissan()
def _angle_meas_msg(self, angle):
to_send = make_msg(0, 0x2)
angle = int(angle * -10)
t = twos_comp(angle, 16)
to_send[0].RDLR = t & 0xFFFF
return to_send
def _set_prev_angle(self, t):
t = int(t * -100)
self.safety.set_nissan_desired_angle_last(t)
def _angle_meas_msg_array(self, angle):
for i in range(6):
self.safety.safety_rx_hook(self._angle_meas_msg(angle))
def _lkas_state_msg(self, state):
to_send = make_msg(0, 0x1b6)
to_send[0].RDHR = (state & 0x1) << 6
return to_send
def _lkas_control_msg(self, angle, state):
to_send = make_msg(0, 0x169)
angle = int((angle - 1310) * -100)
to_send[0].RDLR = ((angle & 0x3FC00) >> 10) | ((angle & 0x3FC) << 6) | ((angle & 0x3) << 16)
to_send[0].RDHR = ((state & 0x1) << 20)
return to_send
def _speed_msg(self, speed):
to_send = make_msg(0, 0x29a)
speed = int(speed / 0.00555 * 3.6)
to_send[0].RDLR = ((speed & 0xFF) << 24) | ((speed & 0xFF00) << 8)
return to_send
def _brake_msg(self, brake):
to_send = make_msg(1, 0x454)
to_send[0].RDLR = ((brake & 0x1) << 23)
return to_send
def _send_gas_cmd(self, gas):
to_send = make_msg(0, 0x15c)
to_send[0].RDHR = ((gas & 0x3fc) << 6) | ((gas & 0x3) << 22)
return to_send
def _acc_button_cmd(self, buttons):
to_send = make_msg(2, 0x20b)
to_send[0].RDLR = (buttons << 8)
return to_send
def test_spam_can_buses(self):
StdTest.test_spam_can_buses(self, TX_MSGS)
def test_angle_cmd_when_enabled(self):
# when controls are allowed, angle cmd rate limit is enforced
# test 1: no limitations if we stay within limits
speeds = [0., 1., 5., 10., 15., 100.]
angles = [-300, -100, -10, 0, 10, 100, 300]
for a in angles:
for s in speeds:
max_delta_up = np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_V)
max_delta_down = np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_VU)
angle_lim = np.interp(s, ANGLE_MAX_BP, ANGLE_MAX_V)
# first test against false positives
self._angle_meas_msg_array(a)
self.safety.safety_rx_hook(self._speed_msg(s))
self._set_prev_angle(np.clip(a, -angle_lim, angle_lim))
self.safety.set_controls_allowed(1)
self.assertEqual(True, self.safety.safety_tx_hook(self._lkas_control_msg(
np.clip(a + sign(a) * max_delta_up, -angle_lim, angle_lim), 1)))
self.assertTrue(self.safety.get_controls_allowed())
self.assertEqual(True, self.safety.safety_tx_hook(
self._lkas_control_msg(np.clip(a, -angle_lim, angle_lim), 1)))
self.assertTrue(self.safety.get_controls_allowed())
self.assertEqual(True, self.safety.safety_tx_hook(self._lkas_control_msg(
np.clip(a - sign(a) * max_delta_down, -angle_lim, angle_lim), 1)))
self.assertTrue(self.safety.get_controls_allowed())
# now inject too high rates
self.assertEqual(False, self.safety.safety_tx_hook(self._lkas_control_msg(a + sign(a) *
(max_delta_up + 1), 1)))
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self._set_prev_angle(np.clip(a, -angle_lim, angle_lim))
self.assertTrue(self.safety.get_controls_allowed())
self.assertEqual(True, self.safety.safety_tx_hook(
self._lkas_control_msg(np.clip(a, -angle_lim, angle_lim), 1)))
self.assertTrue(self.safety.get_controls_allowed())
self.assertEqual(False, self.safety.safety_tx_hook(self._lkas_control_msg(a - sign(a) *
(max_delta_down + 1), 1)))
self.assertFalse(self.safety.get_controls_allowed())
# Check desired steer should be the same as steer angle when controls are off
self.safety.set_controls_allowed(0)
self.assertEqual(True, self.safety.safety_tx_hook(self._lkas_control_msg(a, 0)))
def test_angle_cmd_when_disabled(self):
self.safety.set_controls_allowed(0)
self._set_prev_angle(0)
self.assertFalse(self.safety.safety_tx_hook(self._lkas_control_msg(0, 1)))
self.assertFalse(self.safety.get_controls_allowed())
def test_brake_disengage(self):
StdTest.test_allow_brake_at_zero_speed(self)
StdTest.test_not_allow_brake_when_moving(self, 0)
def test_gas_rising_edge(self):
self.safety.set_controls_allowed(1)
self.safety.safety_rx_hook(self._send_gas_cmd(100))
self.assertFalse(self.safety.get_controls_allowed())
def test_acc_buttons(self):
self.safety.set_controls_allowed(1)
self.safety.safety_tx_hook(self._acc_button_cmd(0x2)) # Cancel button
self.assertTrue(self.safety.get_controls_allowed())
self.safety.safety_tx_hook(self._acc_button_cmd(0x1)) # ProPilot button
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self.safety.safety_tx_hook(self._acc_button_cmd(0x4)) # Follow Distance button
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self.safety.safety_tx_hook(self._acc_button_cmd(0x8)) # Set button
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self.safety.safety_tx_hook(self._acc_button_cmd(0x10)) # Res button
self.assertFalse(self.safety.get_controls_allowed())
self.safety.set_controls_allowed(1)
self.safety.safety_tx_hook(self._acc_button_cmd(0x20)) # No button pressed
self.assertFalse(self.safety.get_controls_allowed())
def test_relay_malfunction(self):
StdTest.test_relay_malfunction(self, 0x169)
def test_fwd_hook(self):
buss = list(range(0x0, 0x3))
msgs = list(range(0x1, 0x800))
blocked_msgs = [0x169,0x2b1,0x4cc]
for b in buss:
for m in msgs:
if b == 0:
fwd_bus = 2
elif b == 1:
fwd_bus = -1
elif b == 2:
fwd_bus = -1 if m in blocked_msgs else 0
# assume len 8
self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8)))
if __name__ == "__main__":
unittest.main()
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,695,660
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/panda/tests/safety/test_volkswagen_mqb.py
|
#!/usr/bin/env python3
import unittest
import numpy as np
import crcmod
from panda import Panda
from panda.tests.safety import libpandasafety_py
from panda.tests.safety.common import StdTest, make_msg, MAX_WRONG_COUNTERS
MAX_RATE_UP = 4
MAX_RATE_DOWN = 10
MAX_STEER = 300
MAX_RT_DELTA = 75
RT_INTERVAL = 250000
DRIVER_TORQUE_ALLOWANCE = 80
DRIVER_TORQUE_FACTOR = 3
MSG_ESP_19 = 0xB2 # RX from ABS, for wheel speeds
MSG_EPS_01 = 0x9F # RX from EPS, for driver steering torque
MSG_ESP_05 = 0x106 # RX from ABS, for brake light state
MSG_TSK_06 = 0x120 # RX from ECU, for ACC status from drivetrain coordinator
MSG_MOTOR_20 = 0x121 # RX from ECU, for driver throttle input
MSG_HCA_01 = 0x126 # TX by OP, Heading Control Assist steering torque
MSG_GRA_ACC_01 = 0x12B # TX by OP, ACC control buttons for cancel/resume
MSG_LDW_02 = 0x397 # TX by OP, Lane line recognition and text alerts
# Transmit of GRA_ACC_01 is allowed on bus 0 and 2 to keep compatibility with gateway and camera integration
TX_MSGS = [[MSG_HCA_01, 0], [MSG_GRA_ACC_01, 0], [MSG_GRA_ACC_01, 2], [MSG_LDW_02, 0]]
def sign(a):
    """Return 1 for positive *a*, otherwise -1 (note: sign(0) == -1)."""
    return 1 if a > 0 else -1
# Python crcmod works differently somehow from every other CRC calculator. The
# implied leading 1 on the polynomial isn't a problem, but to get the right
# result for CRC-8H2F/AUTOSAR, we have to feed it initCrc 0x00 instead of 0xFF.
volkswagen_crc_8h2f = crcmod.mkCrcFun(0x12F, initCrc=0x00, rev=False, xorOut=0xFF)
def volkswagen_mqb_crc(msg, addr, len_msg):
# This is CRC-8H2F/AUTOSAR with a twist. See the OpenDBC implementation of
# this algorithm for a version with explanatory comments.
msg_bytes = msg.RDLR.to_bytes(4, 'little') + msg.RDHR.to_bytes(4, 'little')
counter = (msg.RDLR & 0xF00) >> 8
if addr == MSG_EPS_01:
magic_pad = b'\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5\xF5'[counter]
elif addr == MSG_ESP_05:
magic_pad = b'\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07\x07'[counter]
elif addr == MSG_TSK_06:
magic_pad = b'\xC4\xE2\x4F\xE4\xF8\x2F\x56\x81\x9F\xE5\x83\x44\x05\x3F\x97\xDF'[counter]
elif addr == MSG_MOTOR_20:
magic_pad = b'\xE9\x65\xAE\x6B\x7B\x35\xE5\x5F\x4E\xC7\x86\xA2\xBB\xDD\xEB\xB4'[counter]
elif addr == MSG_HCA_01:
magic_pad = b'\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA\xDA'[counter]
elif addr == MSG_GRA_ACC_01:
magic_pad = b'\x6A\x38\xB4\x27\x22\xEF\xE1\xBB\xF8\x80\x84\x49\xC7\x9E\x1E\x2B'[counter]
else:
magic_pad = None
return volkswagen_crc_8h2f(msg_bytes[1:len_msg] + magic_pad.to_bytes(1, 'little'))
class TestVolkswagenMqbSafety(unittest.TestCase):
  """Exercises the VW MQB safety model through the libpandasafety rx/tx/fwd hooks."""
  # Counters are class attributes so they survive setUp() between tests and
  # keep each message's 4-bit CAN counter field advancing monotonically.
  cnt_eps_01 = 0
  cnt_esp_05 = 0
  cnt_tsk_06 = 0
  cnt_motor_20 = 0
  cnt_hca_01 = 0
  cnt_gra_acc_01 = 0

  @classmethod
  def setUp(cls):
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_VOLKSWAGEN_MQB, 0)
    cls.safety.init_tests_volkswagen()

  def _set_prev_torque(self, t):
    # Seed both the desired-torque and realtime-limit state with the same value.
    self.safety.set_volkswagen_desired_torque_last(t)
    self.safety.set_volkswagen_rt_torque_last(t)

  # Wheel speeds _esp_19_msg
  def _speed_msg(self, speed):
    wheel_speed_scaled = int(speed / 0.0075)
    to_send = make_msg(0, MSG_ESP_19)
    # Same scaled speed packed into all four wheel-speed fields.
    to_send[0].RDLR = wheel_speed_scaled | (wheel_speed_scaled << 16)
    to_send[0].RDHR = wheel_speed_scaled | (wheel_speed_scaled << 16)
    return to_send

  # Brake light switch _esp_05_msg
  def _brake_msg(self, brake):
    to_send = make_msg(0, MSG_ESP_05)
    to_send[0].RDLR = (0x1 << 26) if brake else 0
    to_send[0].RDLR |= (self.cnt_esp_05 % 16) << 8
    to_send[0].RDLR |= volkswagen_mqb_crc(to_send[0], MSG_ESP_05, 8)
    self.__class__.cnt_esp_05 += 1
    return to_send

  # Driver steering input torque
  def _eps_01_msg(self, torque):
    to_send = make_msg(0, MSG_EPS_01)
    t = abs(torque)
    to_send[0].RDHR = ((t & 0x1FFF) << 8)
    if torque < 0:
      to_send[0].RDHR |= 0x1 << 23  # sign bit
    to_send[0].RDLR |= (self.cnt_eps_01 % 16) << 8
    to_send[0].RDLR |= volkswagen_mqb_crc(to_send[0], MSG_EPS_01, 8)
    self.__class__.cnt_eps_01 += 1
    return to_send

  # openpilot steering output torque
  def _hca_01_msg(self, torque):
    to_send = make_msg(0, MSG_HCA_01)
    t = abs(torque)
    to_send[0].RDLR = (t & 0xFFF) << 16
    if torque < 0:
      to_send[0].RDLR |= 0x1 << 31  # sign bit
    to_send[0].RDLR |= (self.cnt_hca_01 % 16) << 8
    to_send[0].RDLR |= volkswagen_mqb_crc(to_send[0], MSG_HCA_01, 8)
    self.__class__.cnt_hca_01 += 1
    return to_send

  # ACC engagement status
  def _tsk_06_msg(self, status):
    to_send = make_msg(0, MSG_TSK_06)
    to_send[0].RDLR = (status & 0x7) << 24
    to_send[0].RDLR |= (self.cnt_tsk_06 % 16) << 8
    to_send[0].RDLR |= volkswagen_mqb_crc(to_send[0], MSG_TSK_06, 8)
    self.__class__.cnt_tsk_06 += 1
    return to_send

  # Driver throttle input
  def _motor_20_msg(self, gas):
    to_send = make_msg(0, MSG_MOTOR_20)
    to_send[0].RDLR = (gas & 0xFF) << 12
    to_send[0].RDLR |= (self.cnt_motor_20 % 16) << 8
    to_send[0].RDLR |= volkswagen_mqb_crc(to_send[0], MSG_MOTOR_20, 8)
    self.__class__.cnt_motor_20 += 1
    return to_send

  # Cruise control buttons
  def _gra_acc_01_msg(self, bit):
    to_send = make_msg(2, MSG_GRA_ACC_01)
    to_send[0].RDLR = 1 << bit
    to_send[0].RDLR |= (self.cnt_gra_acc_01 % 16) << 8
    to_send[0].RDLR |= volkswagen_mqb_crc(to_send[0], MSG_GRA_ACC_01, 8)
    self.__class__.cnt_gra_acc_01 += 1
    return to_send

  def test_spam_can_buses(self):
    StdTest.test_spam_can_buses(self, TX_MSGS)

  def test_relay_malfunction(self):
    StdTest.test_relay_malfunction(self, MSG_HCA_01)

  def test_prev_gas(self):
    for g in range(0, 256):
      self.safety.safety_rx_hook(self._motor_20_msg(g))
      self.assertEqual(True if g > 0 else False, self.safety.get_gas_pressed_prev())

  def test_default_controls_not_allowed(self):
    self.assertFalse(self.safety.get_controls_allowed())

  def test_enable_control_allowed_from_cruise(self):
    self.safety.set_controls_allowed(0)
    self.safety.safety_rx_hook(self._tsk_06_msg(3))  # 3 = ACC engaged
    self.assertTrue(self.safety.get_controls_allowed())

  def test_disable_control_allowed_from_cruise(self):
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._tsk_06_msg(1))  # 1 = ACC not engaged
    self.assertFalse(self.safety.get_controls_allowed())

  def test_sample_speed(self):
    # Stationary
    self.safety.safety_rx_hook(self._speed_msg(0))
    self.assertEqual(0, self.safety.get_volkswagen_moving())
    # 1 km/h, just under 0.3 m/s safety grace threshold
    self.safety.safety_rx_hook(self._speed_msg(1))
    self.assertEqual(0, self.safety.get_volkswagen_moving())
    # 2 km/h, just over 0.3 m/s safety grace threshold
    self.safety.safety_rx_hook(self._speed_msg(2))
    self.assertEqual(1, self.safety.get_volkswagen_moving())
    # 144 km/h, openpilot V_CRUISE_MAX
    self.safety.safety_rx_hook(self._speed_msg(144))
    self.assertEqual(1, self.safety.get_volkswagen_moving())

  def test_prev_brake(self):
    self.assertFalse(self.safety.get_brake_pressed_prev())
    self.safety.safety_rx_hook(self._brake_msg(True))
    self.assertTrue(self.safety.get_brake_pressed_prev())

  def test_brake_disengage(self):
    StdTest.test_allow_brake_at_zero_speed(self)
    StdTest.test_not_allow_brake_when_moving(self, 1)

  def test_disengage_on_gas(self):
    self.safety.safety_rx_hook(self._motor_20_msg(0))
    self.safety.set_controls_allowed(True)
    self.safety.safety_rx_hook(self._motor_20_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_allow_engage_with_gas_pressed(self):
    # Gas already pressed at engagement time must not disengage.
    self.safety.safety_rx_hook(self._motor_20_msg(1))
    self.safety.set_controls_allowed(True)
    self.safety.safety_rx_hook(self._motor_20_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.safety_rx_hook(self._motor_20_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_steer_safety_check(self):
    for enabled in [0, 1]:
      for t in range(-500, 500):
        self.safety.set_controls_allowed(enabled)
        self._set_prev_torque(t)
        if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):
          self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg(t)))
        else:
          self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(t)))

  def test_manually_enable_controls_allowed(self):
    StdTest.test_manually_enable_controls_allowed(self)

  def test_spam_cancel_safety_check(self):
    BIT_CANCEL = 13
    BIT_RESUME = 19
    BIT_SET = 16
    # While disengaged, only CANCEL may be spammed.
    self.safety.set_controls_allowed(0)
    self.assertTrue(self.safety.safety_tx_hook(self._gra_acc_01_msg(BIT_CANCEL)))
    self.assertFalse(self.safety.safety_tx_hook(self._gra_acc_01_msg(BIT_RESUME)))
    self.assertFalse(self.safety.safety_tx_hook(self._gra_acc_01_msg(BIT_SET)))
    # do not block resume if we are engaged already
    self.safety.set_controls_allowed(1)
    self.assertTrue(self.safety.safety_tx_hook(self._gra_acc_01_msg(BIT_RESUME)))

  def test_non_realtime_limit_up(self):
    self.safety.set_volkswagen_torque_driver(0, 0)
    self.safety.set_controls_allowed(True)

    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(MAX_RATE_UP)))
    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(-MAX_RATE_UP)))

    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg(MAX_RATE_UP + 1)))
    self.safety.set_controls_allowed(True)
    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg(-MAX_RATE_UP - 1)))

  def test_non_realtime_limit_down(self):
    self.safety.set_volkswagen_torque_driver(0, 0)
    self.safety.set_controls_allowed(True)

  def test_against_torque_driver(self):
    self.safety.set_controls_allowed(True)

    # Within the driver-torque allowance, full steer torque is permitted.
    for sign in [-1, 1]:
      for t in np.arange(0, DRIVER_TORQUE_ALLOWANCE + 1, 1):
        t *= -sign
        self.safety.set_volkswagen_torque_driver(t, t)
        self._set_prev_torque(MAX_STEER * sign)
        self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(MAX_STEER * sign)))

    self.safety.set_volkswagen_torque_driver(DRIVER_TORQUE_ALLOWANCE + 1, DRIVER_TORQUE_ALLOWANCE + 1)
    self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg(-MAX_STEER)))

    # spot check some individual cases
    for sign in [-1, 1]:
      driver_torque = (DRIVER_TORQUE_ALLOWANCE + 10) * sign
      torque_desired = (MAX_STEER - 10 * DRIVER_TORQUE_FACTOR) * sign
      delta = 1 * sign
      self._set_prev_torque(torque_desired)
      self.safety.set_volkswagen_torque_driver(-driver_torque, -driver_torque)
      self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(torque_desired)))
      self._set_prev_torque(torque_desired + delta)
      self.safety.set_volkswagen_torque_driver(-driver_torque, -driver_torque)
      self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg(torque_desired + delta)))

      self._set_prev_torque(MAX_STEER * sign)
      self.safety.set_volkswagen_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)
      self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg((MAX_STEER - MAX_RATE_DOWN) * sign)))
      self._set_prev_torque(MAX_STEER * sign)
      self.safety.set_volkswagen_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)
      self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(0)))
      self._set_prev_torque(MAX_STEER * sign)
      self.safety.set_volkswagen_torque_driver(-MAX_STEER * sign, -MAX_STEER * sign)
      self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg((MAX_STEER - MAX_RATE_DOWN + 1) * sign)))

  def test_realtime_limits(self):
    self.safety.set_controls_allowed(True)

    for sign in [-1, 1]:
      self.safety.init_tests_volkswagen()
      self._set_prev_torque(0)
      self.safety.set_volkswagen_torque_driver(0, 0)
      for t in np.arange(0, MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(t)))
      self.assertFalse(self.safety.safety_tx_hook(self._hca_01_msg(sign * (MAX_RT_DELTA + 1))))

      self._set_prev_torque(0)
      for t in np.arange(0, MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(t)))

      # Increase timer to update rt_torque_last
      self.safety.set_timer(RT_INTERVAL + 1)
      self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(sign * (MAX_RT_DELTA - 1))))
      self.assertTrue(self.safety.safety_tx_hook(self._hca_01_msg(sign * (MAX_RT_DELTA + 1))))

  def test_torque_measurements(self):
    self.safety.safety_rx_hook(self._eps_01_msg(50))
    self.safety.safety_rx_hook(self._eps_01_msg(-50))
    self.safety.safety_rx_hook(self._eps_01_msg(0))
    self.safety.safety_rx_hook(self._eps_01_msg(0))
    self.safety.safety_rx_hook(self._eps_01_msg(0))
    self.safety.safety_rx_hook(self._eps_01_msg(0))

    self.assertEqual(-50, self.safety.get_volkswagen_torque_driver_min())
    self.assertEqual(50, self.safety.get_volkswagen_torque_driver_max())

    # Old samples age out of the min/max window one message at a time.
    self.safety.safety_rx_hook(self._eps_01_msg(0))
    self.assertEqual(0, self.safety.get_volkswagen_torque_driver_max())
    self.assertEqual(-50, self.safety.get_volkswagen_torque_driver_min())

    self.safety.safety_rx_hook(self._eps_01_msg(0))
    self.assertEqual(0, self.safety.get_volkswagen_torque_driver_max())
    self.assertEqual(0, self.safety.get_volkswagen_torque_driver_min())

  def test_rx_hook(self):
    # checksum checks
    # TODO: Would be ideal to check ESP_19 as well, but it has no checksum
    # or counter, and I'm not sure if we can easily validate Panda's simple
    # temporal reception-rate check here.
    for msg in [MSG_EPS_01, MSG_ESP_05, MSG_TSK_06, MSG_MOTOR_20]:
      self.safety.set_controls_allowed(1)
      if msg == MSG_EPS_01:
        to_push = self._eps_01_msg(0)
      if msg == MSG_ESP_05:
        to_push = self._brake_msg(False)
      if msg == MSG_TSK_06:
        to_push = self._tsk_06_msg(3)
      if msg == MSG_MOTOR_20:
        to_push = self._motor_20_msg(0)
      self.assertTrue(self.safety.safety_rx_hook(to_push))
      to_push[0].RDHR ^= 0xFF  # corrupt payload so the CRC no longer matches
      self.assertFalse(self.safety.safety_rx_hook(to_push))
      self.assertFalse(self.safety.get_controls_allowed())

    # counter
    # reset wrong_counters to zero by sending valid messages
    for i in range(MAX_WRONG_COUNTERS + 1):
      # Bump class counters without sending, so the next built message skips a count.
      self.__class__.cnt_eps_01 += 1
      self.__class__.cnt_esp_05 += 1
      self.__class__.cnt_tsk_06 += 1
      self.__class__.cnt_motor_20 += 1
      if i < MAX_WRONG_COUNTERS:
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._eps_01_msg(0))
        self.safety.safety_rx_hook(self._brake_msg(False))
        self.safety.safety_rx_hook(self._tsk_06_msg(3))
        self.safety.safety_rx_hook(self._motor_20_msg(0))
      else:
        self.assertFalse(self.safety.safety_rx_hook(self._eps_01_msg(0)))
        self.assertFalse(self.safety.safety_rx_hook(self._brake_msg(False)))
        self.assertFalse(self.safety.safety_rx_hook(self._tsk_06_msg(3)))
        self.assertFalse(self.safety.safety_rx_hook(self._motor_20_msg(0)))
        self.assertFalse(self.safety.get_controls_allowed())

    # restore counters for future tests with a couple of good messages
    for i in range(2):
      self.safety.set_controls_allowed(1)
      self.safety.safety_rx_hook(self._eps_01_msg(0))
      self.safety.safety_rx_hook(self._brake_msg(False))
      self.safety.safety_rx_hook(self._tsk_06_msg(3))
      self.safety.safety_rx_hook(self._motor_20_msg(0))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_fwd_hook(self):
    buss = list(range(0x0, 0x3))
    msgs = list(range(0x1, 0x800))

    blocked_msgs_0to2 = []
    blocked_msgs_2to0 = [MSG_HCA_01, MSG_LDW_02]
    for b in buss:
      for m in msgs:
        if b == 0:
          fwd_bus = -1 if m in blocked_msgs_0to2 else 2
        elif b == 1:
          fwd_bus = -1
        elif b == 2:
          fwd_bus = -1 if m in blocked_msgs_2to0 else 0

        # assume len 8
        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8)))
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,695,661
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/panda/tests/safety/test_subaru.py
|
#!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
from panda.tests.safety.common import StdTest, make_msg
# Steering torque limits enforced by the Subaru safety model.
MAX_RATE_UP = 50
MAX_RATE_DOWN = 70
MAX_STEER = 2047

MAX_RT_DELTA = 940
RT_INTERVAL = 250000

# Removed stray C-style trailing semicolons from the two lines below.
DRIVER_TORQUE_ALLOWANCE = 60
DRIVER_TORQUE_FACTOR = 10

SPEED_THRESHOLD = 20  # 1kph (see dbc file)

# [addr, bus] pairs openpilot may transmit: global platform vs legacy.
TX_MSGS = [[0x122, 0], [0x221, 0], [0x322, 0]]
TX_L_MSGS = [[0x164, 0], [0x221, 0], [0x322, 0]]
def twos_comp(val, bits):
  """Encode signed integer *val* as an unsigned two's-complement value of width *bits*."""
  return val if val >= 0 else (1 << bits) + val
def sign(a):
  """Signum with the zero case folded into the negative branch: 1 if a > 0, else -1."""
  return 1 if a > 0 else -1
def subaru_checksum(msg, addr, len_msg):
  """Subaru CAN checksum: low byte of (addr bytes + all payload bytes).

  Payload bytes 0-3 come from msg.RDLR, bytes 4-7 from msg.RDHR.
  """
  total = addr + (addr >> 8)
  for byte_idx in range(len_msg):
    if byte_idx < 4:
      total += msg.RDLR >> (8 * byte_idx)
    else:
      total += msg.RDHR >> (8 * (byte_idx - 4))
  return total & 0xff
class TestSubaruSafety(unittest.TestCase):
  """Exercises the Subaru safety model through the libpandasafety rx/tx/fwd hooks."""
  # Class-level counters keep the CAN counter fields advancing across tests.
  cnt_gas = 0
  cnt_torque_driver = 0
  cnt_cruise = 0
  cnt_speed = 0
  cnt_brake = 0

  @classmethod
  def setUp(cls):
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_SUBARU, 0)
    cls.safety.init_tests_subaru()

  def _set_prev_torque(self, t):
    # Seed both the desired-torque and realtime-limit state with the same value.
    self.safety.set_subaru_desired_torque_last(t)
    self.safety.set_subaru_rt_torque_last(t)

  def _torque_driver_msg(self, torque):
    # Driver steering torque; message layout differs between global and legacy platforms.
    t = twos_comp(torque, 11)
    if self.safety.get_subaru_global():
      to_send = make_msg(0, 0x119)
      to_send[0].RDLR = ((t & 0x7FF) << 16)
      to_send[0].RDLR |= (self.cnt_torque_driver & 0xF) << 8
      to_send[0].RDLR |= subaru_checksum(to_send, 0x119, 8)
      self.__class__.cnt_torque_driver += 1
    else:
      to_send = make_msg(0, 0x371)
      to_send[0].RDLR = (t & 0x7) << 29
      to_send[0].RDHR = (t >> 3) & 0xFF
    return to_send

  def _speed_msg(self, speed):
    speed &= 0x1FFF
    to_send = make_msg(0, 0x13a)
    to_send[0].RDLR = speed << 12
    to_send[0].RDHR = speed << 6
    to_send[0].RDLR |= (self.cnt_speed & 0xF) << 8
    to_send[0].RDLR |= subaru_checksum(to_send, 0x13a, 8)
    self.__class__.cnt_speed += 1
    return to_send

  def _brake_msg(self, brake):
    to_send = make_msg(0, 0x139)
    to_send[0].RDHR = (brake << 4) & 0xFFF
    to_send[0].RDLR |= (self.cnt_brake & 0xF) << 8
    to_send[0].RDLR |= subaru_checksum(to_send, 0x139, 8)
    self.__class__.cnt_brake += 1
    return to_send

  def _torque_msg(self, torque):
    # openpilot steering command; address depends on platform generation.
    t = twos_comp(torque, 13)
    if self.safety.get_subaru_global():
      to_send = make_msg(0, 0x122)
      to_send[0].RDLR = (t << 16)
    else:
      to_send = make_msg(0, 0x164)
      to_send[0].RDLR = (t << 8)
    return to_send

  def _gas_msg(self, gas):
    if self.safety.get_subaru_global():
      to_send = make_msg(0, 0x40)
      to_send[0].RDHR = gas & 0xFF
      to_send[0].RDLR |= (self.cnt_gas & 0xF) << 8
      to_send[0].RDLR |= subaru_checksum(to_send, 0x40, 8)
      self.__class__.cnt_gas += 1
    else:
      to_send = make_msg(0, 0x140)
      to_send[0].RDLR = gas & 0xFF
    return to_send

  def _cruise_msg(self, cruise):
    if self.safety.get_subaru_global():
      to_send = make_msg(0, 0x240)
      to_send[0].RDHR = cruise << 9
      to_send[0].RDLR |= (self.cnt_cruise & 0xF) << 8
      to_send[0].RDLR |= subaru_checksum(to_send, 0x240, 8)
      self.__class__.cnt_cruise += 1
    else:
      to_send = make_msg(0, 0x144)
      to_send[0].RDHR = cruise << 17
    return to_send

  def _set_torque_driver(self, min_t, max_t):
    # Push enough samples to fill the driver-torque min/max window.
    for i in range(0, 5):
      self.safety.safety_rx_hook(self._torque_driver_msg(min_t))
    self.safety.safety_rx_hook(self._torque_driver_msg(max_t))

  def test_spam_can_buses(self):
    StdTest.test_spam_can_buses(self, TX_MSGS if self.safety.get_subaru_global() else TX_L_MSGS)

  def test_relay_malfunction(self):
    StdTest.test_relay_malfunction(self, 0x122 if self.safety.get_subaru_global() else 0x164)

  def test_default_controls_not_allowed(self):
    self.assertFalse(self.safety.get_controls_allowed())

  def test_enable_control_allowed_from_cruise(self):
    self.safety.safety_rx_hook(self._cruise_msg(True))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_disable_control_allowed_from_cruise(self):
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._cruise_msg(False))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_disengage_on_gas(self):
    self.safety.set_controls_allowed(True)
    self.safety.safety_rx_hook(self._gas_msg(0))
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.safety_rx_hook(self._gas_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_brake_disengage(self):
    # Brake-based disengage is only checked on the global platform here.
    if (self.safety.get_subaru_global()):
      StdTest.test_allow_brake_at_zero_speed(self)
      StdTest.test_not_allow_brake_when_moving(self, SPEED_THRESHOLD)

  def test_steer_safety_check(self):
    for enabled in [0, 1]:
      for t in range(-3000, 3000):
        self.safety.set_controls_allowed(enabled)
        self._set_prev_torque(t)
        if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):
          self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(t)))
        else:
          self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))

  def test_manually_enable_controls_allowed(self):
    StdTest.test_manually_enable_controls_allowed(self)

  def test_non_realtime_limit_up(self):
    self._set_torque_driver(0, 0)
    self.safety.set_controls_allowed(True)

    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP)))
    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(-MAX_RATE_UP)))

    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP + 1)))
    self.safety.set_controls_allowed(True)
    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(-MAX_RATE_UP - 1)))

  def test_non_realtime_limit_down(self):
    self._set_torque_driver(0, 0)
    self.safety.set_controls_allowed(True)

  def test_against_torque_driver(self):
    self.safety.set_controls_allowed(True)

    # Within the driver-torque allowance, full steer torque is permitted.
    for sign in [-1, 1]:
      for t in np.arange(0, DRIVER_TORQUE_ALLOWANCE + 1, 1):
        t *= -sign
        self._set_torque_driver(t, t)
        self._set_prev_torque(MAX_STEER * sign)
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_STEER * sign)))

    self._set_torque_driver(DRIVER_TORQUE_ALLOWANCE + 1, DRIVER_TORQUE_ALLOWANCE + 1)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(-MAX_STEER)))

    # arbitrary high driver torque to ensure max steer torque is allowed
    max_driver_torque = int(MAX_STEER / DRIVER_TORQUE_FACTOR + DRIVER_TORQUE_ALLOWANCE + 1)

    # spot check some individual cases
    for sign in [-1, 1]:
      driver_torque = (DRIVER_TORQUE_ALLOWANCE + 10) * sign
      torque_desired = (MAX_STEER - 10 * DRIVER_TORQUE_FACTOR) * sign
      delta = 1 * sign
      self._set_prev_torque(torque_desired)
      self._set_torque_driver(-driver_torque, -driver_torque)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(torque_desired)))
      self._set_prev_torque(torque_desired + delta)
      self._set_torque_driver(-driver_torque, -driver_torque)
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(torque_desired + delta)))

      self._set_prev_torque(MAX_STEER * sign)
      self._set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg((MAX_STEER - MAX_RATE_DOWN) * sign)))
      self._set_prev_torque(MAX_STEER * sign)
      self._set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(0)))
      self._set_prev_torque(MAX_STEER * sign)
      self._set_torque_driver(-max_driver_torque * sign, -max_driver_torque * sign)
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg((MAX_STEER - MAX_RATE_DOWN + 1) * sign)))

  def test_realtime_limits(self):
    self.safety.set_controls_allowed(True)

    for sign in [-1, 1]:
      self.safety.init_tests_subaru()
      self._set_prev_torque(0)
      self._set_torque_driver(0, 0)
      for t in np.arange(0, MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))

      self._set_prev_torque(0)
      for t in np.arange(0, MAX_RT_DELTA, 1):
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))

      # Increase timer to update rt_torque_last
      self.safety.set_timer(RT_INTERVAL + 1)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA - 1))))
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))

  def test_fwd_hook(self):
    buss = list(range(0x0, 0x3))
    msgs = list(range(0x1, 0x800))
    # Addresses openpilot originates are not forwarded camera->car.
    blocked_msgs = [290, 545, 802] if self.safety.get_subaru_global() else [356, 545, 802]
    for b in buss:
      for m in msgs:
        if b == 0:
          fwd_bus = 2
        elif b == 1:
          fwd_bus = -1
        elif b == 2:
          fwd_bus = -1 if m in blocked_msgs else 0

        # assume len 8
        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8)))
class TestSubaruLegacySafety(TestSubaruSafety):
  """Re-runs the full Subaru suite against the legacy (pre-global) safety model."""
  @classmethod
  def setUp(cls):
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_SUBARU_LEGACY, 0)
    cls.safety.init_tests_subaru()
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,695,662
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/panda/tests/safety/test_honda.py
|
#!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
from panda.tests.safety.common import StdTest, make_msg, MAX_WRONG_COUNTERS
MAX_BRAKE = 255

INTERCEPTOR_THRESHOLD = 328

# [addr, bus] pairs openpilot may transmit, per Honda hardware variant.
N_TX_MSGS = [[0xE4, 0], [0x194, 0], [0x1FA, 0], [0x200, 0], [0x30C, 0], [0x33D, 0]]
BH_TX_MSGS = [[0xE4, 0], [0x296, 1], [0x33D, 0]]  # Bosch Harness
BG_TX_MSGS = [[0xE4, 2], [0x296, 0], [0x33D, 2]]  # Bosch Giraffe

# Hardware type identifiers as returned by safety.get_honda_hw().
HONDA_N_HW = 0
HONDA_BG_HW = 1
HONDA_BH_HW = 2
def honda_checksum(msg, addr, len_msg):
  """Honda 4-bit CAN checksum: (8 - sum of address and payload nibbles) mod 16.

  Payload nibbles 0-7 come from msg.RDLR, nibbles 8-15 from msg.RDHR.
  """
  total = 0
  # Fold the address in nibble by nibble.
  while addr > 0:
    total += addr
    addr >>= 4
  for nibble_idx in range(0, 2 * len_msg):
    if nibble_idx < 8:
      total += msg.RDLR >> (4 * nibble_idx)
    else:
      total += msg.RDHR >> (4 * (nibble_idx - 8))
  return (8 - total) & 0xF
class TestHondaSafety(unittest.TestCase):
  """Exercises the Honda Nidec safety model; Bosch variants subclass and re-run it."""
  # Class-level counters keep each message's 2-bit CAN counter advancing across tests.
  cnt_speed = 0
  cnt_gas = 0
  cnt_button = 0

  @classmethod
  def setUp(cls):
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_HONDA_NIDEC, 0)
    cls.safety.init_tests_honda()

  def _speed_msg(self, speed):
    # Bosch Harness receives powertrain messages on bus 1, everything else on bus 0.
    bus = 1 if self.safety.get_honda_hw() == HONDA_BH_HW else 0
    to_send = make_msg(bus, 0x158)
    to_send[0].RDLR = speed
    to_send[0].RDHR |= (self.cnt_speed % 4) << 28
    to_send[0].RDHR |= honda_checksum(to_send[0], 0x158, 8) << 24
    self.__class__.cnt_speed += 1
    return to_send

  def _button_msg(self, buttons, addr):
    bus = 1 if self.safety.get_honda_hw() == HONDA_BH_HW else 0
    to_send = make_msg(bus, addr)
    to_send[0].RDLR = buttons << 5
    to_send[0].RDHR |= (self.cnt_button % 4) << 28
    to_send[0].RDHR |= honda_checksum(to_send[0], addr, 8) << 24
    self.__class__.cnt_button += 1
    return to_send

  def _brake_msg(self, brake):
    # Brake and gas share message 0x17C, hence the shared cnt_gas counter.
    bus = 1 if self.safety.get_honda_hw() == HONDA_BH_HW else 0
    to_send = make_msg(bus, 0x17C)
    to_send[0].RDHR = 0x200000 if brake else 0
    to_send[0].RDHR |= (self.cnt_gas % 4) << 28
    to_send[0].RDHR |= honda_checksum(to_send[0], 0x17C, 8) << 24
    self.__class__.cnt_gas += 1
    return to_send

  def _alt_brake_msg(self, brake):
    to_send = make_msg(0, 0x1BE)
    to_send[0].RDLR = 0x10 if brake else 0
    return to_send

  def _gas_msg(self, gas):
    bus = 1 if self.safety.get_honda_hw() == HONDA_BH_HW else 0
    to_send = make_msg(bus, 0x17C)
    to_send[0].RDLR = 1 if gas else 0
    to_send[0].RDHR |= (self.cnt_gas % 4) << 28
    to_send[0].RDHR |= honda_checksum(to_send[0], 0x17C, 8) << 24
    self.__class__.cnt_gas += 1
    return to_send

  def _send_brake_msg(self, brake):
    to_send = make_msg(0, 0x1FA)
    to_send[0].RDLR = ((brake & 0x3) << 14) | ((brake & 0x3FF) >> 2)
    return to_send

  def _send_interceptor_msg(self, gas, addr):
    # Gas interceptor sends the value twice (second channel doubled), byte-swapped.
    to_send = make_msg(0, addr, 6)
    gas2 = gas * 2
    to_send[0].RDLR = ((gas & 0xff) << 8) | ((gas & 0xff00) >> 8) | \
                      ((gas2 & 0xff) << 24) | ((gas2 & 0xff00) << 8)
    return to_send

  def _send_steer_msg(self, steer):
    bus = 2 if self.safety.get_honda_hw() == HONDA_BG_HW else 0
    to_send = make_msg(bus, 0xE4, 6)
    to_send[0].RDLR = steer
    return to_send

  def test_spam_can_buses(self):
    hw_type = self.safety.get_honda_hw()
    if hw_type == HONDA_N_HW:
      tx_msgs = N_TX_MSGS
    elif hw_type == HONDA_BH_HW:
      tx_msgs = BH_TX_MSGS
    elif hw_type == HONDA_BG_HW:
      tx_msgs = BG_TX_MSGS
    StdTest.test_spam_can_buses(self, tx_msgs)

  def test_relay_malfunction(self):
    hw = self.safety.get_honda_hw()
    bus = 2 if hw == HONDA_BG_HW else 0
    StdTest.test_relay_malfunction(self, 0xE4, bus=bus)

  def test_default_controls_not_allowed(self):
    self.assertFalse(self.safety.get_controls_allowed())

  def test_manually_enable_controls_allowed(self):
    StdTest.test_manually_enable_controls_allowed(self)

  def test_resume_button(self):
    RESUME_BTN = 4
    self.safety.set_controls_allowed(0)
    self.safety.safety_rx_hook(self._button_msg(RESUME_BTN, 0x296))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_set_button(self):
    SET_BTN = 3
    self.safety.set_controls_allowed(0)
    self.safety.safety_rx_hook(self._button_msg(SET_BTN, 0x296))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_cancel_button(self):
    CANCEL_BTN = 2
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._button_msg(CANCEL_BTN, 0x296))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_sample_speed(self):
    self.assertEqual(0, self.safety.get_honda_moving())
    self.safety.safety_rx_hook(self._speed_msg(100))
    self.assertEqual(1, self.safety.get_honda_moving())

  def test_prev_brake(self):
    self.assertFalse(self.safety.get_brake_pressed_prev())
    self.safety.safety_rx_hook(self._brake_msg(True))
    self.assertTrue(self.safety.get_brake_pressed_prev())

  def test_disengage_on_brake(self):
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._brake_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_alt_disengage_on_brake(self):
    # With the alternate brake message enabled, 0x1BE disengages; otherwise ignored.
    self.safety.set_honda_alt_brake_msg(1)
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._alt_brake_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())

    self.safety.set_honda_alt_brake_msg(0)
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._alt_brake_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_brake_disengage(self):
    StdTest.test_allow_brake_at_zero_speed(self)
    StdTest.test_not_allow_brake_when_moving(self, 0)

  def test_prev_gas(self):
    self.safety.safety_rx_hook(self._gas_msg(False))
    self.assertFalse(self.safety.get_gas_pressed_prev())
    self.safety.safety_rx_hook(self._gas_msg(True))
    self.assertTrue(self.safety.get_gas_pressed_prev())

  def test_prev_gas_interceptor(self):
    self.safety.safety_rx_hook(self._send_interceptor_msg(0x0, 0x201))
    self.assertFalse(self.safety.get_gas_interceptor_prev())
    self.safety.safety_rx_hook(self._send_interceptor_msg(0x1000, 0x201))
    self.assertTrue(self.safety.get_gas_interceptor_prev())
    self.safety.safety_rx_hook(self._send_interceptor_msg(0x0, 0x201))
    self.safety.set_gas_interceptor_detected(False)

  def test_disengage_on_gas(self):
    self.safety.safety_rx_hook(self._gas_msg(0))
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._gas_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_allow_engage_with_gas_pressed(self):
    # Gas already pressed at engagement time must not disengage.
    self.safety.safety_rx_hook(self._gas_msg(1))
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_disengage_on_gas_interceptor(self):
    for g in range(0, 0x1000):
      self.safety.safety_rx_hook(self._send_interceptor_msg(0, 0x201))
      self.safety.set_controls_allowed(True)
      self.safety.safety_rx_hook(self._send_interceptor_msg(g, 0x201))
      remain_enabled = g <= INTERCEPTOR_THRESHOLD
      self.assertEqual(remain_enabled, self.safety.get_controls_allowed())
    self.safety.safety_rx_hook(self._send_interceptor_msg(0, 0x201))
    self.safety.set_gas_interceptor_detected(False)

  def test_allow_engage_with_gas_interceptor_pressed(self):
    self.safety.safety_rx_hook(self._send_interceptor_msg(0x1000, 0x201))
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._send_interceptor_msg(0x1000, 0x201))
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.safety_rx_hook(self._send_interceptor_msg(0, 0x201))
    self.safety.set_gas_interceptor_detected(False)

  def test_brake_safety_check(self):
    hw = self.safety.get_honda_hw()
    if hw == HONDA_N_HW:
      for fwd_brake in [False, True]:
        self.safety.set_honda_fwd_brake(fwd_brake)
        for brake in np.arange(0, MAX_BRAKE + 10, 1):
          for controls_allowed in [True, False]:
            self.safety.set_controls_allowed(controls_allowed)
            if fwd_brake:
              send = False  # block openpilot brake msg when fwd'ing stock msg
            elif controls_allowed:
              send = MAX_BRAKE >= brake >= 0
            else:
              send = brake == 0
            self.assertEqual(send, self.safety.safety_tx_hook(self._send_brake_msg(brake)))
      self.safety.set_honda_fwd_brake(False)

  def test_gas_interceptor_safety_check(self):
    if self.safety.get_honda_hw() == HONDA_N_HW:
      for gas in np.arange(0, 4000, 100):
        for controls_allowed in [True, False]:
          self.safety.set_controls_allowed(controls_allowed)
          if controls_allowed:
            send = True
          else:
            send = gas == 0
          self.assertEqual(send, self.safety.safety_tx_hook(self._send_interceptor_msg(gas, 0x200)))

  def test_steer_safety_check(self):
    self.safety.set_controls_allowed(0)
    self.assertTrue(self.safety.safety_tx_hook(self._send_steer_msg(0x0000)))
    self.assertFalse(self.safety.safety_tx_hook(self._send_steer_msg(0x1000)))

  def test_spam_cancel_safety_check(self):
    hw = self.safety.get_honda_hw()
    if hw != HONDA_N_HW:
      RESUME_BTN = 4
      SET_BTN = 3
      CANCEL_BTN = 2
      BUTTON_MSG = 0x296
      # While disengaged, only CANCEL may be spammed.
      self.safety.set_controls_allowed(0)
      self.assertTrue(self.safety.safety_tx_hook(self._button_msg(CANCEL_BTN, BUTTON_MSG)))
      self.assertFalse(self.safety.safety_tx_hook(self._button_msg(RESUME_BTN, BUTTON_MSG)))
      self.assertFalse(self.safety.safety_tx_hook(self._button_msg(SET_BTN, BUTTON_MSG)))
      # do not block resume if we are engaged already
      self.safety.set_controls_allowed(1)
      self.assertTrue(self.safety.safety_tx_hook(self._button_msg(RESUME_BTN, BUTTON_MSG)))

  def test_rx_hook(self):
    # checksum checks
    SET_BTN = 3
    for msg in ["btn1", "btn2", "gas", "speed"]:
      self.safety.set_controls_allowed(1)
      if msg == "btn1":
        if self.safety.get_honda_hw() == HONDA_N_HW:
          to_push = self._button_msg(SET_BTN, 0x1A6)  # only in Honda_NIDEC
        else:
          continue
      if msg == "btn2":
        to_push = self._button_msg(SET_BTN, 0x296)
      if msg == "gas":
        to_push = self._gas_msg(0)
      if msg == "speed":
        to_push = self._speed_msg(0)
      self.assertTrue(self.safety.safety_rx_hook(to_push))
      to_push[0].RDHR = 0  # invalidate checksum
      self.assertFalse(self.safety.safety_rx_hook(to_push))
      self.assertFalse(self.safety.get_controls_allowed())

    # counter
    # reset wrong_counters to zero by sending valid messages
    for i in range(MAX_WRONG_COUNTERS + 1):
      # Bump class counters without sending, so the next built message skips a count.
      self.__class__.cnt_speed += 1
      self.__class__.cnt_gas += 1
      self.__class__.cnt_button += 1
      if i < MAX_WRONG_COUNTERS:
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._button_msg(SET_BTN, 0x296))
        self.safety.safety_rx_hook(self._speed_msg(0))
        self.safety.safety_rx_hook(self._gas_msg(0))
      else:
        self.assertFalse(self.safety.safety_rx_hook(self._button_msg(SET_BTN, 0x296)))
        self.assertFalse(self.safety.safety_rx_hook(self._speed_msg(0)))
        self.assertFalse(self.safety.safety_rx_hook(self._gas_msg(0)))
        self.assertFalse(self.safety.get_controls_allowed())

    # restore counters for future tests with a couple of good messages
    for i in range(2):
      self.safety.set_controls_allowed(1)
      self.safety.safety_rx_hook(self._button_msg(SET_BTN, 0x296))
      self.safety.safety_rx_hook(self._speed_msg(0))
      self.safety.safety_rx_hook(self._gas_msg(0))
    self.safety.safety_rx_hook(self._button_msg(SET_BTN, 0x296))
    self.assertTrue(self.safety.get_controls_allowed())

  def test_fwd_hook(self):
    buss = list(range(0x0, 0x3))
    msgs = list(range(0x1, 0x800))
    fwd_brake = [False, True]

    for f in fwd_brake:
      self.safety.set_honda_fwd_brake(f)
      blocked_msgs = [0xE4, 0x194, 0x33D]
      blocked_msgs += [0x30C]
      if not f:
        # Stock brake (0x1FA) is only blocked when not forwarding it.
        blocked_msgs += [0x1FA]
      for b in buss:
        for m in msgs:
          if b == 0:
            fwd_bus = 2
          elif b == 1:
            fwd_bus = -1
          elif b == 2:
            fwd_bus = -1 if m in blocked_msgs else 0

          # assume len 8
          self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8)))
    self.safety.set_honda_fwd_brake(False)
class TestHondaBoschGiraffeSafety(TestHondaSafety):
  """Honda Bosch (giraffe harness) variant: inherits all NIDEC tests but
  swaps the safety mode and overrides forwarding expectations."""

  @classmethod
  def setUp(cls):
    TestHondaSafety.setUp()
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH_GIRAFFE, 0)
    cls.safety.init_tests_honda()

  def test_fwd_hook(self):
    """Forwarding map depends on hardware: the Bosch-harness build swaps
    which physical bus carries powertrain vs. radar/camera traffic."""
    buss = range(0x0, 0x3)
    msgs = range(0x1, 0x800)

    # Bus roles as reported by the safety firmware.
    hw = self.safety.get_honda_hw()
    bus_rdr_cam = 2 if hw == HONDA_BH_HW else 1
    bus_rdr_car = 0 if hw == HONDA_BH_HW else 2
    bus_pt = 1 if hw == HONDA_BH_HW else 0

    # Addresses openpilot transmits itself; blocked camera->car.
    blocked_msgs = [0xE4, 0x33D]
    for b in buss:
      for m in msgs:
        if b == bus_pt:
          fwd_bus = -1  # powertrain bus is never forwarded
        elif b == bus_rdr_cam:
          fwd_bus = -1 if m in blocked_msgs else bus_rdr_car
        elif b == bus_rdr_car:
          fwd_bus = bus_rdr_cam
        # assume len 8
        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8)))
class TestHondaBoschHarnessSafety(TestHondaBoschGiraffeSafety):
  """Bosch car-harness variant: identical tests to the giraffe class,
  only the safety mode passed to the firmware differs."""

  @classmethod
  def setUp(cls):
    TestHondaBoschGiraffeSafety.setUp()
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_HONDA_BOSCH_HARNESS, 0)
    cls.safety.init_tests_honda()
if __name__ == "__main__":
unittest.main()
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,695,663
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/selfdrive/locationd/paramsd.py
|
#!/usr/bin/env python3
import math
import cereal.messaging as messaging
from selfdrive.locationd.kalman.models.car_kf import CarKalman, ObservationKind, States
CARSTATE_DECIMATION = 5
class ParamsLearner:
  """Online estimator of vehicle parameters (steer ratio, tire stiffness,
  angle offset) driven by a car Kalman filter.

  Feed it timestamped 'liveLocationKalman' and 'carState' messages via
  handle_log(); read learned values from self.kf.x.
  """

  def __init__(self, CP):
    # Seed the filter with static vehicle constants from CarParams.
    self.kf = CarKalman()
    self.kf.filter.set_mass(CP.mass)  # pylint: disable=no-member
    self.kf.filter.set_rotational_inertia(CP.rotationalInertia)  # pylint: disable=no-member
    self.kf.filter.set_center_to_front(CP.centerToFront)  # pylint: disable=no-member
    self.kf.filter.set_center_to_rear(CP.wheelbase - CP.centerToFront)  # pylint: disable=no-member
    self.kf.filter.set_stiffness_front(CP.tireStiffnessFront)  # pylint: disable=no-member
    self.kf.filter.set_stiffness_rear(CP.tireStiffnessRear)  # pylint: disable=no-member

    self.active = False  # whether observations are currently being fed

    self.speed = 0
    self.steering_pressed = False
    self.steering_angle = 0
    self.carstate_counter = 0

  def update_active(self):
    # Learn only when moving (> 5, presumably m/s — confirm units with
    # velocityCalibrated) and the driver isn't holding a large angle.
    self.active = (abs(self.steering_angle) < 45 or not self.steering_pressed) and self.speed > 5

  def handle_log(self, t, which, msg):
    """Ingest one message at mono time t (seconds); updates the filter."""
    if which == 'liveLocationKalman':
      v_calibrated = msg.velocityCalibrated.value
      # v_calibrated_std = msg.velocityCalibrated.std
      self.speed = v_calibrated[0]

      yaw_rate = msg.angularVelocityCalibrated.value[2]
      # yaw_rate_std = msg.angularVelocityCalibrated.std[2]

      self.update_active()
      if self.active:
        self.kf.predict_and_observe(t, ObservationKind.ROAD_FRAME_YAW_RATE, [-yaw_rate])
        self.kf.predict_and_observe(t, ObservationKind.ROAD_FRAME_XY_SPEED, [[v_calibrated[0], -v_calibrated[1]]])

        # Clamp values: if a state drifts out of its plausible range,
        # observe a nominal value to pull it back.
        x = self.kf.x
        if not (10 < x[States.STEER_RATIO] < 25):
          self.kf.predict_and_observe(t, ObservationKind.STEER_RATIO, [15.0])
        if not (0.5 < x[States.STIFFNESS] < 3.0):
          self.kf.predict_and_observe(t, ObservationKind.STIFFNESS, [1.0])
      else:
        # Keep the filter clock close to now so the next observation
        # doesn't integrate over the whole inactive gap.
        self.kf.filter.filter_time = t - 0.1

    elif which == 'carState':
      self.carstate_counter += 1
      # carState arrives fast; only use every CARSTATE_DECIMATION-th sample.
      if self.carstate_counter % CARSTATE_DECIMATION == 0:
        self.steering_angle = msg.steeringAngle
        self.steering_pressed = msg.steeringPressed
        self.update_active()

        if self.active:
          self.kf.predict_and_observe(t, ObservationKind.STEER_ANGLE, [math.radians(msg.steeringAngle)])
          self.kf.predict_and_observe(t, ObservationKind.ANGLE_OFFSET_FAST, [0])
        else:
          self.kf.filter.filter_time = t - 0.1
def main(sm=None, pm=None):
  """Parameter-learner daemon loop.

  Subscribes to 'liveLocationKalman' and 'carState', feeds them to a
  ParamsLearner, and publishes a 'liveParameters' message on every
  carState update. sm/pm are injectable for testing.
  """
  if sm is None:
    sm = messaging.SubMaster(['liveLocationKalman', 'carState'])
  if pm is None:
    pm = messaging.PubMaster(['liveParameters'])

  # TODO: Read from car params at runtime
  from selfdrive.car.toyota.interface import CarInterface
  from selfdrive.car.toyota.values import CAR
  # NOTE(review): vehicle is hard-coded to COROLLA_TSS2 regardless of the
  # actual car (see TODO above) — learned values are wrong on other cars.
  CP = CarInterface.get_params(CAR.COROLLA_TSS2)
  learner = ParamsLearner(CP)

  while True:
    sm.update()

    for which, updated in sm.updated.items():
      if not updated:
        continue
      t = sm.logMonoTime[which] * 1e-9  # ns -> s
      learner.handle_log(t, which, sm[which])

    # TODO: set valid to false when locationd stops sending
    # TODO: make sure controlsd knows when there is no gyro
    # TODO: move posenetValid somewhere else to show the model uncertainty alert
    # TODO: Save and resume values from param
    # TODO: Change KF to allow mass, etc to be inputs in predict step
    if sm.updated['carState']:
      msg = messaging.new_message('liveParameters')
      msg.logMonoTime = sm.logMonoTime['carState']

      msg.liveParameters.valid = True  # TODO: Check if learned values are sane
      msg.liveParameters.posenetValid = True
      msg.liveParameters.sensorValid = True

      x = learner.kf.x
      msg.liveParameters.steerRatio = float(x[States.STEER_RATIO])
      msg.liveParameters.stiffnessFactor = float(x[States.STIFFNESS])
      msg.liveParameters.angleOffsetAverage = math.degrees(x[States.ANGLE_OFFSET])
      msg.liveParameters.angleOffset = math.degrees(x[States.ANGLE_OFFSET_FAST])

      # P = learner.kf.P
      # print()
      # print("sR", float(x[States.STEER_RATIO]), float(P[States.STEER_RATIO, States.STEER_RATIO])**0.5)
      # print("x ", float(x[States.STIFFNESS]), float(P[States.STIFFNESS, States.STIFFNESS])**0.5)
      # print("ao avg ", math.degrees(x[States.ANGLE_OFFSET]), math.degrees(P[States.ANGLE_OFFSET, States.ANGLE_OFFSET])**0.5)
      # print("ao ", math.degrees(x[States.ANGLE_OFFSET_FAST]), math.degrees(P[States.ANGLE_OFFSET_FAST, States.ANGLE_OFFSET_FAST])**0.5)

      pm.send('liveParameters', msg)
if __name__ == "__main__":
main()
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,695,664
|
Gernby/openpilot
|
refs/heads/gernby-0.7.4
|
/panda/tests/safety/test_chrysler.py
|
#!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
from panda.tests.safety.common import StdTest, make_msg
MAX_RATE_UP = 3
MAX_RATE_DOWN = 3
MAX_STEER = 261
MAX_RT_DELTA = 112
RT_INTERVAL = 250000
MAX_TORQUE_ERROR = 80
TX_MSGS = [[571, 0], [658, 0], [678, 0]]
def chrysler_checksum(msg, len_msg):
    """Bit-serial 8-bit checksum over the first len_msg-1 payload bytes.

    Bytes 0-3 come from msg.RDLR, bytes 4-7 from msg.RDHR (little-endian
    within each word). Returns the one's-complement of the accumulated
    value, masked to 8 bits.
    """
    crc = 0xFF
    for byte_idx in range(len_msg - 1):
        word = msg.RDLR if byte_idx < 4 else msg.RDHR
        data_byte = (word >> (8 * (byte_idx % 4))) & 0xFF
        # Process the byte MSB-first, one bit per step.
        for bit_pos in range(7, -1, -1):
            msb_set = bool(crc & 0x80)
            if data_byte & (1 << bit_pos):
                mix = 1 if msb_set else 0x1C
                crc <<= 1
                mix ^= crc | 1
            else:
                mix = 0x1D if msb_set else 0
                crc <<= 1
                mix ^= crc
            crc = mix
    return ~crc & 0xFF
class TestChryslerSafety(unittest.TestCase):
  """Exercises the Chrysler safety model in the panda firmware: message
  construction helpers build raw CAN payloads with rolling counters and
  checksums; tests assert rx/tx hook accept/reject decisions."""

  # Class-level rolling counters (4-bit) embedded into generated messages.
  cnt_torque_meas = 0
  cnt_gas = 0
  cnt_cruise = 0
  cnt_brake = 0

  @classmethod
  def setUp(cls):
    cls.safety = libpandasafety_py.libpandasafety
    cls.safety.set_safety_hooks(Panda.SAFETY_CHRYSLER, 0)
    cls.safety.init_tests_chrysler()

  def _button_msg(self, buttons):
    # Cruise button message (addr 571).
    to_send = make_msg(0, 571)
    to_send[0].RDLR = buttons
    return to_send

  def _cruise_msg(self, active):
    # Cruise state (addr 500) with counter + checksum.
    to_send = make_msg(0, 500)
    to_send[0].RDLR = 0x380000 if active else 0
    to_send[0].RDHR |= (self.cnt_cruise % 16) << 20
    to_send[0].RDHR |= chrysler_checksum(to_send[0], 8) << 24
    self.__class__.cnt_cruise += 1
    return to_send

  def _speed_msg(self, speed):
    # Wheel speed (addr 514); 0.071028 is the scale factor per raw count.
    speed = int(speed / 0.071028)
    to_send = make_msg(0, 514, 4)
    to_send[0].RDLR = ((speed & 0xFF0) >> 4) + ((speed & 0xF) << 12) + \
                      ((speed & 0xFF0) << 12) + ((speed & 0xF) << 28)
    return to_send

  def _gas_msg(self, gas):
    # Gas pedal (addr 308) with rolling counter.
    to_send = make_msg(0, 308)
    to_send[0].RDHR = (gas & 0x7F) << 8
    to_send[0].RDHR |= (self.cnt_gas % 16) << 20
    self.__class__.cnt_gas += 1
    return to_send

  def _brake_msg(self, brake):
    # Brake pedal (addr 320) with counter + checksum.
    to_send = make_msg(0, 320)
    to_send[0].RDLR = 5 if brake else 0
    to_send[0].RDHR |= (self.cnt_brake % 16) << 20
    to_send[0].RDHR |= chrysler_checksum(to_send[0], 8) << 24
    self.__class__.cnt_brake += 1
    return to_send

  def _set_prev_torque(self, t):
    # Put the firmware torque-tracking state into a known condition.
    self.safety.set_chrysler_desired_torque_last(t)
    self.safety.set_chrysler_rt_torque_last(t)
    self.safety.set_chrysler_torque_meas(t, t)

  def _torque_meas_msg(self, torque):
    # Measured EPS torque (addr 544); value offset by +1024.
    to_send = make_msg(0, 544)
    to_send[0].RDHR = ((torque + 1024) >> 8) + (((torque + 1024) & 0xff) << 8)
    to_send[0].RDHR |= (self.cnt_torque_meas % 16) << 20
    to_send[0].RDHR |= chrysler_checksum(to_send[0], 8) << 24
    self.__class__.cnt_torque_meas += 1
    return to_send

  def _torque_msg(self, torque):
    # LKAS steering command (addr 0x292); value offset by +1024.
    to_send = make_msg(0, 0x292)
    to_send[0].RDLR = ((torque + 1024) >> 8) + (((torque + 1024) & 0xff) << 8)
    return to_send

  def test_spam_can_buses(self):
    StdTest.test_spam_can_buses(self, TX_MSGS)

  def test_relay_malfunction(self):
    StdTest.test_relay_malfunction(self, 0x292)

  def test_default_controls_not_allowed(self):
    self.assertFalse(self.safety.get_controls_allowed())

  def test_steer_safety_check(self):
    # Any nonzero torque is rejected when controls are not allowed;
    # torque beyond MAX_STEER is always rejected.
    for enabled in [0, 1]:
      for t in range(-MAX_STEER*2, MAX_STEER*2):
        self.safety.set_controls_allowed(enabled)
        self._set_prev_torque(t)
        if abs(t) > MAX_STEER or (not enabled and abs(t) > 0):
          self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(t)))
        else:
          self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))

  def test_manually_enable_controls_allowed(self):
    StdTest.test_manually_enable_controls_allowed(self)

  def test_enable_control_allowed_from_cruise(self):
    to_push = self._cruise_msg(True)
    self.safety.safety_rx_hook(to_push)
    self.assertTrue(self.safety.get_controls_allowed())

  def test_disable_control_allowed_from_cruise(self):
    to_push = self._cruise_msg(False)
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(to_push)
    self.assertFalse(self.safety.get_controls_allowed())

  def test_gas_disable(self):
    # Gas press disengages only above a speed threshold (between the two
    # speed values pushed here).
    self.safety.set_controls_allowed(1)
    self.safety.safety_rx_hook(self._speed_msg(2.2))
    self.safety.safety_rx_hook(self._gas_msg(1))
    self.assertTrue(self.safety.get_controls_allowed())
    self.safety.safety_rx_hook(self._gas_msg(0))
    self.safety.safety_rx_hook(self._speed_msg(2.3))
    self.safety.safety_rx_hook(self._gas_msg(1))
    self.assertFalse(self.safety.get_controls_allowed())

  def test_brake_disengage(self):
    StdTest.test_allow_brake_at_zero_speed(self)
    StdTest.test_not_allow_brake_when_moving(self, 0)

  def test_non_realtime_limit_up(self):
    self.safety.set_controls_allowed(True)
    self._set_prev_torque(0)
    self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP)))
    self._set_prev_torque(0)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(MAX_RATE_UP + 1)))

  def test_non_realtime_limit_down(self):
    self.safety.set_controls_allowed(True)
    self.safety.set_chrysler_rt_torque_last(MAX_STEER)
    torque_meas = MAX_STEER - MAX_TORQUE_ERROR - 20
    self.safety.set_chrysler_torque_meas(torque_meas, torque_meas)
    self.safety.set_chrysler_desired_torque_last(MAX_STEER)
    self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(MAX_STEER - MAX_RATE_DOWN)))
    self.safety.set_chrysler_rt_torque_last(MAX_STEER)
    self.safety.set_chrysler_torque_meas(torque_meas, torque_meas)
    self.safety.set_chrysler_desired_torque_last(MAX_STEER)
    self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(MAX_STEER - MAX_RATE_DOWN + 1)))

  def test_exceed_torque_sensor(self):
    self.safety.set_controls_allowed(True)
    for sign in [-1, 1]:
      self._set_prev_torque(0)
      for t in np.arange(0, MAX_TORQUE_ERROR + 2, 2):  # step needs to be smaller than MAX_TORQUE_ERROR
        t *= sign
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_TORQUE_ERROR + 2))))

  def test_realtime_limit_up(self):
    self.safety.set_controls_allowed(True)
    for sign in [-1, 1]:
      self.safety.init_tests_chrysler()
      self._set_prev_torque(0)
      for t in np.arange(0, MAX_RT_DELTA+1, 1):
        t *= sign
        self.safety.set_chrysler_torque_meas(t, t)
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))
      self.assertFalse(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))
      self._set_prev_torque(0)
      for t in np.arange(0, MAX_RT_DELTA+1, 1):
        t *= sign
        self.safety.set_chrysler_torque_meas(t, t)
        self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(t)))
      # Increase timer to update rt_torque_last
      self.safety.set_timer(RT_INTERVAL + 1)
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * MAX_RT_DELTA)))
      self.assertTrue(self.safety.safety_tx_hook(self._torque_msg(sign * (MAX_RT_DELTA + 1))))

  def test_torque_measurements(self):
    # The firmware keeps a min/max window over recent torque samples.
    self.safety.safety_rx_hook(self._torque_meas_msg(50))
    self.safety.safety_rx_hook(self._torque_meas_msg(-50))
    self.safety.safety_rx_hook(self._torque_meas_msg(0))
    self.safety.safety_rx_hook(self._torque_meas_msg(0))
    self.safety.safety_rx_hook(self._torque_meas_msg(0))
    self.safety.safety_rx_hook(self._torque_meas_msg(0))
    self.assertEqual(-50, self.safety.get_chrysler_torque_meas_min())
    self.assertEqual(50, self.safety.get_chrysler_torque_meas_max())
    self.safety.safety_rx_hook(self._torque_meas_msg(0))
    self.assertEqual(0, self.safety.get_chrysler_torque_meas_max())
    self.assertEqual(-50, self.safety.get_chrysler_torque_meas_min())
    self.safety.safety_rx_hook(self._torque_meas_msg(0))
    self.assertEqual(0, self.safety.get_chrysler_torque_meas_max())
    self.assertEqual(0, self.safety.get_chrysler_torque_meas_min())

  def test_cancel_button(self):
    # Only the cancel button value may be transmitted by openpilot.
    CANCEL = 1
    for b in range(0, 0xff):
      if b == CANCEL:
        self.assertTrue(self.safety.safety_tx_hook(self._button_msg(b)))
      else:
        self.assertFalse(self.safety.safety_tx_hook(self._button_msg(b)))

  def test_fwd_hook(self):
    # Bus 0 <-> bus 2 forwarding; openpilot's own addresses blocked cam->car.
    buss = list(range(0x0, 0x3))
    msgs = list(range(0x1, 0x800))
    blocked_msgs = [658, 678]
    for b in buss:
      for m in msgs:
        if b == 0:
          fwd_bus = 2
        elif b == 1:
          fwd_bus = -1
        elif b == 2:
          fwd_bus = -1 if m in blocked_msgs else 0
        # assume len 8
        self.assertEqual(fwd_bus, self.safety.safety_fwd_hook(b, make_msg(b, m, 8)))
if __name__ == "__main__":
unittest.main()
|
{"/selfdrive/car/car_helpers.py": ["/common/vin.py"]}
|
43,773,330
|
MaximZagoruy/Newsbot
|
refs/heads/main
|
/vkparser.py
|
import requests
import json
from db import create_news_items
def get_feed():
    """Fetch the 100 most recent wall posts of VK community -36180072 and
    pass the raw ``items`` list to ``create_news_items`` (db module),
    returning its result.

    WARNING(review): the VK access_token is hard-coded in the URL below —
    this is a leaked credential and should be moved to config/env vars.
    Also note: if the API returns an error, ``response`` is None and the
    ``res['items']`` access raises TypeError.
    """
    res = requests.get('https://api.vk.com/method/wall.get?owner_id=-36180072&count=100&v=5.52&access_token=976f55e3faaac76907812e9747747850cf79ef6a2ee0ca76622a839fe0cc9df20400cb355a592ad18ad32')
    res = json.loads(res.content.decode('utf-8')).get('response')
    return create_news_items(res['items'])
|
{"/vkparser.py": ["/settings.py", "/db.py"], "/db.py": ["/settings.py"], "/handlers.py": ["/db.py", "/vkparser.py"], "/bot.py": ["/handlers.py", "/settings.py"]}
|
43,883,593
|
brian86258/Discord_Bot
|
refs/heads/master
|
/main.py
|
import discord
from discord import user
import requests
import json, random
from DB import create_user, create_table, delete_user, create_transaction
import DB
client = discord.Client()
token = "ODQ4MTQ1NzIyMTI5MzgzNDM1.YLIXQQ.9QAPx5L62mAqsPiupeVuFYddKJ0"
starter_encouragements = [
"Cheer up!",
"Hang in there.",
"You are a great person / bot!"
]
sad_words = ["sad", "depressed", "unhappy", "angry", "miserable"]
def get_quote():
    """Fetch one random quote from zenquotes.io.

    Returns the quote formatted as '<text> -<author>'. Raises
    requests.RequestException on network failure.
    """
    response = requests.get("https://zenquotes.io/api/random")
    # The API returns a one-element JSON list; response.json() decodes it
    # directly instead of the manual json.loads(response.text) round-trip.
    json_data = response.json()
    quote = json_data[0]['q'] + " -" + json_data[0]['a']
    return quote
@client.event
async def on_ready():
    # Fired once the gateway connection is established; log which bot
    # account we authenticated as.
    print('We have logged in as {0.user}'.format(client))
# @client.event
# async def on_typing(channel, user, when):
# print("send DM to typing user")
# await user.send(''' Send before you think twice.
# {}
# {}
# '''.format(channel.name, when))
@client.event
async def on_message(message):
    """Dispatch '$'-prefixed chat commands.

    NOTE: the command checks below are independent ``if`` blocks, not
    elif — a single message can trigger several replies (e.g. '$inspire'
    plus the sad-word auto-encouragement).
    """
    if message.author == client.user:
        return  # ignore our own messages to avoid reply loops

    author = message.author
    msg = message.content

    if msg.startswith('$hello'):
        await message.channel.send('Hello!')

    if msg.startswith('$createUser'):
        # Persist the author in the users table (DB.create_user).
        create_user(author.id,author.name)
        await message.channel.send("Successful create USER {}".format(author.name))

    if msg.startswith('$inspire'):
        quote = get_quote()
        await message.channel.send(quote)

    # Runs on every message, not only commands.
    if any(word in msg for word in sad_words):
        await message.channel.send(random.choice(starter_encouragements))

    if msg.startswith("$DM"):
        await message.author.send("HI")

    if msg.startswith("$whoami"):
        # DM the author their own account/guild details.
        res = '''
id: {}
name :{}
guild: {}
status: {}
role: {}
'''.format(author.id, author.name, author.guild, author.status, author.roles)
        await author.send(res)

    if msg.startswith('$roles'):
        role_name = [role.name for role in author.roles]
        res = '''
role_names : {}
'''.format(role_name)
        if 'manager' in role_name:
            res +="\n You're a fucking manger!!"
        await message.channel.send(res)

    if msg.startswith('$add'):
        user_id = author.id
        # NOTE(review): local name shadows the module-level bot `token`;
        # here it is the token amount parsed from '$add <amount>'.
        # An IndexError is raised if no argument follows the command.
        token = msg.split(' ')[1]
        res_messsage = DB.add_token(user_id, token)
        await message.channel.send(res_messsage)

    if msg.startswith('$tokens'):
        user_id = author.id
        tokens = DB.select_token(user_id)
        await message.author.send('''>>> ```markdown
Hi you still have ##{}
```
'''.format(tokens))
@client.event
async def on_member_join(member):
    # DM every new guild member.
    await member.send('Private message')

# WARNING(review): `token` is a bot credential hard-coded earlier in this
# file — it is a leaked secret and must be rotated and moved to env/config.
client.run(token)
|
{"/main.py": ["/DB.py"]}
|
43,883,594
|
brian86258/Discord_Bot
|
refs/heads/master
|
/db_test.py
|
"""Ad-hoc script: read one user's token balance from data.sqlite."""
import sqlite3

db = sqlite3.connect('data.sqlite')
user_id = "741527663147220992"
cur = db.cursor()
# Parameterized query instead of str.format interpolation: avoids SQL
# injection and quoting bugs; behavior is otherwise identical.
res = cur.execute("Select tokens from users where user_id = ?", (user_id,)).fetchone()
print(type(res[0]))
# Close the connection explicitly (was previously leaked).
db.close()
|
{"/main.py": ["/DB.py"]}
|
43,993,217
|
daniel23102000/iniprojectbarukita
|
refs/heads/main
|
/login/accounts/login/accounts/admin.py
|
from django.contrib import admin
from .models import Composer, Musictitle, Genre , Country , City , Singer
# Register every catalogue model with the default ModelAdmin options;
# registration order matches the original explicit calls.
for _model in (Composer, Musictitle, Genre, Country, City, Singer):
    admin.site.register(_model)
|
{"/iniprojectkita-master/iniprojectkita-master/accounts/admin.py": ["/iniprojectkita-master/iniprojectkita-master/accounts/models.py"]}
|
44,013,537
|
bmagnette/default_flask
|
refs/heads/master
|
/core/application.py
|
import os
from dotenv import load_dotenv
from flask import Flask
from flask_migrate import Migrate
from core.api.hello import hello_world_router
from core.extensions import mail, db
def create_app() -> Flask:
    """Flask application factory.

    Loads environment variables from the project-root .env, builds the app
    with its template folder, configures SQLAlchemy from the environment,
    registers the hello blueprint, and initialises extensions (mail, db,
    migrations). Raises KeyError if SQLALCHEMY_DATABASE_URI is unset.
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))
    project_path = os.path.abspath(os.path.join(dir_path, os.pardir))
    load_dotenv(dotenv_path=project_path + '/.env')

    app = Flask("Insiders", template_folder=os.path.join(dir_path, 'templates'))
    # Connection string comes from the environment, e.g.
    # '<db_type>://<user>:<password>@<host>/<name>'.
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ[
        'SQLALCHEMY_DATABASE_URI']
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['DEBUG'] = False

    app.register_blueprint(hello_world_router)
    register_extensions(app)
    # register_models(app)
    return app
def register_extensions(app: Flask) -> None:
    """Bind the shared mail/db extension instances and Alembic migrations
    to this app instance."""
    mail.init_app(app)
    db.init_app(app)
    Migrate(app, db)
def register_models(app: Flask) -> None:
    """Create all tables for the currently-imported models.

    Currently unused (call site in create_app is commented out); kept for
    bootstrapping a fresh database without migrations.
    """
    with app.app_context():
        db.create_all()
|
{"/core/application.py": ["/core/api/hello.py", "/core/extensions.py", "/core/models/stack.py"], "/run_app.py": ["/core/application.py"], "/core/models/stack.py": ["/core/extensions.py"], "/core/api/hello.py": ["/core/extensions.py", "/core/models/stack.py"]}
|
44,013,538
|
bmagnette/default_flask
|
refs/heads/master
|
/core/api/hello.py
|
from flask import Blueprint, render_template
# Blueprint registered by create_app(); internal name 'hello2'.
hello_world_router = Blueprint('hello2', __name__)


@hello_world_router.route("/")
def hello_world():
    """Render the landing page.

    You can add whatever you want here, e.g. load a model.
    """
    return render_template("hello.html")  # view arguments can be passed here
|
{"/core/application.py": ["/core/api/hello.py", "/core/extensions.py", "/core/models/stack.py"], "/run_app.py": ["/core/application.py"], "/core/models/stack.py": ["/core/extensions.py"], "/core/api/hello.py": ["/core/extensions.py", "/core/models/stack.py"]}
|
44,013,958
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/migrations/0001_initial.py
|
# Generated by Django 3.1.3 on 2020-12-24 08:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the budget app (auto-generated — do not restyle).

    Creates ExpenseType (name lookup) and Expense (amount + timestamps),
    with Expense rows deleted when their user is (CASCADE) but left in
    place when their type is deleted (DO_NOTHING).
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ExpenseType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='budget.expensetype')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,959
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/test/test_models.py
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from quizes.models import Quiz
class QuizModelTest(TestCase):
    """Integration-style tests for the Quiz model against the test DB."""

    # Note: what is expected to fail here, whats the point - integration test ?
    def test_quiz_save_and_retrieve(self):
        quiz = Quiz(title='Hard quiz')
        quiz.save()
        quizes = Quiz.objects.all()
        self.assertEqual(quizes.count(), 1)
        saved_quiz = quizes[0]
        self.assertEqual(saved_quiz.title, 'Hard quiz')

    def test_quiz_has_persisted_uuid(self):
        # The uuid must be generated on construction and remain stable
        # across separate queryset fetches (i.e. persisted, not per-load).
        quiz = Quiz(title='Hard quiz')
        quiz.save()
        quizes = Quiz.objects.all()
        saved_quiz = quizes[0]
        self.assertTrue(saved_quiz.uuid, 'Missing uuid')
        quizes2 = Quiz.objects.all()
        saved_quiz2 = quizes2[0]
        self.assertEqual(saved_quiz.uuid, saved_quiz2.uuid, 'UUID is new each time instead of persisted')

    # Note: this was later duplicated in test_forms
    def test_quiz_title_max_length(self):
        # full_clean() enforces max_length=40; a bare save() would not
        # validate on SQLite, hence the explicit clean before saving.
        with self.assertRaises(ValidationError):
            quiz = Quiz(title='This is intentionally looooooooooooooooooooong string')
            quiz.full_clean()
            quiz.save()
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,960
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/views.py
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
# Create your views here.
from budget.forms import ExpenseForm
from budget.models import Expense
@login_required
def create_expense(request):
    """Create an Expense owned by the logged-in user.

    GET renders an empty form; a valid POST saves the expense and
    redirects to the list view; an invalid POST re-renders the bound
    form so field errors are shown.
    """
    if request.method == "POST":
        form = ExpenseForm(request.POST)
        if form.is_valid():
            # Attach the owner before persisting — the form exposes no
            # user field. save(commit=False) is the documented ModelForm
            # idiom for this (replaces mutating form.instance directly).
            expense = form.save(commit=False)
            expense.user = request.user
            expense.save()
            return redirect('/budget/all')
    else:
        form = ExpenseForm()
    return render(request, 'create_expense.html', {'form': form})
@login_required
def all_expenses(request):
    """Render the list of expenses belonging to the current user."""
    user_expenses = Expense.objects.filter(user=request.user)
    return render(request, 'list_expenses.html', {'expenses': user_expenses})
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,961
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/admin.py
|
from django.contrib import admin
# Register your models here.
from budget.models import Expense, ExpenseType
class ExpenseAdmin(admin.ModelAdmin):
pass
admin.site.register(Expense, ExpenseAdmin)
class ExpenseTypeAdmin(admin.ModelAdmin):
    """Default admin for ExpenseType.

    The inner ``class Meta`` with verbose_name that used to live here was
    dead code: ModelAdmin does not read an inner Meta — verbose_name /
    verbose_name_plural are model Meta options and were silently ignored.
    Set them on ExpenseType.Meta if the display name should change.
    """
    pass


admin.site.register(ExpenseType, ExpenseTypeAdmin)
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,962
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/forms.py
|
from django import forms
from quizes.models import Quiz
class QuizForm(forms.models.ModelForm):
    """ModelForm for creating a Quiz; exposes only the title field with a
    styled text input and user-friendly validation messages."""

    class Meta:
        model = Quiz
        fields = ('title',)
        widgets = {
            'title': forms.fields.TextInput(attrs={
                'placeholder': 'Enter a quiz title',
                'class': 'create-quiz__title-input',
            }),
        }
        # Override Django's default required/max_length error strings.
        error_messages = {
            'title': {
                'required': "You can't have a quiz with empty title",
                'max_length': "You can't have a quiz with more than 40 characters"
            }
        }
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,963
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/models.py
|
import uuid
from django.db import models
# Create your models here.
class Quiz(models.Model):
    # Display name; 40-char limit enforced when full_clean() is called.
    title = models.CharField(blank=False, max_length=40)
    # Stable public identifier (used in quiz URLs); generated client-side.
    uuid = models.CharField(max_length=40, null=True, blank=True, unique=True)

    def __init__(self, *args, **kwargs):
        """Assign a random UUID the first time an instance is constructed.

        Instances loaded from the DB keep their stored value because
        self.uuid is then not None. NOTE(review): a field ``default=``
        callable would be the more conventional way to do this.
        """
        super(Quiz, self).__init__(*args, **kwargs)
        if self.uuid is None:
            self.uuid = str(uuid.uuid4())
class Publication(models.Model):
    """Publication that articles can belong to (many-to-many demo model)."""
    title = models.CharField(max_length=30)

    class Meta:
        ordering = ['title']

    def __str__(self):
        return self.title
class Article(models.Model):
    """Article linked to any number of Publications (many-to-many demo)."""
    headline = models.CharField(max_length=100)
    publications = models.ManyToManyField(Publication)

    class Meta:
        ordering = ['headline']

    def __str__(self):
        return self.headline
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,964
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/test/test_views.py
|
from django.test import TestCase, SimpleTestCase
from django.urls import resolve
from http import HTTPStatus
from django.utils.html import escape
from quizes.forms import QuizForm
from quizes.models import Quiz
from quizes.views import view_quiz, create_quiz, list_quizes, index
class HomePageViewTests(SimpleTestCase):
    """Routing and content checks for '/' (no DB needed, hence SimpleTestCase)."""

    def test_root_url_resolves_to_home_view(self):
        found = resolve('/')
        self.assertEqual(found.func, index)

    def test_app_title_on_home_page(self):
        response = self.client.get('/')
        self.assertContains(response, 'Welcome to the Quiz App', html=True)
class QuizViewTests(TestCase):
    """Tests for the single-quiz page at /quiz/<uuid>."""

    def test_uuid_link_resolves_to_quiz_page_view(self):
        found = resolve('/quiz/075194d3-6885-417e-a8a8-6c931e272f00')
        self.assertEqual(found.func, view_quiz)

    def test_link_with_numbers_returns_404(self):
        # A path that doesn't match the uuid pattern must 404.
        response = self.client.get('/quiz/075194')
        self.assertEqual(response.status_code, 404)

    def test_quiz_title_is_displayed(self):
        quiz = Quiz(title='Hard quiz')
        quiz.save()
        quizes = Quiz.objects.all()
        response = self.client.get(f'/quiz/{quiz.uuid}')
        self.assertTemplateUsed(response, 'quiz.html')
        self.assertContains(response, 'Hard quiz')
class CreateQuizViewTests(TestCase):
    """Tests for the quiz-creation view at /quiz."""

    def test_url_resolves_to_quiz_create_page_view(self):
        found = resolve('/quiz')
        self.assertEqual(found.func, create_quiz)

    def test_quiz_page_uses_quiz_form(self):
        response = self.client.get('/quiz')
        # Note: this assert was passing when I was not actually rendering the form in the template
        # Missing input would be caught by the functional tests though
        # should I check some html content here ?
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertIsInstance(response.context['form'], QuizForm)

    def test_quiz_page_form_error(self):
        # Posting an empty title should re-render with the custom message.
        response = self.client.post('/quiz', data={'title': ''})
        self.assertEqual(response.status_code, HTTPStatus.OK)
        # all errors are contained in one property in the view
        # checking that one error is displayed should be sufficient
        self.assertContains(response, escape("You can't have a quiz with empty title"), html=True)
class QuizListViewTests(TestCase):
    """Tests for the quiz list at /quiz/all."""

    def test_all_url_resolves_to_quiz_list_view(self):
        found = resolve('/quiz/all')
        self.assertEqual(found.func, list_quizes)

    def test_quiz_list_view(self):
        quiz = Quiz(title='First quiz')
        quiz.save()
        response = self.client.get('/quiz/all')
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertContains(response, escape('All Quizes'), html=True)
        self.assertContains(response, escape('First quiz'))
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,965
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/urls.py
|
from django.conf.urls import url
from budget import views

# URL routes for the budget app.
# NOTE(review): the non-root patterns are not anchored with '^', so Django
# matches them anywhere in the remaining path — confirm this is intentional.
urlpatterns = [
    url(r'^$', views.create_expense),
    url(r'/all', views.all_expenses),
    # url(r'/(?P<uuid>[0-9A-Fa-f]{8}[-]?(?:[0-9A-Fa-f]{4}[-]?){3}[0-9A-Fa-fs]{12})', views.view_quiz)
]
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,966
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/admin.py
|
from django.contrib import admin

from quizes.models import Quiz


# Expose the Quiz model in the Django admin with default options.
@admin.register(Quiz)
class QuizAdmin(admin.ModelAdmin):
    pass
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,967
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/formsets/views.py
|
from django.shortcuts import render, redirect
# Create your views here.
from formsets.forms import ArticleForm
def forms_home(request):
    """Render the formsets demo landing page."""
    return render(request, 'home.html')
def article_form(request):
    """Show the article form; on a valid POST, dump the data and return to /forms.

    An invalid POST falls through and re-renders the bound form with its errors.
    """
    if request.method != "POST":
        return render(request, 'article.html', {'form': ArticleForm()})
    form = ArticleForm(request.POST)
    if form.is_valid():
        print(form.cleaned_data)
        return redirect('/forms')
    return render(request, 'article.html', {'form': form})
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,968
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/forms.py
|
from django import forms
from budget.models import Expense
class ExpenseForm(forms.models.ModelForm):
    """ModelForm for creating an Expense (amount + type).

    Fix: the previous version carried ``widgets`` and ``error_messages``
    entries keyed on ``'title'`` — copied from the quiz form.  ``Expense``
    has no ``title`` field and ``'title'`` is not in ``Meta.fields``, so
    Django silently ignored that configuration.  The dead config is removed;
    rendered output and validation behavior are unchanged.
    """

    class Meta:
        model = Expense
        fields = ('amount', 'type')
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,969
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/functional_tests.py
|
from selenium import webdriver
import unittest
import time
import uuid
def generate_random_string():
    """Return eight uppercase hex characters derived from a fresh UUID4."""
    token = uuid.uuid4().hex.upper()
    return token[:8]
class UserVisitTest(unittest.TestCase):
    """Selenium functional tests driving the quiz app in a real browser.

    Requires Firefox/geckodriver and the dev server running on localhost:3000.
    """

    def setUp(self):
        # A fresh browser per test keeps sessions isolated between tests.
        self.browser = webdriver.Firefox()

    def tearDown(self):
        self.browser.quit()

    def test_create_quiz_page(self):
        """A visitor can create a quiz and sees it listed afterwards."""
        # Joe wants to create a quiz to challenge his friends and visits our cool app
        self.browser.get('http://localhost:3000/quiz')
        self.assertIn('Quiz', self.browser.title, 'Page title does not contain Quiz')
        # Joe puts in a title for the whole quiz
        quiz_title_input = self.browser.find_element_by_css_selector('.create-quiz__title-input')
        quiz_title_input.send_keys('History quiz')
        quiz_form_submit_button = self.browser.find_element_by_css_selector('.create-quiz__submit-btn')
        # He submits the form
        quiz_form_submit_button.click()
        # Crude wait for the redirect; WebDriverWait would be more robust.
        time.sleep(1)
        # .. and sees the new title in the quiz table
        quiz_titles = self.browser.find_elements_by_class_name('quizes__list-item')
        self.assertTrue(any(['History quiz' in title.text for title in quiz_titles]))

    def test_visit_home_page(self):
        """The home page carries the welcome banner."""
        # Joe follow a link and finds himself on our app
        self.browser.get('http://localhost:3000')
        self.assertIn('Quiz', self.browser.title, 'Page title does not contain Quiz')
        home_page_title = self.browser.find_element_by_class_name('home__title')
        self.assertIn('Welcome to the Quiz App', home_page_title.text)

    def test_register_user(self):
        """Registering via the navbar link lands the user on the login page."""
        # Joe liked what he saw and wants to register
        self.browser.get('http://localhost:3000')
        # Joe decides to follow the register link
        register_link = self.browser.find_element_by_class_name('navbar__register-btn')
        register_link.click()
        time.sleep(1)
        register_page_title = self.browser.find_element_by_class_name('register__title')
        self.assertIn('Register', register_page_title.text)
        # Random credentials avoid unique-username collisions across runs.
        random_password = generate_random_string()
        random_username = generate_random_string()
        # he fills in username
        username_input = self.browser.find_element_by_css_selector('#id_username')
        username_input.send_keys(random_username)
        # and password
        password_input = self.browser.find_element_by_css_selector('#id_password1')
        password_input.send_keys(random_password)
        confirm_password_input = self.browser.find_element_by_css_selector('#id_password2')
        confirm_password_input.send_keys(random_password)
        # when ready he submits the form
        register_button = self.browser.find_element_by_class_name('register__submit-btn')
        register_button.click()
        time.sleep(1)
        # and is redirected to login page
        login_page_title = self.browser.find_element_by_class_name('login__title')
        self.assertIn('Login', login_page_title.text)


if __name__ == '__main__':
    unittest.main(warnings='ignore')
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,970
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/accounts/test/test_models.py
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from quizes.models import Quiz
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,971
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/accounts/test/test_views.py
|
import uuid
from django.contrib import auth
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User, AnonymousUser
from django.test import TestCase
from django.urls import resolve
from http import HTTPStatus
from accounts.views import register, login, logout
class RegisterViewTests(TestCase):
    """Tests for the /account/register view: routing, template, form, and POST flows."""

    def test_register_url_resolves_to_register_view(self):
        found = resolve('/account/register')
        self.assertEqual(found.func, register)

    def test_for_correct_template(self):
        response = self.client.get('/account/register')
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertContains(response, 'Register')
        self.assertTemplateUsed(response, 'register.html')

    def test_for_correct_form_instance(self):
        response = self.client.get('/account/register')
        self.assertIsInstance(response.context['form'], UserCreationForm)

    def test_valid_post_should_create_user(self):
        """A matching password pair creates the user and redirects to login."""
        # Note: move to reusable helper/util class
        username = uuid.uuid4().hex[:8].upper()
        password = uuid.uuid4().hex[:8].upper()
        form = {'username': username, 'password1': password, 'password2': password}
        response = self.client.post('/account/register', data=form)
        # Quiz: User.objects.get raises DoesNotExist if the user was not created.
        user = User.objects.get(username=username)
        self.assertIsNotNone(user)
        self.assertRedirects(response, '/account/login')

    def test_invalid_post_should_return_errors(self):
        """Missing passwords re-render the registration template with field errors."""
        form = {'username': 'test'}
        response = self.client.post('/account/register', data=form)
        self.assertTemplateUsed(response, 'register.html')
        self.assertContains(response, 'This field is required')
class LoginViewTests(TestCase):
    """Tests for /account/login; relies on the test_user fixture (test_user/test123456)."""

    fixtures = ['test_user.json']

    def test_login_url_resolves_to_login_view(self):
        found = resolve('/account/login')
        self.assertEqual(found.func, login)

    def test_for_correct_template(self):
        response = self.client.get('/account/login')
        self.assertEqual(response.status_code, HTTPStatus.OK)
        self.assertContains(response, 'Login')
        self.assertTemplateUsed(response, 'login.html')

    def test_for_correct_form_instance(self):
        response = self.client.get('/account/login')
        self.assertIsInstance(response.context['form'], AuthenticationForm)

    def test_valid_post_should_login_user(self):
        """Correct fixture credentials authenticate the session and redirect home."""
        form = {'username': 'test_user', 'password': 'test123456'}
        response = self.client.post('/account/login', data=form)
        user = auth.get_user(self.client)
        self.assertTrue(user.is_authenticated)
        self.assertRedirects(response, '/')

    def test_invalid_post_should_return_errors(self):
        """Unknown credentials re-render the login template with the standard error."""
        form = {'username': 'no_such_user', 'password': 'fake'}
        response = self.client.post('/account/login', data=form)
        self.assertTemplateUsed(response, 'login.html')
        self.assertContains(response, 'Please enter a correct username and password')
class LogoutViewTests(TestCase):
    """Tests for /account/logout."""

    def test_logout_url_resolves_to_logout_view(self):
        found = resolve('/account/logout')
        self.assertEqual(found.func, logout)

    def test_valid_post_should_logout_user(self):
        """POSTing to logout ends the session and redirects home.

        Fix: this test was previously named ``test_valid_post_should_login_user``
        — a copy-paste leftover from LoginViewTests; renamed to describe what
        it actually asserts.  It is still discovered by the runner (``test_``
        prefix), so suite behavior is unchanged.
        """
        user = User.objects.create_user(username='testuser', password='12345678')
        self.client.login(username=user.username, password='12345678')
        response = self.client.post('/account/logout')
        # After logout the client's user is the anonymous sentinel.
        user = auth.get_user(self.client)
        self.assertIsInstance(user, AnonymousUser)
        self.assertRedirects(response, '/')
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,972
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/urls.py
|
from django.conf.urls import url
from quizes import views

# URL routes for the quiz app.
# NOTE(review): the non-root patterns are not anchored with '^' — confirm intended.
urlpatterns = [
    url(r'^$', views.create_quiz),
    url(r'/all', views.list_quizes),
    # Fix: the final character class was [0-9A-Fa-fs]{12} — the stray 's'
    # accepted invalid UUIDs (e.g. 'ssssssssssss' in the last group).
    url(r'/(?P<uuid>[0-9A-Fa-f]{8}[-]?(?:[0-9A-Fa-f]{4}[-]?){3}[0-9A-Fa-f]{12})', views.view_quiz)
]
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,973
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/accounts/test/test_forms.py
|
import uuid
from django.test import TestCase
from accounts.forms import RegisterForm
class RegisterFormTest(TestCase):
    """Validation tests for the custom RegisterForm (username/name length, uniqueness)."""

    def test_for_blank_username(self):
        form = RegisterForm(data={'username': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['username'],
            ["This field is required."]
        )

    def test_for_long_username(self):
        # User.username max_length is 150 in modern Django; 151 chars must fail.
        form = RegisterForm(data={'username': "x" * 151})
        self.assertFalse(form.is_valid())
        self.assertTrue("Ensure this value has at most 150 characters" in str(form.errors['username']))

    def test_for_unique_username(self):
        """Saving a user then re-submitting the same username triggers the unique error."""
        username = uuid.uuid4().hex[:8].upper()
        password = uuid.uuid4().hex[:8].upper()
        # NOTE(review): .save() is called without checking is_valid() first —
        # works here because username+password satisfy the form, but fragile.
        register_user_form = RegisterForm(data={'username': username, 'password': password})
        register_user_form.save()
        form = RegisterForm(data={'username': username})
        self.assertFalse(form.is_valid())
        self.assertTrue("A user with that username already exists" in str(form.errors['username']))

    def test_for_long_first_name(self):
        form = RegisterForm(data={'first_name': "x" * 151})
        self.assertFalse(form.is_valid())
        self.assertTrue("Ensure this value has at most 150 characters" in str(form.errors['first_name']))

    def test_for_long_last_name(self):
        form = RegisterForm(data={'last_name': "x" * 151})
        self.assertFalse(form.is_valid())
        self.assertTrue("Ensure this value has at most 150 characters" in str(form.errors['last_name']))
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,974
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/formsets/forms.py
|
from django import forms
from django.core.exceptions import ValidationError
class ArticleForm(forms.Form):
    """Demo form: the title must contain the substring 'pesho' to validate."""

    title = forms.CharField()
    pub_date = forms.DateField()

    def clean_title(self):
        title = self.cleaned_data['title']
        if 'pesho' not in title:
            raise ValidationError('Title is not valid', 'special')
        return title
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,975
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/formsets/urls.py
|
from django.conf.urls import url
from formsets import views

# URL routes for the formsets demo app.
# NOTE(review): the '/article' pattern is unanchored (no '^') — confirm intended.
urlpatterns = [
    url(r'^$', views.forms_home),
    url(r'/article', views.article_form),
]
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,976
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/migrations/0002_auto_20201224_1030.py
|
# Generated by Django 3.1.3 on 2020-12-24 08:30
from django.db import migrations
def load_data(apps, schema_editor):
    """Data migration: seed the ExpenseType table with the default categories."""
    # We can't import the ExpenseType model directly as it may be a newer
    # version than this migration expects. We use the historical version.
    ExpenseType = apps.get_model('budget', 'ExpenseType')
    names = [
        'rent',
        'groceries',
        'eating out',
        'bills',
        'travel',
        'load payments'  # NOTE(review): likely a typo for 'loan payments' — already migrated, left as-is
    ]
    for name in names:
        expense_type = ExpenseType(name=name)
        expense_type.save()
class Migration(migrations.Migration):
    """Runs load_data after the initial budget schema migration."""

    dependencies = [
        ('budget', '0001_initial'),
    ]
    operations = [
        # No reverse function supplied, so this migration is irreversible.
        migrations.RunPython(load_data)
    ]
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,977
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/accounts/views.py
|
import json
from django.contrib.auth import authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib import messages, auth
def register(request):
    """Show the registration form; create the user and redirect to login on valid POST."""
    # Note: am I supposed to use these auth forms directly or subclass them?
    if request.method != "POST":
        return render(request, 'register.html', {'form': UserCreationForm()})
    form = UserCreationForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('/account/login')
    return render(request, 'register.html', {'form': form})
def login(request):
    """Authenticate a user and start a session.

    GET: render an empty AuthenticationForm.
    POST: validate the form; on success log the user in and redirect home,
    otherwise re-render the bound form (with its errors / flash message).
    """
    if request.method == "POST":
        form = AuthenticationForm(request, data=request.POST)
        if form.is_valid():
            # NOTE(review): AuthenticationForm.is_valid() has already
            # authenticated the credentials (form.get_user()); this second
            # authenticate() call is redundant but harmless.
            user = authenticate(
                request,
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password')
            )
            if user is not None:
                auth.login(request, user)
                messages.success(request, 'You are now logged in')
                return redirect('/')
            else:
                messages.error(request, 'Invalid credentials')
    else:
        form = AuthenticationForm()
    return render(request, 'login.html', {'form': form})
def logout(request):
    """End the current session on POST, then send the visitor to the home page."""
    if request.method == 'POST':
        auth.logout(request)
        messages.success(request, 'You are now logged out')
    # GET requests fall through: no logout, just the redirect.
    return redirect('/')
def add_to_session(request):
    """Demo endpoint: stash a value in the session and acknowledge with plain text."""
    request.session['likes'] = 9
    return HttpResponse('added')
def session(request):
    """Debug endpoint: dump the current session's cached data as JSON.

    NOTE(review): reaches into the private ``_session_cache`` attribute and
    leaks session contents to the client — development-only, do not ship.
    """
    from django.contrib.sessions.models import Session
    from django.contrib.auth.models import User
    # session_key = '8cae76c505f15432b48c8292a7dd0e54'
    #
    # session = Session.objects.get(session_key=session_key)
    # Forces the session to be persisted/populated before inspecting it.
    request.session.save()
    session_data = request.session
    # uid = session_data.get('_auth_user_id', 'no user id found in session')
    # print(uid)
    #
    # print
    # user.username, user.get_full_name(), user.email
    # serialized = dict((key, value) for key, value in session_data.__dict__
    #                   if not callable(value) and not key.startswith('__'))
    return HttpResponse(json.dumps(session_data.__dict__['_session_cache']))
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,978
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/test/test_forms.py
|
from django.test import SimpleTestCase
from quizes.forms import QuizForm
class QuizFormTest(SimpleTestCase):
    """Unit tests for QuizForm title validation (no database needed)."""

    def test_form_validation_for_blank_quiz_title(self):
        form = QuizForm(data={'title': ''})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['title'],
            ["You can't have a quiz with empty title"]
        )

    def test_form_validation_for_too_long_quiz_title(self):
        # The deliberately long string exceeds the 40-character limit.
        form = QuizForm(data={'title': 'This is intentionally looooooooooooooooooooong string'})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['title'],
            ["You can't have a quiz with more than 40 characters"]
        )

    def test_form_validation_for_valid_quiz_title(self):
        form = QuizForm(data={'title': 'Some quiz title'})
        self.assertTrue(form.is_valid())
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,979
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/budget/models.py
|
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class ExpenseType(models.Model):
    """A spending category (rent, groceries, ...)."""

    name = models.CharField(max_length=50)

    def __str__(self):
        return f"Type: {self.name}"
class Expense(models.Model):
    """A single expense entry belonging to a user, tagged with a category."""

    amount = models.DecimalField(max_digits=10, decimal_places=2)
    type = models.ForeignKey(ExpenseType, on_delete=models.DO_NOTHING)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f"Expense: {self.amount} ( {self.type.name} )"
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,980
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/quizes/views.py
|
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
# Create your views here.
from quizes.forms import QuizForm
from quizes.models import Quiz
def index(request):
    """Render the site index page."""
    return render(request, 'index.html')
def view_quiz(request, uuid):
    """Render a single quiz looked up by uuid; 404 when no match exists."""
    return render(request, 'quiz.html', {'quiz': get_object_or_404(Quiz, uuid=uuid)})
def create_quiz(request):
    """Show the quiz creation form; persist it and redirect to the list on valid POST."""
    if request.method != "POST":
        return render(request, 'create_quiz.html', {'form': QuizForm()})
    form = QuizForm(request.POST)
    if form.is_valid():
        form.save()
        return redirect('/quiz/all')
    return render(request, 'create_quiz.html', {'form': form})
def list_quizes(request):
    """Render every quiz in the database."""
    return render(request, 'list_quizes.html', {'quizes': Quiz.objects.all()})
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,981
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/accounts/forms.py
|
from django import forms
from django.contrib.auth.models import User
# Notes: at some point I realised there is a built-in form for that :(
class RegisterForm(forms.models.ModelForm):
    """User registration form.

    Notes: at some point I realised there is a built-in form for that :(

    Fixes over the previous version:
    * the password field now uses ``PasswordInput`` so it is not echoed in
      the browser;
    * ``save()`` hashes the password with ``set_password`` — the old form
      stored the raw POSTed value directly in ``User.password``, creating
      accounts that could never authenticate.
    """

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password')
        widgets = {
            'username': forms.fields.TextInput(attrs={
                'class': 'register__username-input'
            }),
            'first_name': forms.fields.TextInput(attrs={
                'class': 'register__first-name-input'
            }),
            'last_name': forms.fields.TextInput(attrs={
                'class': 'register__last-name-input'
            }),
            'email': forms.fields.TextInput(attrs={
                'class': 'register__email-input'
            }),
            'password': forms.PasswordInput(attrs={
                'class': 'register__password-input'
            })
        }

    def save(self, commit=True):
        """Persist the user with a properly hashed password."""
        user = super().save(commit=False)
        user.set_password(self.cleaned_data['password'])
        if commit:
            user.save()
        return user
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,013,982
|
peter-stoyanov/quiz-app
|
refs/heads/main
|
/accounts/urls.py
|
from django.conf.urls import url
from accounts import views

# URL routes for the accounts app.
# NOTE(review): these patterns are plain unanchored strings, so e.g.
# 'register' matches anywhere in the path — confirm this is intentional.
urlpatterns = [
    url('register', views.register),
    url('login', views.login),
    url('logout', views.logout),
    url('add_to_session', views.add_to_session),
    url('session', views.session),
]
|
{"/quizes/test/test_models.py": ["/quizes/models.py"], "/budget/views.py": ["/budget/forms.py", "/budget/models.py"], "/budget/admin.py": ["/budget/models.py"], "/quizes/forms.py": ["/quizes/models.py"], "/quizes/test/test_views.py": ["/quizes/forms.py", "/quizes/models.py", "/quizes/views.py"], "/quizes/admin.py": ["/quizes/models.py"], "/formsets/views.py": ["/formsets/forms.py"], "/budget/forms.py": ["/budget/models.py"], "/accounts/test/test_models.py": ["/quizes/models.py"], "/accounts/test/test_views.py": ["/accounts/views.py"], "/accounts/test/test_forms.py": ["/accounts/forms.py"], "/quizes/test/test_forms.py": ["/quizes/forms.py"], "/quizes/views.py": ["/quizes/forms.py", "/quizes/models.py"]}
|
44,071,544
|
darachm/itermae
|
refs/heads/main
|
/misc/fastq2sam.py
|
#!/usr/bin/env python3
# Tiny little script just to turn a fastq into a SAM file
import sys
import gzip
import argparse
from Bio import SeqIO
def format_sam_record(record_id, sequence, qualities, tags,
                      flag='0', reference_name='*',
                      mapping_position='0', mapping_quality='255', cigar_string='*',
                      reference_name_of_mate='=', position_of_mate='0', template_length='0'
                      ):
    """Assemble the eleven mandatory SAM columns plus a tag field, tab-joined.

    All values are passed (and returned) as strings; defaults describe an
    unmapped, unpaired record.
    """
    columns = (
        record_id,
        flag,
        reference_name,
        mapping_position,
        mapping_quality,
        cigar_string,
        reference_name_of_mate,
        position_of_mate,
        template_length,
        sequence,
        qualities,
        tags,
    )
    return "\t".join(columns)
# Name and description of this program
parser = argparse.ArgumentParser("")
parser.add_argument("--input", default="STDIN")
parser.add_argument("-z", "--gzipped", action="store_true")
args = parser.parse_args()

# Open the input handle for lazy parsing.
# Fixes over the previous version:
# * SeqIO.parse is lazy, but the old `with gzip.open(...)` blocks closed the
#   handle before any record was read, so gzipped input always failed;
# * the uncompressed file branch referenced the non-existent
#   `args.input_file` (the argparse dest is `input`);
# * gzip.open needs the binary buffer, not the text-mode sys.stdin wrapper.
if args.input == "STDIN":
    if args.gzipped:
        input_handle = gzip.open(sys.stdin.buffer, "rt")
    else:
        input_handle = sys.stdin
else:
    if args.gzipped:
        input_handle = gzip.open(args.input, "rt")
    else:
        input_handle = args.input  # SeqIO.parse accepts a filename directly

input_seqs = SeqIO.parse(input_handle, "fastq")

for record in input_seqs:
    print(
        format_sam_record(
            record.id, str(record.seq),
            # Re-encode PHRED qualities as the ASCII(+33) quality string.
            ''.join(chr(q + 33) for q in record.letter_annotations['phred_quality']),
            ''
        )
    )
|
{"/itermae/tests/test.py": ["/itermae/__init__.py"], "/tests/test_itermae.py": ["/itermae/__init__.py"]}
|
44,071,545
|
darachm/itermae
|
refs/heads/main
|
/tests/test_itermae.py
|
#!/usr/bin/env python3
import pytest
import itermae
# Required for testing apply_operation, as regex is generated in the command
# line program
import regex
# Required for testing SeqHolder etc. without the file IO of main script
from Bio import SeqIO, Seq, SeqRecord
# For doing combinations of the full-file parameters and formats
import itertools
# Required for full-file input/output testing
import subprocess
# For checking
import re
#### Ye Tests
# Same test for coming from a YML file
@pytest.fixture
def configuration_yaml():
    """Fixture: an itermae Configuration loaded from the bundled example YAML schema."""
    configuration = itermae.Configuration()
    configuration.config_from_file("itermae/data/example_schema.yml")
    return configuration
@pytest.fixture
def benchmark_config():
    """Fixture: the expected configuration dict mirroring example_schema.yml.

    Used as the ground truth that test_configuration_yaml compares the parsed
    Configuration object against, key by key.
    """
    return {
        'verbosity': 1,
        'input_from': 'STDIN',
        'input_format': 'fastq',
        'input_gzipped': False,
        'matches': [
            { 'use': 'input',
                'pattern': 'NGTCCACGAGGTCTCTNCGTACGCTG',
                'marking': 'ABBBBBBBBBBBBBBBCDDDDDDDDD',
                'marked_groups': {
                    'A': { 'name': 'sampleIndex',
                        'repeat': 5 },
                    'B': { 'name': 'prefix',
                        'allowed_errors': 2 },
                    'C': { 'name': 'barcode',
                        'repeat_min': 18,
                        'repeat_max': 22 },
                    # NOTE(review): 'allowed_substititions' looks misspelled
                    # ('substitutions') — presumably matches the schema's own
                    # key; verify against itermae before changing.
                    'D': { 'allowed_insertions': 1,
                        'allowed_deletions': 2,
                        'allowed_substititions': 2 }
                }
            },
            { 'use': 'barcode',
                'pattern': 'N',
                'marking': 'z',
                'marked_groups': {
                    'z': { 'name': 'first_five_barcode',
                        'repeat': 5 },
                }
            }
        ],
        'output_list': [
            { 'name': 'short_barcodes',
                'seq': 'barcode',
                'filter': 'True',
                'description': 'description' },
            { 'name': 'sampleIndex',
                'seq': 'sampleIndex',
                'filter': 'sampleIndex.length == 5',
                'description': 'description+" this is just the sampleIndex"' },
            { 'name': 'usualbarcode',
                'seq': 'barcode',
                'id': 'id',
                'description': 'description+" sample="+sampleIndex' },
            { 'name': 'other_barcodes',
                'seq': 'barcode',
                'filter': 'True',
                'description': 'description+" other_barcodes"' }
        ],
        'output_to': 'STDOUT',
        'output_format': 'fasta',
        'output_failed': 'failed.fastq',
        'output_report': 'report.csv',
    }
def test_configuration_yaml(configuration_yaml,benchmark_config):
    """Check YAML-file-driven Configuration against the benchmark dict.

    Scalar attributes are compared directly; output and match specs are
    compared field-by-field, with try/except used as "equal OR at least set
    to a sensible default" checks.
    """
    assert benchmark_config['verbosity'] == getattr(configuration_yaml,'verbosity')
    assert benchmark_config['input_from'] == getattr(configuration_yaml,'input')
    assert benchmark_config['input_format'] == getattr(configuration_yaml,'input_format')
    assert benchmark_config['input_gzipped'] == getattr(configuration_yaml,'gzipped')
    assert benchmark_config['output_to'] == getattr(configuration_yaml,'output')
    assert benchmark_config['output_format'] == getattr(configuration_yaml,'output_format')
    assert benchmark_config['output_failed'] == getattr(configuration_yaml,'failed')
    assert benchmark_config['output_report'] == getattr(configuration_yaml,'report')
    for i in range(len(benchmark_config['output_list'])):
        try: # Names should be equal
            assert( benchmark_config['output_list'][i]['name'] ==
                getattr(configuration_yaml,'outputs_array')[i]['name'] )
        except: # or they should be set in conf to something
            assert getattr(configuration_yaml,'outputs_array')[i]['name']
        # Output sequence is required, we make no attempt to check 'compile'
        assert ( benchmark_config['output_list'][i]['seq'] ==
            getattr(configuration_yaml,'outputs_array')[i]['seq'][0] )
        try: # The filter specification should be equal,
            assert ( benchmark_config['output_list'][i]['filter'] ==
                getattr(configuration_yaml,'outputs_array')[i]['filter'][0] )
        except: # or should be not False. True == does not work ... mysterious
            assert ( False != getattr(configuration_yaml,'outputs_array')[i]['filter'][0] )
        try: # The description specification should be equal,
            assert ( benchmark_config['output_list'][i]['description'] ==
                getattr(configuration_yaml,'outputs_array')[i]['description'][0] )
        except: # or set to 'description'
            assert ( 'description' == getattr(configuration_yaml,'outputs_array')[i]['description'][0] )
    for i in range(len(benchmark_config['matches'])):
        # 'use' is turned into 'input'
        assert( benchmark_config['matches'][i]['use'] ==
            getattr(configuration_yaml,'matches_array')[i]['input'] )
        for j in benchmark_config['matches'][i]['marked_groups']:
            try: # for all matches, what's the benchmark_config group name?
                bench_name = benchmark_config['matches'][i]['marked_groups'][j]['name']
            except: # or is generic untitled group name
                bench_name = r'untitled_group\d+'
            group_pattern = re.compile(r"\(\?<"+bench_name+">")
            # do we find that group name in the generated regex?
            assert( group_pattern.search(str(getattr(configuration_yaml,'matches_array')[i]['regex'] )))
            try: # next we move onto pulling in any repeat specs
                repeat_min = str(benchmark_config['matches'][i]['marked_groups'][j]['repeat_min'])
            except: # ... laboriously to avoid catching errors
                repeat_min = None
            try:
                repeat_max = str(benchmark_config['matches'][i]['marked_groups'][j]['repeat_max'])
            except:
                repeat_max = None
            try:
                repeat = str(benchmark_config['matches'][i]['marked_groups'][j]['repeat'])
            except:
                repeat = None
            # Depending on which repeat specs exist, the generated regex
            # should contain the corresponding {min,max} quantifier.
            if repeat_min and repeat_max: # then depending on what we have available, we search
                assert ( re.search(r"\{"+repeat_min+","+repeat_max+"}",
                    str(getattr(configuration_yaml,'matches_array')[i]['regex'] )))
            if repeat_min and repeat and ( repeat_max == None ):
                assert ( re.search(r"\{"+repeat_min+","+repeat+"}",
                    str(getattr(configuration_yaml,'matches_array')[i]['regex'] )))
            if ( repeat_min == None ) and repeat and repeat_max:
                assert ( re.search(r"\{"+repeat+","+repeat_max+"}",
                    str(getattr(configuration_yaml,'matches_array')[i]['regex'] )))
            if ( repeat_min == None ) and repeat and ( repeat_max == None ):
                assert ( re.search(r"\{"+repeat+","+repeat+"}",
                    str(getattr(configuration_yaml,'matches_array')[i]['regex'] )))
def test_configuration_args(benchmark_config):
    """Check CLI-args-driven Configuration against the benchmark dict.

    Builds a throwaway argparse-like namespace, feeds it through
    config_from_args, and compares the resulting scalar attributes.
    (Two exact-duplicate assert lines from the original were removed.)
    """
    class A: pass # Here I just need a burner object that accepts new attributes
    args_test = A()
    args_test.input = 'STDIN'
    args_test.input_format = 'fastq'
    args_test.gzipped = False
    args_test.match = ["input > (?P<sampleIndex>[ATCGN]{5,5})(?P<upPrime>GTCCTCGAGGTCTCT){e<=1}(?P<barcode>[ATCGN]{18,22})(?P<downPrime>CGTACGCTG){e<=1}"]
    args_test.output_seq = ["barcode"]
    args_test.output_id = ["id"]
    args_test.output_description = None
    args_test.output_filter = None
    args_test.verbose = 1
    args_test.output = "STDOUT"
    args_test.output_format = "fasta"
    args_test.failed = 'failed.fastq'
    args_test.report = 'report.csv'
    conf = itermae.Configuration()
    conf.config_from_args(args_copy=args_test)
    assert benchmark_config['verbosity'] == getattr(conf,'verbosity')
    assert benchmark_config['input_from'] == getattr(conf,'input')
    assert benchmark_config['input_format'] == getattr(conf,'input_format')
    assert benchmark_config['input_gzipped'] == getattr(conf,'gzipped')
    assert benchmark_config['output_to'] == getattr(conf,'output')
    assert benchmark_config['output_format'] == getattr(conf,'output_format')
    assert benchmark_config['output_failed'] == getattr(conf,'failed')
    assert benchmark_config['output_report'] == getattr(conf,'report')
# Test MatchScores class
@pytest.fixture
def matchscore():
    """MatchScores with 1 substitution, 2 insertions, 3 deletions."""
    return itermae.MatchScores(1,2,3)
def test_matchscore_subs(matchscore):
    assert matchscore.substitutions == 1
def test_matchscore_ins(matchscore):
    assert matchscore.insertions == 2
def test_matchscore_dels(matchscore):
    assert matchscore.deletions == 3
def test_matchscore_flatten(matchscore):
    # flatten() joins the three counts with underscores
    assert matchscore.flatten() == "1_2_3"
# Test GroupStats class
@pytest.fixture
def groupstats():
    """GroupStats for a 10bp match spanning positions 5-15, all quality 36."""
    return itermae.GroupStats(5,15,
        SeqRecord.SeqRecord(Seq.Seq('ATCGATCGAT')),[36]*10)
def test_groupstats_start(groupstats):
    assert groupstats.start == 5
def test_groupstats_end(groupstats):
    assert groupstats.end == 15
def test_groupstats_length(groupstats):
    # length is derived from end - start
    assert groupstats.length == 10
def test_groupstats_quality(groupstats):
    assert groupstats.quality == [36]*10
def test_groupstats_flatten(groupstats):
    # flatten() joins start, end, length with underscores
    assert groupstats.flatten() == "5_15_10"
def test_groupstats_repr(groupstats):
    assert type(repr(groupstats)) == type(str())
# Setup inputs
# I'm not testing BioPython SeqIO, I assume that's good.
# This instead uses that to return a list of the records in the file.
@pytest.fixture
def fastqfile():
    """Iterator of SeqRecords parsed from the barseq test FASTQ file."""
    return SeqIO.parse("itermae/data/tests/test_inputs/barseq.fastq","fastq")
## SeqHolder Tests
# Test that SeqHolder can apply_operation, then since we're there testing
# that it finds the right groups for each seq, and passes or fails filters
# appropriately.
def test_seqholder_match_filter(fastqfile,configuration_yaml):
    """End-to-end SeqHolder check over nine reads from the test FASTQ.

    Each read is zipped with its expected outcomes: whether it passes the
    position / quality / sequence filters, which named groups get matched,
    and the expected (id, seq) of the built output record.
    """
    for seq, pos_pass, qual_pass, seq_pass, sequences_found, seq_targets \
        in zip(fastqfile,
            [ i == 1 for i in [1,1,1,1,1,1,1,1,1] ],
            [ i == 1 for i in [0,0,1,1,0,0,0,1,1] ],
            [ i == 1 for i in [1,0,0,0,0,0,0,0,0] ],
            [ set(['dummyspacer','input','sample','fixed1','rest','tag','strain']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain','fixed2','UMItail']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain','fixed2','UMItail']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain','fixed2','UMItail']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain','fixed2','UMItail']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain','fixed2','UMItail']),
                set(['dummyspacer','input','sample','fixed1','rest','tag','strain','fixed2','UMItail']),
            ],
            [ ('NB501157:100:H5J5LBGX2:1:11101:10000:10043_TTCAC', 'TCAGTCGTAGCAGTTCGATG'),
                ('NB501157:100:H5J5LBGX2:1:11101:10000:10138_GCTTC', 'TGGGCAGACACAACGCTACA'),
                ('NB501157:100:H5J5LBGX2:1:11101:10000:16613_GCTTC','GACAGACTGATAACCCTTGC'),
                ('NB501157:100:H5J5LBGX2:1:11101:10000:19701_CTACT', 'GATGCACTGCGTTCCATGTT'),
                ('NB501157:100:H5J5LBGX2:1:11101:10000:5096_TAAGT','AGGGCTCGTCGATTCGTCTT'),
                ('NB501157:100:H5J5LBGX2:1:11101:10000:6068_CTACT','GCAGATAATACACTGTCACC'),
                ('NB501157:100:H5J5LBGX2:1:11101:10000:8488_CATAA','TCGAGGGGTTACATACG'),
                ('NB501157:100:H5J5LBGX2:1:11101:10001:10798_TCTAG','GAGGCTACGGTACGTTCCTT'),
                ('NB501157:100:H5J5LBGX2:1:11101:10001:11700_CGCAA','TGCGCCACATAGTATAAAT'),
            ]
            ):
        # Read in the sequence to the holder
        seqholder = itermae.SeqHolder(seq,configuration=configuration_yaml)
        # Is the dummy X?
        assert seqholder.seqs['dummyspacer'].seq == 'X'
        # Is the number we just put there 40?
        assert seqholder.seqs['dummyspacer'].letter_annotations['phred_quality'] == [40]
        # Apply operations: a on the raw input, then b and c on its 'rest'
        seqholder.apply_operation('a','input',
            regex.compile("(?P<sample>[ATCG]{5})(?P<fixed1>GTCCACGAGGTC){e<=2}(?P<rest>TCT.*){e<=1}",
                regex.BESTMATCH) )
        seqholder.apply_operation('b','rest',
            regex.compile("(?P<tag>TCT){e<=1}(?P<strain>[ATCG]{10,26})(CGTACGCTGC){e<=2}",
                regex.BESTMATCH) )
        seqholder.apply_operation('c','rest',
            regex.compile("(?P<fixed2>CGTACGCTGCAGGTC)(?<UMItail>GAC[ATCG]G[ATCG]A[ATCG]G[ATCG]G[ATCG]G[ATCG]GAT){s<=2}",
                regex.BESTMATCH) )
        # Are the right sequences found/matched for each read?
        assert set(seqholder.seqs.keys()) == sequences_found
        # Does it pass a position filter?
        seqholder.build_context()
        first_filter = 'sample.length == 5 and rest.start >= 15'
        first_id = "id+'_'+sample"
        first_seq = "strain"
        first_desc = "description"
        filter_result = seqholder.evaluate_filter_of_output(
            { 'name':'test',
                'filter': [ first_filter,
                    compile(first_filter,'<string>','eval',optimize=2) ] ,
                'id': [ first_id,
                    compile(first_id,'<string>','eval',optimize=2) ] ,
                'seq': [ first_seq,
                    compile(first_seq,'<string>','eval',optimize=2) ],
                'description': [ first_desc,
                    compile(first_desc,'<string>','eval',optimize=2) ]
            })
        assert pos_pass == filter_result
        # Does it pass a quality filter, with statistics?
        second_filter = 'statistics.mean(fixed1.quality) >= 33.5'
        filter_result = seqholder.evaluate_filter_of_output(
            { 'name':'test',
                'filter': [ second_filter,
                    compile(second_filter,'<string>','eval',optimize=2) ],
                'id': [ first_id,
                    compile(first_id,'<string>','eval',optimize=2) ] ,
                'seq': [ first_seq,
                    compile(first_seq,'<string>','eval',optimize=2) ],
                'description': [ first_desc,
                    compile(first_desc,'<string>','eval',optimize=2) ]
            })
        assert qual_pass == filter_result
        # Does it pass a specific sequence filter?
        third_filter = 'sample == "TTCAC" or sample == "AGGAG"'
        filter_result = seqholder.evaluate_filter_of_output(
            { 'name':'test',
                'filter': [ third_filter,
                    compile(third_filter,'<string>','eval',optimize=2) ] ,
                'id': [ first_id,
                    compile(first_id,'<string>','eval',optimize=2) ] ,
                'seq': [ first_seq,
                    compile(first_seq,'<string>','eval',optimize=2) ],
                'description': [ first_desc,
                    compile(first_desc,'<string>','eval',optimize=2) ]
            })
        assert seq_pass == filter_result
        # Then test outputs (filter 'True' so only matching can fail it)
        built_output = seqholder.build_output(
            { 'name':'test',
                'filter': [ 'True', compile('True','<string>','eval',optimize=2) ] ,
                'id': [ first_id,
                    compile(first_id,'<string>','eval',optimize=2) ] ,
                'seq': [ first_seq,
                    compile(first_seq,'<string>','eval',optimize=2) ],
                'description': [ first_desc,
                    compile(first_desc,'<string>','eval',optimize=2) ]
            })
        # Are the right outputs constructed?
        if built_output is None:
            assert seq_targets == ( None, None)
        else:
            assert seq_targets == ( built_output.id, built_output.seq )
# Buncha tests, defining dicts and lists first, then running with it
input_dicts = [
{ 'input_from': 'itermae/data/tests/test_inputs/barseq.sam.gz',
'input_format': 'sam', 'input_gzipped': 'true',
'has_quality':False,'has_desc':False, 'seq_as_id':False},
{ 'input_from': 'itermae/data/tests/test_inputs/barseq.fastq.gz',
'input_format': 'fastq', 'input_gzipped': 'true',
'has_quality':True,'has_desc':True, 'seq_as_id':False},
{ 'input_from': 'itermae/data/tests/test_inputs/barseq.fasta.gz',
'input_format': 'fasta', 'input_gzipped': 'true',
'has_quality':False,'has_desc':True, 'seq_as_id':False},
{ 'input_from': 'itermae/data/tests/test_inputs/barseq.txt.gz',
'input_format': 'txt', 'input_gzipped': 'true',
'has_quality':False,'has_desc':False, 'seq_as_id':True},
]
match_yaml_blocks = [
"""matches:
- use: 'input'
pattern: 'NGTCCTCGAGGTCTCT'
marking: 'ABBBBBBBBBBBBBBB'
marked_groups:
A:
name: sampleIndex
repeat: 5
B:
name: rest"""
,
"""matches:
- use: 'input'
pattern: 'NGTCCTCGAGGTCTCT+'
marking: 'ABBBBBBBBBBBBBBBB'
marked_groups:
A:
name: sampleIndex
repeat: 5
B:
name: rest
allowed_errors: 1
- use: rest
pattern: 'GTCCTCGAGGTCTCTNCGTACGCTG+'
marking: 'AAAAAAAAAAAAAAABCCCCCCCCCD'
marked_groups:
A:
name: upPrime
allowed_errors: 1
B:
name: barcode
repeat_min: 18
repeat_max: 22
C:
name: downPrime
allowed_errors: 1
D:
name: downstream
- use: downstream
pattern: 'CAGGTCGACNGNANGNGNGNGAT'
marking: 'AAAAAAAAABBBBBBBBBBBCCC'
marked_groups:
A:
name: fixed_pre_umi
allowed_errors: 1
B:
name: interspersed_umi
allowed_errors: 1
C:
name: tail
allowed_errors: 1
"""
]
match_args_blocks = [
"""--match 'input > (?P<sampleIndex>[ATCGN]{5,5})(?P<rest>GTCCTCGAGGTCTCT)'"""
,
"""\
--match 'input > (?P<sampleIndex>[ATCGN]{5,5})(?P<rest>GTCCTCGAGGTCTCT.+){e<=1}' \
--match 'rest > (?P<upPrime>GTCCTCGAGGTCTCT){e<=1}(?P<barcode>[ATCGN]{18,22})(?P<downPrime>CGTACGCTG){e<=1}(?P<downstream>.+)' \
--match 'downstream > (?P<fixed_pre_umi>CAGGTCGAC){e<=1}(?P<interspersed_umi>[ATCGN]G[ATCGN]A[ATCGN]G[ATCGN]G[ATCGN]G[ATCGN]){e<=1}(?P<tail>GAT){e<=1}' \
"""
]
output_dicts = [
{ 'output_to': 'STDOUT', 'output_format': 'sam' },
{ 'output_to': 'STDOUT', 'output_format': 'fastq' },
{ 'output_to': 'STDOUT', 'output_format': 'fasta' },
{ 'output_to': 'STDOUT', 'output_format': 'txt' }
]
output_yaml_blocks = [
"""output_list:
- filter: \'sampleIndex == "GCTTC"\'
seq: 'input'
"""
,
"""output_list:
- filter: 'statistics.median(barcode.quality) >= 35'
description: 'description+" sampleIndex="+sampleIndex+" umiSegment="+interspersed_umi'
seq: 'barcode'
- id: 'id+"_"+sampleIndex'
filter: 'statistics.median(barcode.quality) >= 40'
seq: 'barcode'
- filter: 'barcode.length < 20'
description: 'description+" sampleIndex="+sampleIndex'
seq: 'upPrime+barcode+downPrime'
"""
]
output_args_blocks = [
"""--output-seq 'input' --output-filter 'sampleIndex == "GCTTC"'"""
,
""" -os 'barcode' -of 'statistics.median(barcode.quality) >= 35' -oi 'id' -od 'description+" sampleIndex="+sampleIndex+" umiSegment="+interspersed_umi' \
-os 'barcode' -of 'statistics.median(barcode.quality) >= 40' -oi 'id+"_"+sampleIndex' -od 'description' \
-os 'upPrime+barcode+downPrime' -of 'barcode.length < 20' -oi 'id' -od 'description+" sampleIndex="+sampleIndex' \
"""
]
def making_a_full_test_yaml(config_file_path,
        which_input, which_matches, which_output, which_outputs ):
    """Assemble a YAML config, run itermae in a subprocess, compare output.

    Builds config.yml in the pytest tmp dir from the module-level parameter
    blocks, runs `itermae -v --config`, and compares its STDOUT line-by-line
    against the stored expected-output file.

    :param config_file_path: pytest tmp_path directory to write config.yml in
    :param which_input: index into input_dicts
    :param which_matches: index into match_yaml_blocks
    :param which_output: index into output_dicts
    :param which_outputs: index into output_yaml_blocks
    """
    this_input_dict = input_dicts[which_input]
    this_match_yaml_block = match_yaml_blocks[which_matches]
    this_output_dict = output_dicts[which_output]
    this_output_yaml_block = output_yaml_blocks[which_outputs]
    config_file = config_file_path / "config.yml"
    config_file.write_text(
        'input_from: '+this_input_dict['input_from']+"\n"+
        'input_format: '+this_input_dict['input_format']+"\n"+
        'input_gzipped: '+this_input_dict['input_gzipped']+"\n"+
        this_match_yaml_block+"\n"+
        'output_to: '+this_output_dict['output_to']+"\n"+
        'output_format: '+this_output_dict['output_format']+"\n"+
        this_output_yaml_block
        )
    results = subprocess.run(
        'itermae -v --config '+str(config_file),
        shell=True,capture_output=True,encoding='utf-8')
    # Expected-output filename encodes the parameter combination.
    filename = ('itermae/data/tests/test_outputs/'+
        'matches-'+str(which_matches)+
        '_outputs-'+str(which_outputs)+
        '_seqAsID-'+ str(this_input_dict['seq_as_id'])+
        '_hasQuality-'+str(this_input_dict['has_quality'])+
        '_hasDesc-'+str(this_input_dict['has_desc'])+
        '.'+this_output_dict['output_format'])
    with open(filename,'r') as f:
        expected_file = f.readlines()
    # NOTE(review): zip() stops at the shorter sequence, so extra or missing
    # trailing lines in the actual output are not detected here.
    for actual, expected in zip(results.stdout.split('\n'),expected_file):
        assert str(actual) == str(expected.rstrip('\n'))
# Full-pipeline tests driven by a YAML config file: one test per combination
# of (input, matches, output-format, outputs) indices, delegated to
# making_a_full_test_yaml.
def test_full_0000_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,0,0,0)
def test_full_1000_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,0,0,0)
def test_full_2000_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,0,0,0)
def test_full_3000_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,0,0,0)
def test_full_0010_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,0,1,0)
def test_full_1010_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,0,1,0)
def test_full_2010_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,0,1,0)
def test_full_3010_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,0,1,0)
def test_full_0020_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,0,2,0)
def test_full_1020_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,0,2,0)
def test_full_2020_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,0,2,0)
def test_full_3020_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,0,2,0)
def test_full_0030_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,0,3,0)
def test_full_1030_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,0,3,0)
def test_full_2030_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,0,3,0)
def test_full_3030_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,0,3,0)
def test_full_0101_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,1,0,1)
def test_full_1101_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,1,0,1)
def test_full_2101_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,1,0,1)
def test_full_3101_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,1,0,1)
def test_full_0111_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,1,1,1)
def test_full_1111_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,1,1,1)
def test_full_2111_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,1,1,1)
def test_full_3111_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,1,1,1)
def test_full_0121_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,1,2,1)
def test_full_1121_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,1,2,1)
def test_full_2121_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,1,2,1)
def test_full_3121_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,1,2,1)
def test_full_0131_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,0,1,3,1)
def test_full_1131_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,1,1,3,1)
def test_full_2131_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,2,1,3,1)
def test_full_3131_yaml(tmp_path):
    making_a_full_test_yaml(tmp_path,3,1,3,1)
def making_a_full_test_args(
        which_input, which_matches, which_output, which_outputs ):
    """Assemble an itermae CLI invocation, run it, compare output.

    Builds the full command string from the module-level parameter blocks,
    runs it via subprocess, and compares STDOUT line-by-line against the
    stored expected-output file. (A leftover debug dump of the command to a
    file named 'tmp' was removed.)

    :param which_input: index into input_dicts
    :param which_matches: index into match_args_blocks
    :param which_output: index into output_dicts
    :param which_outputs: index into output_args_blocks
    """
    this_input_dict = input_dicts[which_input]
    this_match_args_block = match_args_blocks[which_matches]
    this_output_dict = output_dicts[which_output]
    this_output_args_block = output_args_blocks[which_outputs]
    cmd_string = ( 'itermae '+
        '--input '+this_input_dict['input_from']+" "+
        # 'true'/'false' string flag maps to the -z gzip switch
        {'false':'','true':'-z '}[this_input_dict['input_gzipped']]+
        '--input-format '+this_input_dict['input_format']+" "+
        this_match_args_block+" "+
        '--output '+this_output_dict['output_to']+" "+
        '--output-format '+this_output_dict['output_format']+" "+
        this_output_args_block
        )
    results = subprocess.run(
        cmd_string,
        shell=True,capture_output=True,encoding='utf-8')
    # Expected-output filename encodes the parameter combination.
    filename = ('itermae/data/tests/test_outputs/'+
        'matches-'+str(which_matches)+
        '_outputs-'+str(which_outputs)+
        '_seqAsID-'+ str(this_input_dict['seq_as_id'])+
        '_hasQuality-'+str(this_input_dict['has_quality'])+
        '_hasDesc-'+str(this_input_dict['has_desc'])+
        '.'+this_output_dict['output_format'])
    with open(filename,'r') as f:
        expected_file = f.readlines()
    # NOTE(review): zip() stops at the shorter sequence, so extra or missing
    # trailing lines in the actual output are not detected here.
    for actual, expected in zip(results.stdout.split('\n'),expected_file):
        assert str(actual) == str(expected.rstrip('\n'))
# Full-pipeline tests driven by CLI arguments: one test per combination of
# (input, matches, output-format, outputs) indices, delegated to
# making_a_full_test_args. Mirrors the YAML-config tests above.
def test_full_0000_args():
    making_a_full_test_args(0,0,0,0)
def test_full_1000_args():
    making_a_full_test_args(1,0,0,0)
def test_full_2000_args():
    making_a_full_test_args(2,0,0,0)
def test_full_3000_args():
    making_a_full_test_args(3,0,0,0)
def test_full_0010_args():
    making_a_full_test_args(0,0,1,0)
def test_full_1010_args():
    making_a_full_test_args(1,0,1,0)
def test_full_2010_args():
    making_a_full_test_args(2,0,1,0)
def test_full_3010_args():
    making_a_full_test_args(3,0,1,0)
def test_full_0020_args():
    making_a_full_test_args(0,0,2,0)
def test_full_1020_args():
    making_a_full_test_args(1,0,2,0)
def test_full_2020_args():
    making_a_full_test_args(2,0,2,0)
def test_full_3020_args():
    making_a_full_test_args(3,0,2,0)
def test_full_0030_args():
    making_a_full_test_args(0,0,3,0)
def test_full_1030_args():
    making_a_full_test_args(1,0,3,0)
def test_full_2030_args():
    making_a_full_test_args(2,0,3,0)
def test_full_3030_args():
    making_a_full_test_args(3,0,3,0)
def test_full_0101_args():
    making_a_full_test_args(0,1,0,1)
def test_full_1101_args():
    making_a_full_test_args(1,1,0,1)
def test_full_2101_args():
    making_a_full_test_args(2,1,0,1)
def test_full_3101_args():
    making_a_full_test_args(3,1,0,1)
def test_full_0111_args():
    making_a_full_test_args(0,1,1,1)
def test_full_1111_args():
    making_a_full_test_args(1,1,1,1)
def test_full_2111_args():
    making_a_full_test_args(2,1,1,1)
def test_full_3111_args():
    making_a_full_test_args(3,1,1,1)
def test_full_0121_args():
    making_a_full_test_args(0,1,2,1)
def test_full_1121_args():
    making_a_full_test_args(1,1,2,1)
def test_full_2121_args():
    making_a_full_test_args(2,1,2,1)
def test_full_3121_args():
    making_a_full_test_args(3,1,2,1)
def test_full_0131_args():
    making_a_full_test_args(0,1,3,1)
def test_full_1131_args():
    making_a_full_test_args(1,1,3,1)
def test_full_2131_args():
    making_a_full_test_args(2,1,3,1)
def test_full_3131_args():
    making_a_full_test_args(3,1,3,1)
|
{"/itermae/tests/test.py": ["/itermae/__init__.py"], "/tests/test_itermae.py": ["/itermae/__init__.py"]}
|
44,071,546
|
darachm/itermae
|
refs/heads/main
|
/setup.py
|
import setuptools

# Use the README as the long description shown on PyPI.
# Fix: specify encoding explicitly so the build does not depend on the
# platform's locale default.
with open('README.md','r',encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name='itermae',
    version='0.6.0.1',
    author='Darach Miller',
    description='Commandline tool for parsing NGS reads by multiple fuzzy '+
        'regex operations',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='http://gitlab.com/darachm/itermae',
    author_email='darachm@stanford.edu',
    license='BSD 2-clause',
    packages=setuptools.find_packages(),
    install_requires=[
        'pyyaml',
        'regex',
        'biopython',
        ],
    scripts=['bin/itermae'],
    # NOTE(review): pytest-runner is deprecated upstream; consider dropping
    # setup_requires/tests_require in favor of invoking pytest directly.
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    zip_safe=False,
    python_requires='>=3.6',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Programming Language :: Python :: 3',
        ],
    keywords='fastq regex fuzzy amplicon parser barcode extractor extracter'
    )
|
{"/itermae/tests/test.py": ["/itermae/__init__.py"], "/tests/test_itermae.py": ["/itermae/__init__.py"]}
|
44,071,547
|
darachm/itermae
|
refs/heads/main
|
/itermae/__init__.py
|
#!/usr/bin/env python3
import time
import statistics
import sys
import gzip
import string
import argparse
import re
import itertools
import yaml
import regex
from Bio import SeqIO
from Bio import Seq, SeqRecord
# TODO pass description to flags field, but this requires lots of warnings
# and caveats to the users that they will have to preface the right SAM tag
# headers and such!
def format_sam_record(record_id, sequence, qualities, tags,
    flag='0', reference_name='*', mapping_position='0',
    mapping_quality='255', cigar_string='*', reference_name_of_mate='=',
    position_of_mate='0', template_length='0' ):
    """Assemble one SAM-format record line from its fields.

    The twelve fields are joined tab-separated in the column order of the
    SAM specification. Only a single tags string is supported; record
    descriptions and additional tags are not handled here.

    :param record_id: the ID of the read (SAM QNAME)
    :type record_id: str
    :param sequence: the nucleotide sequence to output
    :type sequence: str
    :param qualities: per-base qualities, letter-encoded (Illumina 1.8+)
    :type qualities: str
    :param tags: contents of the tags field, preformatted per the SAM spec
    :type tags: str
    :param flag: the bit-flag, defaults to '0'
    :type flag: str, optional
    :param reference_name: name of reference template, defaults to '*'
    :type reference_name: str, optional
    :param mapping_position: mapping position, defaults to '0'
    :type mapping_position: str, optional
    :param mapping_quality: mapping quality, defaults to '255'
    :type mapping_quality: str, optional
    :param cigar_string: CIGAR string versus the reference, defaults to '*'
    :type cigar_string: str, optional
    :param reference_name_of_mate: reference name of mate, defaults to '='
    :type reference_name_of_mate: str, optional
    :param position_of_mate: position of mate, defaults to '0'
    :type position_of_mate: str, optional
    :param template_length: length of template, defaults to '0'
    :type template_length: str, optional
    :return: the fields joined with tabs, one SAM record line
    :rtype: str
    """
    columns = ( record_id, flag, reference_name, mapping_position,
        mapping_quality, cigar_string, reference_name_of_mate,
        position_of_mate, template_length, sequence, qualities, tags )
    return "\t".join(columns)
def phred_letter_to_number(letter):
    """Decode one PHRED quality character into its numeric score.

    Uses the standard offset of 33 (Illumina 1.8+ / Sanger encoding).

    :param letter: PHRED score letter
    :type letter: str
    :return: the numeric PHRED score for that letter
    :rtype: int
    """
    return ord(letter) - 33
def phred_number_to_letter(score):
    """Encode a numeric PHRED score as its quality-string character.

    Uses the standard offset of 33 (Illumina 1.8+ / Sanger encoding).

    :param score: PHRED score number
    :type score: int
    :return: the quality character for that score
    :rtype: str
    """
    return chr(33 + score)
def phred_number_array_to_joined_string(score_array):
    """Encode a list of numeric PHRED scores as one quality string.

    :param score_array: PHRED scores, one per base
    :type score_array: list of int
    :return: the scores as a quality string, one character per score
        (Illumina 1.8+ / Sanger encoding)
    :rtype: str
    """
    letters = [ phred_number_to_letter(score) for score in score_array ]
    return "".join(letters)
def read_sam_file(fh):
    """This is a minimal SAM reader, just for getting the fields I like and
    yielding SeqRecord objects, sort of like BioPython SeqIO. SAM tags go
    into the description field so it should be possible to pass them
    through, but that's not well designed yet.

    :param fh: file handle to read
    :type fh: file handle opened by `open()` or `gzip.open()`
    :return: yields SeqRecords
    :rtype: Bio.SeqRecord.SeqRecord
    """
    # Iterate the handle directly rather than readlines() so large SAM
    # files are streamed instead of materialized in memory; also avoids
    # reusing `i` for both the line and the quality character.
    for line in fh:
        fields = line.rstrip('\n').split('\t')
        # SAM columns: 0=QNAME, 9=SEQ, 10=QUAL, 11=first optional tag.
        # NOTE(review): assumes a tag column exists — a record with no
        # optional tags would raise IndexError on fields[11]; confirm all
        # inputs carry at least one tag.
        yield SeqRecord.SeqRecord(
            Seq.Seq(fields[9]),
            id=fields[0],
            letter_annotations={'phred_quality':
                [phred_letter_to_number(q) for q in fields[10]]},
            description=fields[11]
            )
def read_txt_file(fh):
    """Reads a text file, and yields SeqRecords where the string in the line
    is both the sequence and the ID of the record.

    :param fh: file handle to read
    :type fh: file handle opened by `open()` or `gzip.open()`
    :return: yields SeqRecords
    :rtype: Bio.SeqRecord.SeqRecord
    """
    # Stream the handle directly instead of readlines() so the whole file
    # is never loaded into memory at once.
    for line in fh:
        seq = line.rstrip()
        yield SeqRecord.SeqRecord( Seq.Seq(seq), id=seq, description="")
# TODO consider moving the 'which' bit to something specified in the
# build_context, sort of like 'id' and 'description'
def write_out_seq(seq,fh,format,which):
    """This little utility just handles which of the formats to print out,
    and for SAM appends a tag with which match this is, using the IE tag.

    :param seq: The SeqRecord to write
    :type seq: Bio.SeqRecord.SeqRecord
    :param fh: file handle to write to
    :type fh: file handle returned by `open()`
    :param format: which format to output, one of 'sam', 'txt', or something
        that Bio.SeqIO will recognize
    :type format: str
    :param which: which output this is; for SAM this is appended as the IE
        tag, but it is ignored for the other formats
    :type which: str
    :return: nothing, it writes to a file
    :rtype: None
    """
    if format == "sam":
        print( format_sam_record( seq.id, str(seq.seq),
            phred_number_array_to_joined_string(seq.letter_annotations['phred_quality']),
            "IE:Z:"+str(which) ),file=fh)
    # We ignore printing the description anywhere - if you need it, concat
    # it onto the ID
    elif format == "txt":
        print( str(seq.seq), file=fh)
    else:
        # Anything else (fasta, fastq, ...) is delegated to BioPython.
        SeqIO.write(seq, fh, format)
class Configuration:
"""This class is for configuring itermae, from YAML or CLI arguments.
No arguments for initializing, it will set default values.
Then you use the configuration methods.
"""
def __init__(self):
self.verbosity = 0
self.matches_array = []
self.outputs_array = []
self.untitled_group_number = 0
self.untitled_output_number = 0
self.input = 'STDIN'
self.input_format = 'fastq'
self.gzipped = False
self.output = 'STDOUT'
self.output_format = 'sam'
self.failed = None
self.report = None
self.matches_array = []
self.outputs_array = []
self.output_fh = None
self.failed_fh = None
self.report_fh = None
# IUPAC dictionary for translating codes to regex.
# from http://www.bioinformatics.org/sms/iupac.html
# Note the inclusion of * and + for repeats.
self.iupac_codes = { # only used for the configuration file input!
'A':'A', 'C':'C', 'T':'T', 'G':'G',
'R':'[AG]', 'Y':'[CT]', 'S':'[GC]', 'W':'[AT]',
'K':'[GT]', 'M':'[AC]',
'B':'[CGT]', 'D':'[AGT]', 'H':'[ACT]', 'V':'[ACG]',
'N':'[ATCGN]', '*':'.*', '+':'.+' }
    def open_input_fh(self):
        """Opens the input file-handle based on the configuration, storing
        it as `input_fh`. Requires `input` to be set; honors `gzipped`.

        :raises ValueError: Can't handle gzipped inputs on STDIN.
        """
        if self.input.upper() == 'STDIN':
            if self.gzipped:
                raise ValueError("I can't handle gzipped inputs on STDIN ! "
                    "You shouldn't see this error, it shoulda been caught in "
                    "the launcher script.")
            else:
                self.input_fh = sys.stdin
        else:
            if self.gzipped:
                # 'rt' + ascii decodes the gzipped bytes to text for parsing
                self.input_fh = gzip.open(self.input,'rt',encoding='ascii')
            else:
                self.input_fh = open(self.input,'rt')
def open_appropriate_input_format(self):
"""Uses `input_format` and `input_fh` to set iterators
of SeqRecords from the appropriate inputs, in `input_seqs`.
Tries to handle all formats known, but will try with SeqIO
in case there's one I didn't think about.
"""
if self.input_format == 'fastq':
self.input_seqs = SeqIO.parse(self.input_fh, self.input_format)
elif self.input_format == 'sam':
self.input_seqs = iter(read_sam_file(self.input_fh))
elif self.input_format == 'fasta':
self.input_seqs = SeqIO.parse(self.input_fh, self.input_format)
elif self.input_format == 'txt':
self.input_seqs = iter(read_txt_file(self.input_fh))
else:
print("I don't know that input file format name '"+self.input_format+
"'. I will try and use the provided format name in BioPython "+
"SeqIO, and we will find out together if that works.",
file=sys.stderr)
self.input_seqs = SeqIO.parse(self.input_fh, self.input_format)
    def get_input_seqs(self):
        """This calls `open_input_fh()` to set the `input_fh` attribute,
        then calls `open_appropriate_input_format()` to use this and the
        `input_format` attribute to save an iterator of SeqRecords
        into `input_seqs`.

        Note this is inconsistent with design of the output, will pick one
        or the other ... later.
        """
        self.open_input_fh()
        self.open_appropriate_input_format()
def open_output_fh(self,file_string):
"""Opens output file handle, which can then be written to later with
a format specification.
Note this is inconsistent with design of the input, will pick one or
the other ... later.
:param file_string: file to wrote to, or STDOUT or STDERR
:type file_string: str
:return: file string for appending output
:rtype: file handle returned by `open()`
"""
if file_string is None:
return None
if file_string.upper() == 'STDOUT':
return sys.stdout
elif file_string.upper() == 'STDERR':
return sys.stderr
else:
return open(file_string,'a')
def close_fhs(self):
"""This is for cleaning up, and tries to close file handles at
`input_seqs`, `ouput_fh`, `failed_fh`, `report_fh`.
"""
for i in [ self.input_seqs, self.output_fh, self.failed_fh, self.report_fh] :
try:
i.close()
except:
pass
def check_reserved_name(self, name,
        reserved_names=['dummyspacer','input','id','description'] ):
    """Raise ValueError if `name` collides with a reserved group name.

    These names are reserved for these reasons:

    - `dummyspacer` is so you can pop an X into your sequence as a separator
      delimiter for later processing
    - `input` is the input group, the original one
    - `id` is the input ID, here just as `id` so it's easy to find
    - `description` is for mapping over the FASTQ description

    :param name: name of group
    :type name: str
    :param reserved_names: names that may not be used for capture groups
        (note: default list is never mutated, so the shared-default
        pitfall does not apply here)
    :type reserved_names: list of str
    :raises ValueError: raised if you're using one of the reserved names
    """
    if name in reserved_names:
        # Bug fix: the previous code indexed `reserved_names` with a
        # list of booleans (`reserved_names[[...]]`), which raised a
        # TypeError while building the message, instead of the intended
        # ValueError. Report the offending name directly.
        raise ValueError("Hey, you can't name a capture group '"+name+
            "', I'm using that! Pick a different name.")
def config_from_file(self,file_path):
    """Tries to parse a configuration YAML file to update this configuration
    object. Pass in the file path as an argument.
    Recommend you run this config first, then config_from_args, as done in
    `bin/itermae`.
    :param file_path: file path to configure from, expecting it to point to
        an appropriately formatted YAML file
    :type file_path: str
    :raises ValueError: Failure to parse the supplied YAML
    :raises KeyError: You need to define a group called `pattern:`
        inside each of the list inside of `matches:`
    :raises ValueError: Error in yaml config, you've repeated a group
        marking character to match in multiple places
    :raises ValueError: Error in yaml config, the pattern and marking
        you've defined are of different lengths
    :raises KeyError: Marked group in `marking:` field does not have
        corresponding entry in `marked_groups:`.
    :raises ValueError: Either the supplied `filter`, `id`, `seq`, or
        `description` expression for a match group does not look like a
        python expression
    """
    # No file supplied means nothing to do; keep existing configuration.
    if file_path is None:
        return
    try:
        with open(file_path,'r') as f:
            config = yaml.load(f,Loader=yaml.SafeLoader)
    except Exception as error:
        raise ValueError(repr(error)+" : "
            "I failed to parse the supplied YAML file at that path.")
    # Looking for verbosity instruction global, if not global, then in 'outputs'
    # NOTE: each try/except-pass below implements "key is optional in the
    # YAML; keep the current attribute value if absent".
    try:
        self.verbosity = config['verbosity']
    except:
        pass
    try:
        self.input = config['input_from']
    except:
        pass
    try:
        self.input_format = config['input_format']
    except:
        pass
    try:
        self.gzipped = config['input_gzipped']
    except:
        pass
    try:
        self.output = config['output_to']
    except:
        pass
    try:
        self.output_format = config['output_format']
    except:
        pass
    # Immediately use that verbosity
    if self.verbosity >= 1:
        print("Reading and processing the configuration file '"+
            str(file_path)+"'.",file=sys.stderr)
    # Building array of matches objects, so input and compiled regex
    if self.verbosity >= 1:
        print("Processing each match:",file=sys.stderr)
    for each in config['matches']:
        # 'use' defaults to the raw 'input' stream when unspecified.
        try:
            each['use']
        except:
            each['use'] = 'input'
        # 'pattern' is mandatory for every match entry.
        try:
            assert each['pattern']
        except Exception as error:
            raise KeyError("You need to define a group called 'pattern:' "
                "inside each of the list (denoted by '-'s) inside of "
                "'matches:' - what is the sequence pattern to match?")
        if self.verbosity >= 1:
            print("  Taking '"+each['use']+"'. \n", end="",file=sys.stderr)
        # Collapsing runs of the same mark character and comparing to the
        # set detects a mark character reused in non-adjacent positions.
        if len(re.sub(r'(.)\1+',r'\1',each['marking'])) > len(set(each['marking'])):
            raise ValueError("Error in reading yaml config! It looks like "
                "you've repeated a group marking character to match in "
                "multiple places. I do not support that, "
                "use a different character.")
        if len(each['pattern']) != len(each['marking']):
            raise ValueError("Error in reading yaml config! "
                "The pattern and marking you've defined are of "
                "different lengths. I need them to be the same length.")
        pattern_groups = dict()
        group_order = list() # This is to keep track of the order in which
            # the groups are being defined in the paired lines
        # Pair each pattern character with its mark to accumulate the
        # per-group character runs, upper-cased.
        for character, mark in zip(each['pattern'],each['marking']):
            if mark not in group_order:
                group_order.append(mark)
            try:
                pattern_groups[mark] += character.upper()
            except:
                pattern_groups[mark] = character.upper()
        regex_string = '' # building this now
        for mark in group_order:
            try:
                assert each['marked_groups'][mark]
            except Exception as error:
                raise KeyError("You've marked a group in the 'marking:' "
                    "field but have not supplied a corresponding entry in "
                    "'marked_groups:', hence "+repr(error)+".")
            if 'name' in each['marked_groups'][mark].keys():
                self.check_reserved_name(each['marked_groups'][mark]['name'])
            else:
                # Unnamed groups get an auto-generated unique name.
                each['marked_groups'][mark]['name'] = "untitled_group"+\
                    str(self.untitled_group_number)
                self.untitled_group_number += 1
            pattern_string = ""
            if len(set(pattern_groups[mark])) == 1:
                pattern_string = self.iupac_codes[pattern_groups[mark][0].upper()]
            else:
                for character in pattern_groups[mark]:
                    pattern_string += self.iupac_codes[character.upper()]
            # This is adding on the pattern for a certain marked
            # matching group, as zipped above, and we're using
            # IUPAC codes to turn ambiguity codes into ranges
            # Note that it is converted to upper case!
            if self.verbosity >= 1:
                print("  Found group '"+mark+"' with pattern '"+
                    pattern_string+"'",end="",file=sys.stderr)
            try: # trying to build a repeat range, if supplied
                if 'repeat_min' not in each['marked_groups'][mark].keys():
                    each['marked_groups'][mark]['repeat_min'] = \
                        each['marked_groups'][mark]['repeat']
                if 'repeat_max' not in each['marked_groups'][mark].keys():
                    each['marked_groups'][mark]['repeat_max'] = \
                        each['marked_groups'][mark]['repeat']
                pattern_string = ('('+pattern_string+')'+
                    '{'+str(each['marked_groups'][mark]['repeat_min'])+','+
                    str(each['marked_groups'][mark]['repeat_max'])+'}'
                    )
                if self.verbosity >= 1:
                    print(", repeated between "+
                        str(each['marked_groups'][mark]['repeat_min'])+
                        " and "+
                        str(each['marked_groups'][mark]['repeat_max'])+
                        " times",end="",file=sys.stderr)
            except:
                pass
            error_array = [] # Then building the error tolerance spec
            # Each allowed_* key is optional; absent keys contribute no
            # constraint. (e=errors, i=insertions, d=deletions,
            # s=substitutions, in the fuzzy-regex syntax.)
            try:
                error_array.append(
                    "e<="+str(each['marked_groups'][mark]['allowed_errors']) )
            except:
                pass # This part takes up so much room because of try excepts...
            try:
                error_array.append(
                    "i<="+str(each['marked_groups'][mark]['allowed_insertions']) )
            except:
                pass
            try:
                error_array.append(
                    "d<="+str(each['marked_groups'][mark]['allowed_deletions']) )
            except:
                pass
            try:
                error_array.append(
                    "s<="+str(each['marked_groups'][mark]['allowed_substitutions']) )
            except:
                pass
            if len(error_array):
                error_string = "{"+','.join(error_array)+"}"
            else:
                error_string = ""
            if self.verbosity >= 1:
                print(".\n",end="",file=sys.stderr)
            # Append this group as a named capture with its fuzziness spec.
            regex_string += ( "(?<"+each['marked_groups'][mark]['name']+
                ">"+pattern_string+")"+error_string )
        # Okay, then use the built up regex_string to compile it
        compiled_regex = regex.compile( regex_string, regex.BESTMATCH )
        # And save it with the input source used, in array
        self.matches_array.append( {'input':each['use'], 'regex':compiled_regex} )
    if self.verbosity >= 1:
        print("Processing output specifications.",file=sys.stderr)
    output_list = config['output_list'] # I do need some outputs, or fail
    for each in output_list:
        # Fill per-output defaults for any omitted keys.
        try:
            each['id']
        except:
            each['id'] = 'id' # default, the id
        try:
            each['description']
        except:
            each['description'] = 'description' # default pass through from in
        try:
            each['name']
        except:
            each['name'] = 'untitled_output_'+str(self.untitled_output_number)
            self.untitled_output_number += 1
        try:
            each['filter']
        except:
            each['filter'] = 'True' # so will pass if not provided
        if self.verbosity >= 1:
            print("  Parsing output specification of '"+each['name']+"', "+
                "ID is '"+each['id']+"' (input ID is 'id'), filter outputs "+
                "to accept only if '"+each['filter']+"' is True, with "+
                "sequence derived from '"+each['seq']+"', and a description "+
                "of '"+each['description']+"' ('description' is input "+
                "description').",file=sys.stderr)
        # Pre-compile each expression once; evaluated later per read.
        try:
            self.outputs_array.append( {
                'name':each['name'], # These are on oneline for error
                    # readability about which one is the problem
                'filter':[ each['filter'], compile(each['filter'],'<string>','eval',optimize=2) ],
                'id':[ each['id'], compile(each['id'],'<string>','eval',optimize=2) ],
                'seq':[ each['seq'], compile(each['seq'],'<string>','eval',optimize=2) ],
                'description':[ each['description'], compile(each['description'],'<string>','eval',optimize=2) ]
                })
        except Exception as error:
            raise ValueError(repr(error)+" : "
                "Either the supplied 'filter', 'id', 'seq', "
                "or 'description' expression for a match group does "
                "not look like a python expression - are all "
                "non-group-name parts in quotes? Are group-names and "
                "other parts connected with + signs?")
    # Optional destinations for failed reads and the debug report.
    try:
        self.failed = config['output_failed']
    except:
        pass
    try:
        self.report = config['output_report']
    except:
        pass
def config_from_args(self,args_copy):
    """Make configuration object from arguments provided. Should be the
    same as the config_from_yaml output, if supplied the same.
    :param args_copy: pass in the `argparse` args object after collecting
        the startup command line arguments
    :type args_copy: argparse.Namespace
    :raises ValueError: I failed to build the regular expression for a match
    :raises ValueError: The output IDs, seqs, descriptions, and filters are
        of unequal sizes, make them equal or only define one of each
    :raises ValueError: Either the supplied `filter`, `id`, `seq`, or
        `description` expression for a match group does not look like a
        python expression
    """
    if args_copy.verbose:
        self.verbosity = args_copy.verbose
    for each in args_copy.match:
        try:
            # Reject capture-group names that collide with reserved ones.
            for capture_name in re.findall('<(.*?)>',each):
                self.check_reserved_name(capture_name)
            # A match may be "source > regex"; without the ' > ' separator
            # the whole string is the regex applied to the raw input.
            try:
                (input_string, regex_string) = re.split(r"\s>\s",each.strip())
            except:
                input_string = 'input' # default to just use raw input
                regex_string = each.strip()
            compiled_regex = regex.compile(
                regex_string.strip(), # We use this regex
                regex.BESTMATCH # And we use the BESTMATCH strategy, I think
                )
            self.matches_array.append( {'input':input_string.strip(), 'regex':compiled_regex} )
        except Exception as error:
            raise ValueError(repr(error)+" : "
                "I failed to build the regular expression from the "
                "command-line argument supplied.")
    # Adding in defaults for outputs. Can't do that with argparse, I think,
    # because this needs to be appending. First add in defaults, but
    # absolutely first need an output_seq to be defined for it to try this:
    if args_copy.output_seq:
        if not args_copy.output_id:
            args_copy.output_id = ['id']
        if not args_copy.output_filter:
            args_copy.output_filter = ['True']
        if not args_copy.output_description:
            args_copy.output_description = ['description']
    # Then normalize the length 1 to max length
    maximum_number_of_outputs = max( [len(args_copy.output_id),
        len(args_copy.output_seq), len(args_copy.output_filter),
        len(args_copy.output_description)] )
    # Normalizing all singletons to same length
    if len(args_copy.output_id) == 1:
        args_copy.output_id = args_copy.output_id * maximum_number_of_outputs
    if len(args_copy.output_seq) == 1:
        args_copy.output_seq = args_copy.output_seq * maximum_number_of_outputs
    if len(args_copy.output_filter) == 1:
        args_copy.output_filter = args_copy.output_filter * maximum_number_of_outputs
    if len(args_copy.output_description) == 1:
        args_copy.output_description = args_copy.output_description * maximum_number_of_outputs
    # After normalization all four lists must agree in length.
    if not ( len(args_copy.output_id) == len(args_copy.output_seq) ==
            len(args_copy.output_filter) == len(args_copy.output_description) ):
        raise ValueError("The output IDs, seqs, descriptions, and "
            "filters are of unequal sizes. Make them equal, or only "
            "define one each and it will be reused across all."+
            repr(( len(args_copy.output_id), len(args_copy.output_seq),
                len(args_copy.output_filter), len(args_copy.output_description) )) )
    i = 0
    # Zip the parallel lists into per-output specs, pre-compiling each
    # expression once for later per-read evaluation.
    for idz, seqz, filterz, description in zip(args_copy.output_id, args_copy.output_seq, args_copy.output_filter, args_copy.output_description) :
        this_name = 'untitled_output_'+str(i)
        i += 1
        try:
            self.outputs_array.append( {
                'name': this_name,
                'filter': [ filterz, compile(filterz,'<string>','eval',optimize=2) ],
                'id': [ idz, compile(idz,'<string>','eval',optimize=2) ],
                'seq': [ seqz, compile(seqz,'<string>','eval',optimize=2) ] ,
                'description':[ description, compile(description,'<string>','eval',optimize=2) ]
                })
        except Exception as error:
            raise ValueError(repr(error)+" : "
                "Either the supplied 'filter', 'id', 'seq', "
                "or 'description' expression for a match group does "
                "not look like a python expression - are all "
                "non-group-name parts in quotes? Are group-names and "
                "other parts connected with + signs?")
    # Passing through the rest, defaults should be set in argparse defs
    if args_copy.input is not None:
        self.input = args_copy.input
    if args_copy.input_format is not None:
        self.input_format = args_copy.input_format
    if args_copy.gzipped is not None:
        self.gzipped = args_copy.gzipped
    if args_copy.output is not None:
        self.output = args_copy.output
    if args_copy.output_format is not None:
        self.output_format = args_copy.output_format
    if args_copy.failed is not None:
        self.failed = args_copy.failed
    if args_copy.report is not None:
        self.report = args_copy.report
def summary(self):
    """Return a multi-line, human-readable dump of this configuration:
    I/O settings, verbosity, and every configured match and output.

    :return: formatted summary string
    :rtype: str
    """
    pieces = [
        'Configured as:',
        '\n  input from: '+self.input,
        '\n  input format: '+self.input_format,
        '\n  is it gzipped?: '+str(self.gzipped),
        '\n  output APPENDING to: '+self.output,
        '\n  output format is: '+self.output_format,
        '\n  failed being APPENDED to file: '+str(self.failed),
        '\n  report being APPENDED to file: '+str(self.report),
        '\n  with verbosity set at: '+str(self.verbosity),
        '\n  doing these matches:',
        ]
    for match_spec in self.matches_array:
        pieces.append('\n  - input: '+match_spec['input'])
        pieces.append('\n    regex: '+str(match_spec['regex']))
    pieces.append('\n  writing these outputs:')
    for output_spec in self.outputs_array:
        pieces.append('\n  - id: '+str(output_spec['id'][0]))
        pieces.append('\n    description: '+str(output_spec['description'][0]))
        pieces.append('\n    seq: '+str(output_spec['seq'][0]))
        pieces.append('\n    filter: '+str(output_spec['filter'][0]))
    return ''.join(pieces)
def reader(self):
    """This reads inputs, calls the `chop` method on each one, and sorts
    it off to outputs. So this is called by the main function, and is
    mostly about handling the I/O and handing it to the `chop` function.
    Thus, this depends on the `Configuration` class being properly
    configured with all the appropriate values.
    """
    # Input
    self.get_input_seqs()
    # Outputs - passed records, failed records, report file
    self.output_fh = self.open_output_fh(self.output)
    self.report_fh = self.open_output_fh(self.report)
    self.failed_fh = self.open_output_fh(self.failed)
    # Do the chop-ing...
    for each_seq in self.input_seqs:
        # CAUTION
        # The below is a munge.
        # According to https://github.com/biopython/biopython/issues/398 ,
        # BioPython mimics an old tool's weird behavior by outputting the
        # ID in the description field. The fix for it relies on a comparing
        # a white-space 'split' to remove the ID if it's in the description.
        # So that doesn't work if you modify the ID or so, so I remove right
        # after parsing.
        # NOTE(review): the ID is used here as a regex *pattern*; an ID
        # containing regex metacharacters could strip more (or less) than
        # intended — re.escape(str(each_seq.id)) may be safer. Left as-is.
        each_seq.description = re.sub(str(each_seq.id),"",
            each_seq.description).lstrip()
        # One SeqHolder per record does the matching and output writing.
        seq_holder = SeqHolder(each_seq,configuration=self)
        seq_holder.chop()
    # Best-effort close of all opened handles.
    self.close_fhs()
class MatchScores:
    """Small value object holding the three fuzzy-match scores as
    attributes, so they're easy to type when writing filters. Also
    provides `flatten` for debug-report printing.

    :param substitutions: number stored under the `.substitutions` attribute
    :type substitutions: int
    :param insertions: number stored under the `.insertions` attribute
    :type insertions: int
    :param deletions: number stored under the `.deletions` attribute
    :type deletions: int
    """
    def __init__(self, substitutions, insertions, deletions):
        self.substitutions = substitutions
        self.insertions = insertions
        self.deletions = deletions
    def flatten(self):
        """Flatten the scores for printing debug reports.

        :return: string in the form substitutions_insertions_deletions
        :rtype: str
        """
        return "_".join([str(self.substitutions), str(self.insertions),
            str(self.deletions)])
class GroupStats:
    """Convenience holder for the statistics of one matched group, so
    filters and output expressions can refer to them by short attribute
    names, with a `flatten` helper for debug printing.

    :param start: number stored under the `.start` attribute
    :type start: int
    :param end: number stored under the `.end` attribute
    :type end: int
    :param seq: the matched sub-record, stored under `.seq`
    :param quality: list of numbers stored under the `.quality` attribute
    :type quality: list of int

    The derived `.length` is end - start, and `.quality_string` is the
    quality array rendered via PHRED encoding.
    """
    def __init__(self, start, end, seq, quality):
        self.start = start
        self.end = end
        self.length = end - start
        self.seq = seq
        self.quality = quality
        self.quality_string = phred_number_array_to_joined_string(quality)
    def flatten(self):
        """Flatten start/end/length for debug reports (quality omitted).

        :return: string in the form start_end_length
        :rtype: str
        """
        return "_".join([str(self.start), str(self.end), str(self.length)])
    def __eq__(self, other):
        """Attention! This is a hack to allow for using the group's name
        (ie 'barcode') instead of accessing the '.seq' method.
        """
        return str(self.seq.seq) == other
class SeqHolder:
    """This is the main holder of sequences, and has methods for doing matching,
    building contexts, filtering, etcetra. Basically there is one of these
    initialized per input, then each operation is done with this object, then
    it generates the appropriate outputs and `chop` actually writes them.
    Used in `chop`.
    The `.seqs` attribute holds the sequences accessed by the matching,
    initialized with the `input_record` SeqRecord and a `dummyspacer` for
    output formatting with a separator.
    :param input_record: an input SeqRecord object
    :type input_record: Bio.SeqRecord.SeqRecord
    :param configuration: the whole program's Configuration object, with
        appropriate file-handles opened up and defaults set
    :type configuration: itermae.Configuration
    """
    def __init__(self, input_record, configuration):
        # 'dummyspacer' is a one-base "X" record usable as a separator in
        # output expressions; 'input' is the record being processed.
        self.seqs = {
            'dummyspacer': SeqRecord.SeqRecord(Seq.Seq("X"),id="dummyspacer"),
            'input': input_record }
        self.seqs['dummyspacer'].letter_annotations['phred_quality'] = [40]
        self.configuration = configuration
        # These two dicts hold the scores for each match operation (in order),
        # and the start end length statistics for each matched group.
        self.match_scores = {}
        self.group_stats = {}
    def apply_operation(self, match_id, input_group, regex):
        """This applies the given match to the `SeqHolder` object, and saves
        how it did internally.
        :param match_id: what name should we call this match? This is useful
            for debugging reports and filtering only.
        :type match_id: str
        :param input_group: which input group to use, by name of the group
        :type input_group: str
        :param regex: the regular expression to apply, complete with named
            groups to save for subsequent match operations
        :type regex: regex compiled regular expression object
        :return: self, this is just done so it can exit early if no valid input
        :rtype: itermae.SeqHolder
        """
        # Try to find the input, if it ain't here then just return
        # (recording a null score so filters can see the miss).
        try:
            self.seqs[input_group]
        except:
            self.match_scores[match_id] = MatchScores(None,None,None)
            return self
        if self.configuration.verbosity >= 3:
            print("\n["+str(time.time())+"] : attempting to match : "+
                str(regex)+" against "+self.seqs[input_group].seq,
                file=sys.stderr)
        # Here we execute the actual meat of the business.
        # Note that the input is made uppercase!
        fuzzy_match = regex.search( str(self.seqs[input_group].seq).upper() )
        if self.configuration.verbosity >= 3:
            print("\n["+str(time.time())+"] : match is : "+str(fuzzy_match),
                file=sys.stderr)
        try:
            # This is making and storing an object for just accessing these
            # numbers nicely in the arguments for forming outputs and filtering.
            # (Raises here when fuzzy_match is None, i.e. no match; handled
            # by the except below with a null score.)
            self.match_scores[match_id] = MatchScores(*fuzzy_match.fuzzy_counts)
            # Then for each of the groups matched by the regex
            for match_name in fuzzy_match.groupdict():
                # We stick into the holder a slice of the input seq, that is
                # the matched # span of this matching group. So, extract.
                self.seqs[match_name] = \
                    self.seqs[input_group][slice(*fuzzy_match.span(match_name))]
                #self.seqs[match_name].description = ""
                # This is to fix a bug where the ID is stuck into the
                # description and gets unpacked on forming outputs
                # Then we record the start, end, and length of the matched span
                self.group_stats[match_name] = \
                    GroupStats(*fuzzy_match.span(match_name),
                        seq=self.seqs[match_name],
                        quality=self.seqs[match_name].letter_annotations['phred_quality']
                        )
        except:
            self.match_scores[match_id] = MatchScores(None,None,None)
    def build_context(self):
        """This unpacks group match stats/scores into an environment that
        the filter can then use to ... well ... filter.
        """
        # This is context for the filters, so is operating more as values,
        # as opposed to the context_seq which is operating with SeqRecords
        self.context_filter = { **self.group_stats , **self.match_scores }
        # Then unpack the sequences as a context for building the output
        # sequences, this is different so that the qualities get stuck with
        # the bases of the groups
        self.context_seq = { **self.seqs }
        # Then one for the IDs, so we're setting the input ID as 'id', and then
        # each group name just refers to the sequence. And I finally put seq
        # qualities in the ID. We do make 'description' available if needed
        self.context_id = {
            'id': self.seqs['input'].id ,
            'description': self.seqs['input'].description ,
            **{ i: str(self.seqs[i].seq) for i in self.seqs } ,
            **{ i+'_quality': self.group_stats[i].quality_string
                for i in self.group_stats } }
    def evaluate_filter_of_output(self,output_dict):
        """This tests a user-defined filter on the 'seq_holder' object.
        This has already been `compile`'d, and here we just attempt to
        evaluate these to `True`, where `True` is passing the filter.
        Exceptions are blocked by using `try`/`except` so that it can fail on
        a single match and move onto the next match/read.
        :param output_dict: a dictionary of outputs to form, as generated from
            the configuration initialization
        :type output_dict: dict
        :return: `True` if the filter passed and the output should be generated
        :rtype: bool
        """
        # NOTE: eval() of a user-supplied config expression — the
        # configuration file/CLI is trusted input by design here.
        try:
            filter_result = eval(output_dict['filter'][1],globals(),self.context_filter)
            if self.configuration.verbosity >= 3:
                print("\n["+str(time.time())+"] : This read "+
                    self.seqs['input'].id+" successfully evaluated the filter "+
                    str(output_dict['filter'][0])+" as "+str(filter_result),
                    file=sys.stderr)
            return filter_result
        except Exception as error:
            if self.configuration.verbosity >= 3:
                print("\n["+str(time.time())+"] : This read "+
                    self.seqs['input'].id+" failed to evaluate the filter "+
                    str(output_dict['filter'][0])+". "+
                    repr(error),file=sys.stderr)
            return False
    def build_output(self,output_dict):
        """Builds the output from the `SeqHolder` object according to the
        outputs in `output_dict`.
        :param output_dict: a dictionary of outputs to form, as generated from
            the configuration initialization
        :type output_dict: dict
        :return: the successfully built SeqRecord, or None if it fails
        :rtype: Bio.SeqRecord.SeqRecord or None
        """
        # NOTE: eval() of user-supplied config expressions, as above.
        try:
            output_seq = eval(output_dict['seq'][1],globals(),self.context_seq)
            out_seq = SeqRecord.SeqRecord(
                seq = Seq.Seq(str(output_seq.seq)) ,
                id = str(eval(output_dict['id'][1],globals(),self.context_id)) ,
                description = str(eval(output_dict['description'][1],globals(),self.context_id)) ,
                letter_annotations = {'phred_quality':output_seq.letter_annotations['phred_quality']}
                )
            if self.configuration.verbosity >= 3:
                print("\n["+str(time.time())+"] : This read "+
                    self.seqs['input'].id+" successfully built the output of "+
                    "id: '"+str(output_dict['id'][0])+"', and "+
                    "description: '"+str(output_dict['description'][0])+"', and "+
                    "seq: '"+str(output_dict['seq'][0])+"'." ,file=sys.stderr)
            return out_seq
        except Exception as error:
            if self.configuration.verbosity >= 3:
                print("\n["+str(time.time())+"] : This read "+
                    self.seqs['input'].id+" failed to build the output of "+
                    "id: '"+str(output_dict['id'][0])+"', and "+
                    "description: '"+str(output_dict['description'][0])+"', and "+
                    "seq: '"+str(output_dict['seq'][0])+"'. "+
                    repr(error) ,file=sys.stderr)
            return None
    def format_report(self,label,output_seq):
        """Formats a standard report line for the debug reporting function.
        :param label: what type of report line this is, so a string describing
            how it went - passed? Failed?
        :type label: str
        :param output_seq: the attempt at generating an output SeqRecord, so
            either one that was formed or None
        :type output_seq: Bio.SeqRecord.SeqRecord or None
        :return: the string for the report
        :rtype: str
        """
        # A failed build is reported as an 'ERROR' placeholder record.
        if output_seq is None:
            output_seq = SeqRecord.SeqRecord('X',
                id='ERROR',
                letter_annotations={'phred_quality':[0]})
        try:
            output_string = ( str(output_seq.id)+"\",\""+
                str(output_seq.seq)+"\",\""+
                phred_number_array_to_joined_string(
                    output_seq.letter_annotations['phred_quality']) )
        except:
            output_string = "*,*,*"
        return ( "\""+label+"\",\""+
            str(self.seqs['input'].id)+"\",\""+
            str(self.seqs['input'].seq)+"\",\""+
            phred_number_array_to_joined_string(self.seqs['input'].letter_annotations['phred_quality'])+"\",\""+
            output_string+"\",\""+
            "-".join([ i+"_"+self.group_stats[i].flatten()
                for i in self.group_stats ] )+
            "\"" ) # See group_stats method for what these are (start stop len)
    def chop(self):
        """This executes the intended purpose of the `SeqRecord` object, and is
        called once. It uses the configured object to apply each match
        operation as best it can with the sequences it is given or can generate,
        then writes the outputs in the specified formats to specified places
        as configured.
        """
        # If qualities are missing, add them as just 40
        if 'phred_quality' not in self.seqs['input'].letter_annotations.keys():
            self.seqs['input'].letter_annotations['phred_quality'] = [40]*len(self.seqs['input'])
            if self.configuration.verbosity >= 2:
                print("\n["+str(time.time())+"] : adding missing qualities of 40 "+
                    "to sequence.", file=sys.stderr)
        # For chop grained self.configuration.verbosity, report
        if self.configuration.verbosity >= 2:
            print("\n["+str(time.time())+"] : starting to process : "+
                self.seqs['input'].id+"\n  "+self.seqs['input'].seq+"\n  "+
                phred_number_array_to_joined_string(self.seqs['input'].letter_annotations['phred_quality']),
                file=sys.stderr)
        # This should fail if you didn't specify anything taking from input stream!
        assert self.configuration.matches_array[0]['input'] == "input", (
            "can't find the sequence named `input`, rather we see `"+
            self.configuration.matches_array[0]['input']+"` in the holder, so breaking. You should "+
            "have the first operation start with `input` as a source." )
        # Next, iterate through the matches, applying each one
        for operation_number, operation in enumerate(self.configuration.matches_array):
            self.apply_operation( 'match_'+str(operation_number),
                operation['input'], operation['regex'] )
        # Now self should have a lot of matches, match scores and group stats,
        # and matched sequences groups. All these values allow us to apply filters
        # We unpack matches and scores into an internal environment for the filters
        self.build_context()
        # Then we eval the filters and build outputs, for each output
        output_records = []
        for each_output in self.configuration.outputs_array:
            output_records.append( {
                'name': each_output['name'],
                'filter_result': self.evaluate_filter_of_output(each_output),
                'output': self.build_output(each_output)
                } )
        # This is just if we pass all the filters provided
        # NOTE(review): passed_filters is computed but never used below —
        # writing is driven by each record's own filter_result. Confirm
        # whether an all-or-nothing mode was intended here.
        passed_filters = not any(
            [ i['filter_result'] == False for i in output_records ] )
        # Then we can make the report CSV if asked for (mainly for debugging/tuning)
        if self.configuration.report_fh != None:
            for output_record in output_records:
                if output_record['filter_result']:
                    print( self.format_report(
                        "PassedFilterFor_"+output_record['name'],
                        output_record['output'] ) ,file=self.configuration.report_fh)
                else:
                    print( self.format_report(
                        "FailedFilterFor_"+output_record['name'],
                        output_record['output'] ) ,file=self.configuration.report_fh)
        # Finally, write all the outputs, to main stream if passed, otherwise to
        # the failed output (if provided)
        for output_record in output_records:
            if output_record['filter_result'] and output_record['output'] is not None:
                write_out_seq(output_record['output'], self.configuration.output_fh, self.configuration.output_format,
                    output_record['name'])
                if self.configuration.verbosity >= 3:
                    print("\n["+str(time.time())+"] : wrote out output '"+
                        output_record['name']+"' for this input",
                        file=sys.stderr)
            elif self.configuration.failed_fh != None:
                # Failed reads are written back out in the *input* format.
                write_out_seq(self.seqs['input'], self.configuration.failed_fh, self.configuration.input_format,
                    output_record['name'])
                if self.configuration.verbosity >= 3:
                    print("\n["+str(time.time())+"] : output "+
                        output_record['name']+" failed, written to fail file\n",
                        file=sys.stderr)
|
{"/itermae/tests/test.py": ["/itermae/__init__.py"], "/tests/test_itermae.py": ["/itermae/__init__.py"]}
|
44,073,505
|
kent1201/Master-thesis
|
refs/heads/master
|
/Network/Self_Attention/utils.py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import math
class PositionalEncoding(nn.Module):
    """Implement the sinusoidal positional-encoding (PE) function.

    Precomputes a (1, max_len, d_model) table of interleaved sin/cos
    values (computed in log space for numerical stability), adds the
    first seq_len rows to the input, and applies dropout.

    :param d_model: embedding dimension (size of the last input axis)
    :param dropout: dropout probability applied after adding the encoding
    :param max_len: maximum sequence length supported by the table
    """
    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        # register_buffer keeps `pe` out of the parameter list (not
        # trained) while still moving it with .to(device)/.cuda().
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model)."""
        # Fix/modernization: the deprecated torch.autograd.Variable
        # wrapper with requires_grad=False is unnecessary — buffers
        # already do not require grad, so plain tensor addition is
        # behaviorally identical on torch >= 0.4.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)
# gpu-used
# Select the second GPU ("cuda:1") when CUDA is available, otherwise CPU.
# NOTE(review): the device index 1 is hard-coded — confirm the target
# machine actually has a second GPU.
CUDA_DEVICES = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
def test():
    """Smoke-test PositionalEncoding on a random (64, 24, 24) batch and
    print the model plus the input/output shapes."""
    inputs = torch.randn(64, 24, 24)
    # Sequence length of each sample (rows of its first column).
    seq_lengths = [sample[:, 0].shape[0] for sample in inputs]
    longest = max(seq_lengths, default=0)
    model = PositionalEncoding(
        d_model=24,
        dropout=0.1,
        max_len=longest
        )
    model.train()
    outputs = model(inputs)
    print("[Self attention utils.py] model: {}".format(model))
    print("[Self attention utils.py] inputs: {}".format(inputs.shape))
    print("[Self attention utils.py] outputs: {}".format(outputs.shape))
# Run the smoke test when executed directly as a script.
if __name__ == '__main__':
    test()
|
{"/train.py": ["/Network/supervisor.py", "/Network/generator.py", "/Timedataset.py", "/Loss/embedder_loss.py", "/Loss/supervised_loss.py", "/Loss/joint_Gloss.py", "/Loss/joint_Dloss.py", "/utils.py"], "/Time_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/dataset.py": ["/dataset_preprocess.py"], "/Timedataset.py": ["/dataset_preprocess.py"], "/utils.py": ["/dataset.py"], "/Timetest-tf.py": ["/dataset_preprocess.py"], "/Timetest.py": ["/Timedataset.py", "/Network/simple_predictor.py", "/utils.py"], "/train_c_rnn_gan.py": ["/Timedataset.py", "/utils.py"], "/c_rnn_gan_generate_data.py": ["/Timedataset.py", "/utils.py", "/dataset_preprocess.py"], "/Network/generator.py": ["/Network/Self_Attention/utils.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.