text stringlengths 38 1.54M |
|---|
import sys
import os
import rospy
import math
import numpy as np
from threading import Thread
from tf.transformations import euler_from_quaternion
from geometry_msgs.msg import PolygonStamped, PointStamped, PoseStamped, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry
class StateSubscriber:
    """
    Class which creates object that holds the state of the car.

    It subscribes to the topics /robot_pose and /odometry/filtered.
    """

    def __init__(self):
        # Pose (map frame) and signed speed; None until the first messages arrive.
        self.x = None
        self.y = None
        self.yaw = None
        self.v = None
        # Fix: these were previously created only inside the background ROS
        # thread, so reading them before start() raised AttributeError.
        self.is_ready = False
        self.node_name = 'point_subscriber'

    def __str__(self):
        string = 'SVEA state : ' + 'x : ' + str(self.x) + ', y : ' + str(self.y) + ', yaw : ' + str(self.yaw)
        return string

    def start(self):
        """
        Spins up ROS background thread; must be called to start
        receiving data.

        :return: itself
        :rtype: StateSubscriber
        """
        Thread(target=self._init_and_spin_ros, args=()).start()
        return self

    def _init_and_spin_ros(self):
        # Runs in the background thread: register the subscribers, mark the
        # object ready, then block in rospy.spin() until shutdown.
        rospy.loginfo('Starting Car State Subscriber Node')
        self._start_listen()
        self.is_ready = True
        rospy.spin()

    def _start_listen(self):
        # queue_size=1: only the freshest pose/odometry message is kept.
        rospy.Subscriber('/robot_pose', PoseStamped, self._update_pose, queue_size=1)
        rospy.Subscriber('/odometry/filtered', Odometry, self._read_velocity, queue_size=1)

    def _read_velocity(self, msg):
        # Signed speed: planar velocity magnitude with the sign of the x component.
        self.v = np.sign(msg.twist.twist.linear.x) * np.linalg.norm([msg.twist.twist.linear.x,
                                                                     msg.twist.twist.linear.y])

    def _update_pose(self, msg):
        # Extract the 2D pose; yaw is the Z euler angle from the quaternion.
        self.x = msg.pose.position.x
        self.y = msg.pose.position.y
        quaternions = (msg.pose.orientation.x,
                       msg.pose.orientation.y,
                       msg.pose.orientation.z,
                       msg.pose.orientation.w)
        euler = euler_from_quaternion(quaternions)
        self.yaw = euler[2]
|
# coding: utf-8
import collections
import sys
# Field positions within a colon-separated record line.
ID, GENDER, PRODUCT, CLASP_TYPE, POCKET_TYPE, SEAM_LENGHT, CUTTING, DESIGN_EFFECTS, SEASON = range(9)
# NOTE(review): the misspellings SEAM_LENGHT / seam_lenght / clas_type are kept
# because other functions in this module reference these exact names.
Description = collections.namedtuple("Description",
    ['gender', 'product', 'clas_type', 'pocket_type', 'seam_lenght', 'cutting', 'design_effects', 'season', 'id'])
def main():
    """Entry point: parse file names from argv, build, save and show descriptions."""
    args = sys.argv[1:]
    if not args or args[0] in {"-h", "--help"}:
        print("usage: {0} file1 [file2 [... fileN]]".format(sys.argv[0]))
        sys.exit()
    descriptions = create_discriptions(args)
    save_discriptions(descriptions)
    print(descriptions)
def create_discriptions(filenames):
    """Parse every non-empty line of the given files into Description records.

    :param filenames: iterable of paths to colon-separated record files
    :return: dict mapping record id -> Description

    Fix: files are now closed deterministically via a context manager
    (previously ``open`` was called without ever closing the handle).
    """
    descriptions = {}
    for filename in filenames:
        with open(filename, encoding="utf8") as fh:
            for line in fh:
                line = line.rstrip()
                if line:
                    description = process_line(line)
                    descriptions[description.id] = description
    return descriptions
def process_line(line):
    """Split one colon-delimited record line into a Description tuple."""
    fields = line.split(":")
    return Description(
        fields[GENDER], fields[PRODUCT], fields[CLASP_TYPE],
        fields[POCKET_TYPE], fields[SEAM_LENGHT], fields[CUTTING],
        fields[DESIGN_EFFECTS], fields[SEASON], fields[ID],
    )
def save_discriptions(descriptions):
    """Append one formatted description paragraph per record to the output file.

    Records are written in sorted-key order.

    Fix: the output file is now opened once for the whole loop instead of
    being re-opened in append mode for every single record.
    """
    with open('data/descriptions_out.txt', 'a') as file:
        for key in sorted(descriptions):
            descript = descriptions[key]
            print("{0:.5}ие {1} на {2:.7}ах декорированные {3:.6}ми карманами."
                  "Длинные брючины {4}. Крой типа {5} позволяет подчеркнуть вашу фигуру,"
                  "а эффект {6} создает небрежный образ."
                  "Подходит на {7} сезон.:{8}".format(descript.gender, descript.product, descript.clas_type,
                                                      descript.pocket_type, descript.seam_lenght, descript.cutting,
                                                      descript.design_effects, descript.season, descript.id), file=file)
# Fix: guard the entry point so importing this module does not run main().
if __name__ == "__main__":
    main()
# TODO: iterate without sorting
# TODO: iterate with sorting
# TODO: combine format together with print
# TODO: split create_description into two functions
|
# coding=utf-8
from __future__ import unicode_literals, print_function
import sys
# Python 2 only: reload() and sys.setdefaultencoding() do not exist on
# Python 3. This forces UTF-8 as the process-wide default string encoding.
reload(sys)
sys.setdefaultencoding('utf-8')
import os
from pub_site import create_app
# Create the WSGI application; falls back to the 'prod' config when the
# ENV environment variable is unset or empty.
app = create_app(os.getenv('ENV') or 'prod')
|
from flask import g, abort
from flask_restaction import Resource
from purepage import db
from couchdb.http import NotFound, CouchdbException
class Article(Resource):
    """Article resource: read and write articles stored in CouchDB.

    An article is uniquely identified by the key ``userid.catalog.article``.
    """

    # Full article schema; the second tuple element is the human-readable
    # field description used by flask_restaction.
    schema_article = {
        "_id": ("unicode&required", "文章ID"),
        "userid": ("unicode&required", "作者"),
        "catalog": ("unicode&required", "目录"),
        "article": ("unicode&required", "文章名称"),
        "title": ("unicode&required", "文章标题"),
        "summary": ("unicode", "文章摘要"),
        "tags": [("unicode&required", "标签")],
        "date": ("datetime&required&output", "创建/修改日期"),
        "content": ("unicode&required", "文章内容"),
    }
    # List view omits the (potentially large) content field.
    schema_article_no_content = schema_article.copy()
    del schema_article_no_content['content']
    # Create payload omits server-assigned fields (_id, userid).
    schema_article_create = schema_article.copy()
    del schema_article_create['_id']
    del schema_article_create['userid']
    schema_inputs = {
        "get": {
            "userid": ("unicode&required", "作者"),
            "catalog": ("unicode&required", "目录"),
            "article": ("unicode&required", "文章名称"),
        },
        "get_list": {
            "pagenum": ("+int&default=1", "第几页,从1开始计算"),
            "pagesize": ("+int&default=10", "每页的数量"),
            "userid": ("unicode", "作者"),
            "catalog": ("unicode", "目录"),
            "tag": ("unicode", "标签")
        },
        "post": schema_article_create
    }
    schema_outputs = {
        "get": schema_article,
        "get_list": {
            "total": "int&required",
            "offset": "int&required",
            "rows": [schema_article_no_content]
        },
        "post": schema_article,
    }

    def get(self, userid, catalog, article):
        """Fetch a single article by its composite key."""
        key = ".".join([userid, catalog, article])
        result = db.get(key)
        return result

    def get_list(self, pagenum, pagesize, userid, catalog, tag):
        """Fetch a page of articles, sorted by date in descending order.

        Supported filter combinations:
        1. userid: only articles by the given author
        2. userid + catalog: only the author's articles in the given catalog
        3. userid + tag: only the author's articles with the given tag
        4. tag: only articles with the given tag
        """
        # Pick the CouchDB view and key range matching the filter combination.
        # The trailing {} sorts after any string, so [x, {}] .. [x] with
        # descending=True covers every key prefixed by x.
        if userid:
            if tag:
                view = "by_user_tag"
                startkey = [userid, tag, {}]
                endkey = [userid, tag]
            elif catalog:
                view = "by_user_catalog"
                startkey = [userid, catalog, {}]
                endkey = [userid, catalog]
            else:
                view = "by_user"
                startkey = [userid, {}]
                endkey = [userid]
        elif tag:
            view = "by_tag"
            startkey = [tag, {}]
            endkey = [tag]
        else:
            view = "by_date"
            startkey = None
            endkey = {}
        params = {
            "reduce": False,
            "include_docs": True,
            "skip": (pagenum - 1) * pagesize,
            "limit": pagesize,
            "descending": True,
        }
        view = ("article", view)
        if startkey:
            params["startkey"] = startkey
            params["endkey"] = endkey
        result = db.query(view, **params)
        return {
            "total": result['total_rows'],
            "offset": result['offset'],
            "rows": [x['doc'] for x in result['rows']]
        }

    def post(self, catalog, article, **info):
        """Create an article, or update it if it already exists."""
        userid = g.user['_id']
        _id = '.'.join([userid, catalog, article])
        try:
            origin = db.get(_id)
        except NotFound:
            # First save: build the skeleton document.
            origin = {
                'type': 'article',
                '_id': _id,
                'userid': userid,
                'catalog': catalog,
                'article': article
            }
        origin.update(info)
        db.put(origin)
        return origin
@Article.error_handler
def handler_404(self, ex):
    """Translate CouchDB 404 errors into an HTTP 404 response."""
    if not isinstance(ex, CouchdbException):
        return
    if ex.status_code == 404:
        abort(404, '%s: %s' % (ex.error or 'Not Found', ex.reason or ''))
|
import urllib2
import json
def get_parking_spots():
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=P1").read())
if(contents[0]["name"] != "P1"):
print "Test failed: get_parking_spots()"
else:
print "Test passed: get_parking_spots()"
def get_parking_lot():
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_lots").read())
if(len(contents) != 0):
print "Test failed: get_parking_lots()"
else:
print "Test passed: get_parking_lots()"
def user_sign_in():
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/users?email=bruckwendu80@gmail.com").read())
if(contents[0]["username"] != "bruck"):
print "Test failed: get_parking_spots()"
else:
print "Test passed: get_parking_spots()"
def update_parking_space():
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=P1").read())
contents[0]['occupied'] = True
parking_space = contents[0]
open_connection = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request('http://smart-parking-bruck.c9users.io:8081/parking_spots/'+contents[0]["id"]["$oid"], data=str(json.dumps(parking_space)))
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'PUT'
url = open_connection.open(request)
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=P1").read())
if(contents[0]['occupied'] != True):
print "Test failed: update_parking_space()"
else:
parking_space = {'occupied': False}
url = open_connection.open(request)
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=P1").read())
if(contents[0]["occupied"] != False):
print "Test failed: update_parking_spots()"
else:
print "Test passed: update_parking_spots()"
def update_reserved_parking_info():
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=R1").read())
contents[0]['occupied'] = True
parking_space = contents[0]
open_connection = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request('http://smart-parking-bruck.c9users.io:8081/parking_spots/'+contents[0]["id"]["$oid"], data=str(json.dumps(parking_space)))
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'PUT'
url = open_connection.open(request)
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=P1").read())
if(contents[0]['occupied'] != True):
print "Test failed: update_reserved_parking_info()"
else:
parking_space = {'occupied': False}
url = open_connection.open(request)
contents = json.loads(urllib2.urlopen("http://smart-parking-bruck.c9users.io:8081/parking_spots?name=P1").read())
if(contents[0]["occupied"] != False):
print "Test failed: update_reserved_parking_info()"
else:
print "Test passed: update_reserved_parking_info()"
# Fix: guard the test sequence so importing this module does not hit the network.
# NOTE(review): user_sign_in() is defined above but never invoked -- confirm
# whether it should be part of this sequence.
if __name__ == '__main__':
    get_parking_spots()
    get_parking_lot()
    update_parking_space()
    update_reserved_parking_info()
|
from aws_cdk import (
aws_lambda as lambda_,
aws_apigateway as apigw,
core
)
class ApplicationStack(core.Stack):
    """CDK stack exposing an existing Lambda (referenced by ARN) via API Gateway."""

    def __init__(self, scope: core.Construct, id: str, lambda_arn: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Import the function deployed elsewhere so this stack can wire it up.
        handler = lambda_.Function.from_function_arn(
            self, id="LocalNameForFunction", function_arn=lambda_arn
        )
        apigw.LambdaRestApi(self, "myRestAPI", handler=handler)
|
import unittest
import os,sys
# Make the parent directory importable so the `mysql` wrapper module can be
# imported from this test file regardless of the working directory.
cur_dir = os.path.dirname(__file__)
par_dir = os.path.dirname(cur_dir)
sys.path.append(par_dir)
from mysql import *
import pytest
class Mysql(unittest.TestCase):
    """Ordered integration tests for the mysql wrapper (pytest-ordering)."""

    @pytest.mark.run(order=3)
    def test_connect(self):
        # connect() signals success by returning 1.
        with mysql() as db:
            self.assertEqual(db.connect(), 1)

    @pytest.mark.run(order=4)
    def test_cmd(self):
        # DDL statements must not return the error code -1.
        with mysql() as db:
            db.connect()
            for statement in ('create table test (id int)', 'drop table test'):
                self.assertNotEqual(db.cmd(statement), -1)

    @pytest.mark.run(order=5)
    def test_query(self):
        # A simple metadata query must not return the error code -1.
        with mysql() as db:
            db.connect()
            self.assertNotEqual(db.query('show tables'), -1)

    @pytest.mark.run(order=6)
    def test_close(self):
        # Closing an open connection must not raise.
        with mysql() as db:
            db.connect()
            db.close()
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
|
from flask import Flask, Blueprint,render_template,redirect,url_for, request, jsonify, make_response
# Blueprint for the API tester page; registered by the application factory.
app = Blueprint('api_tester', __name__)


@app.route('/api_tester', methods=['GET', 'POST'])
def display_tester():
    """Render the API tester page for both GET and POST requests."""
    return render_template("api_tester.html")
|
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel, Field
from typing import Optional
from schemas.pyobject import PyObjectId
class InputItem(BaseModel):
    """Payload for creating an item; the description is optional."""

    name: str
    description: Optional[str] = None
class Item(BaseModel):
    """Item as stored in the database; `_id` is exposed as `id` via the alias."""

    id: PyObjectId = Field(..., alias='_id')
    name: str
    description: Optional[str]
class UpdateItem(BaseModel):
    """Partial-update payload: any subset of the fields may be supplied."""

    name: Optional[str]
    description: Optional[str]
"""
.. moduleauthor:: Li, Wang <wangziqi@foreseefund.com>
"""
import os

import pandas as pd

from orca.barra.base import BarraOptimizerBase
from orca.barra import util
class BarraOptimizer(BarraOptimizerBase):
    """Daily Barra optimizer driver.

    Writes the day's alpha/universe input files before each run, and collects
    per-date performance metrics and positions afterwards, dumping them to CSV
    on the last date.
    """

    def __init__(self, config, debug_on, alpha, univ, dates):
        super(BarraOptimizer, self).__init__(config, debug_on=debug_on)
        self.alpha = alpha  # per-date alpha values indexed by sid
        self.univ = univ    # per-date universe membership flags indexed by sid
        self.dates = dates  # ordered list of dates to run
        self.positions = {}
        # Per-date metric accumulators; converted to pd.Series on the last date.
        self.returns_ser, self.ir_ser = {}, {}
        self.factor_risk_ser, self.specific_risk_ser = {}, {}
        self.turnover_ser, self.risk_ser = {}, {}

    def before(self, date):
        """Dump the day's alpha and universe files where the optimizer config points."""
        # NOTE(review): DataFrame.ix is deprecated/removed in modern pandas --
        # this code assumes an old pandas version.
        alpha, univ = self.alpha.ix[date], self.univ.ix[date].astype(bool)
        alpha = pd.DataFrame({'alpha': alpha}).dropna()
        alpha['sid'] = alpha.index
        alpha['bid'] = alpha['sid'].map(self.sid_bid)
        alpha = alpha.reindex(columns=['sid', 'bid', 'alpha'])
        alpha = alpha.dropna()
        config = self.config.xpath('Assets')[0]
        path = util.generate_path(config.attrib['path'], date)
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        alpha.to_csv(path, index=False, float_format='%.6f')
        # Universe file: sids flagged True for the date, mapped to Barra ids.
        univ = pd.DataFrame({'sid': univ.ix[univ].index})
        univ['bid'] = univ['sid'].map(self.sid_bid)
        univ = univ.dropna()
        config = self.config.xpath('Universe')[0]
        path = util.generate_path(config.attrib['path'], date)
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        univ.to_csv(path, index=False)

    def after(self, date):
        """Record the day's metrics; on the final date dump metrics/positions to CSV."""
        self.returns_ser[date], self.ir_ser[date] = self.returns, self.ir
        self.factor_risk_ser[date], self.specific_risk_ser[date] = self.factor_risk, self.specific_risk
        self.turnover_ser[date], self.risk_ser[date] = self.turnover, self.risk
        if date == self.dates[-1]:
            # Last date: freeze the accumulators into Series and write 'metrics'.
            self.returns_ser, self.ir_ser = pd.Series(self.returns_ser), pd.Series(self.ir_ser)
            self.factor_risk_ser, self.specific_risk_ser = pd.Series(self.factor_risk_ser), pd.Series(self.specific_risk_ser)
            self.turnover_ser, self.risk_ser = pd.Series(self.turnover_ser), pd.Series(self.risk_ser)
            df = pd.concat([self.returns_ser, self.ir_ser,
                            self.factor_risk_ser, self.specific_risk_ser,
                            self.turnover_ser, self.risk_ser], axis=1)
            df.columns = ['returns', 'ir', 'factor_risk', 'specific_risk', 'turnover', 'risk']
            df.index = pd.to_datetime(df.index)
            df.to_csv('metrics', index=True, float_format='%.4f')
        self.positions[date] = self.output_portfolio_df['weight']
        if date == self.dates[-1]:
            # Last date: also dump the full positions history, then stop.
            self.positions = pd.DataFrame(self.positions).T
            self.positions.index = pd.to_datetime(self.positions.index)
            self.positions.to_csv('positions', index=True, float_format='%.6f')
            return
        # Seed the next date's initial portfolio with today's output portfolio.
        ndate = self.dates[self.dates.index(date)+1]
        config = self.config.xpath('InitPortfolio')[0]
        path = util.generate_path(config.attrib['path'], ndate)
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        self.output_portfolio_df.to_csv(path, index=True, float_format='%.6f')
if __name__ == '__main__':
    import argparse
    from orca import DATES
    from orca.utils.io import read_frame
    import os
    import shutil

    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--alpha', required=True, type=str)
    parser.add_argument('-c', '--config', required=True, type=str)
    parser.add_argument('-d', '--dir', required=True, type=str)
    parser.add_argument('-u', '--univ', required=True, type=str)
    parser.add_argument('-s', '--start', type=str)
    parser.add_argument('-e', '--end', type=str)
    parser.add_argument('-f', '--freq', default=1, type=int)
    parser.add_argument('-o', '--offset', default=0, type=int)
    parser.add_argument('--debug_on', action='store_true')
    args = parser.parse_args()

    # Fix: `dates` was previously only defined inside `if args.start:`, so
    # omitting --start raised NameError at the slicing step below.
    dates = list(DATES)
    if args.start:
        dates = [date for date in dates if date >= args.start]
    if args.end:
        dates = [date for date in dates if date <= args.end]
    # Apply the offset and frequency down-sampling.
    dates = dates[args.offset::args.freq]

    # Copy the inputs into the working directory and run from there.
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    shutil.copy(args.alpha, args.dir)
    shutil.copy(args.config, args.dir)
    shutil.copy(args.univ, args.dir)
    os.chdir(args.dir)

    alpha, univ = read_frame(args.alpha), read_frame(args.univ)
    optimizer = BarraOptimizer(args.config, args.debug_on, alpha, univ, dates)
    for date in dates:
        optimizer.run(date)
|
# Print the first two columns of the first three rows of Login.xlsx.
from openpyxl import load_workbook  # explicit import instead of wildcard

workbook = load_workbook("Login.xlsx")
sheet = workbook.active
# values_only=True yields cell values rather than Cell objects.
for column in sheet.iter_cols(min_col=1, max_col=2, min_row=1, max_row=3, values_only=True):
    for cell_value in column:
        print(cell_value)
"""
qSQLA Query Syntax
==================
qSQLA is a Query Syntax to filter flat records derived from SQLAlchemy selectable objects.
Each field can be queried with a number of different operators.
The filters are provided in the query string of a ``HTTP GET`` request. The operator is added with a double underscore
to the field name. Unary Operators do not need to specify a value.
.. code::
GET http://host/vsi/log/deliveries?delivery_id__eq=55&delivery_date__gt=2016-01-01T01:00:00
Filter:
delivery_id = 55
delivery_date > 2016-01-01T01:00:00
Response:
[{u'delivery_id': 55,
u'delivery_category': u'Locations',
u'delivery_date': u'2016-06-14T06:46:02.296028+00:00',
u'id': 42,
u'create_date': u'2016-06-14T06:46:02.296028+00:00',
u'error_info': None,
u'row_count': 1,
u'state': 2,
u'update_date': u'2016-06-14T06:46:02.296028+00:00'}]
Supported Operators are:
Unary operators:
- ``is_null`` for all fields
- ``is_not_null`` for all fields
- ``is_true`` for Boolean fields
- ``is_false`` for Boolean fields
Binary operators:
- ``eq`` for Integer, String, Date and DateTime fields
- ``ne`` for Integer, String, Date and DateTime fields
- ``ieq`` case insensitive equal for String fields
- ``gt`` for Integer, Date and DateTime fields
- ``lt`` for Integer, Date and DateTime fields
- ``gte`` for Integer, Date and DateTime fields
- ``lte`` for Integer, Date and DateTime fields
- ``like`` for String fields
- ``not_like`` for String fields
- ``in`` for Integer, String fields. The values are provided as a comma separated list.
- ``not_in`` for Integer, String fields. The values are provided as a comma separated list.
Supported Types:
- ``sqlalchemy.types.Integer``
- ``sqlalchemy.types.Boolean``
- ``sqlalchemy.types.Date``
- ``sqlalchemy.types.DateTime``
- ``sqlalchemy.types.String``
In addition to the filters one can provide
- ``_limit`` Limit the query to a number of records.
- ``_offset`` Add an offset to the query.
- ``_order`` The order field.
- ``_desc`` If provided sort in descending order, else in ascending.
"""
from sqlalchemy import and_, select, func
from sqlalchemy import types
import functools
import dateutil.parser
def requires_types(*types):
    """Decorator factory restricting a filter to columns of the given SQLAlchemy types.

    The wrapped filter raises TypeError when applied to a column whose
    ``.type`` is not an instance of any of *types*.
    """
    def dec(f):
        @functools.wraps(f)
        def wrapper(col, value=None):
            if not any(isinstance(col.type, t) for t in types):
                raise TypeError("Cannot apply filter to field {}".format(col.name))
            return f(col, value)
        return wrapper
    return dec
def convert_type(type_, value):
    """Coerce a raw query-string *value* to the Python type matching column type *type_*.

    Returns None for unsupported column types (Boolean values are handled by
    the unary operators and need no conversion).

    Fix: ``types.Date`` columns are now supported -- the module docstring
    promises ``eq``/``gt``/... for Date fields, but Date is not a subclass of
    DateTime in SQLAlchemy, so such values previously converted to None.
    """
    cls = type_.__class__
    if issubclass(cls, types.Integer):
        return int(value)
    elif issubclass(cls, types.String):
        return value
    elif issubclass(cls, types.DateTime):
        return dateutil.parser.parse(value)
    elif issubclass(cls, types.Date):
        return dateutil.parser.parse(value).date()
def convert_generic(f):
    """Decorator: convert the raw query-string value to the column's Python type."""
    @functools.wraps(f)
    def wrapper(col, raw=None):
        return f(col, convert_type(col.type, raw))
    return wrapper
def convert_list(f):
    """Decorator: split a comma-separated value and convert each element."""
    @functools.wraps(f)
    def wrapper(col, raw=None):
        converted = [convert_type(col.type, part.strip()) for part in raw.split(",")]
        return f(col, converted)
    return wrapper
# NOTE: the comparisons against None/True/False below are intentional:
# SQLAlchemy overloads ==/!= on Column objects to emit SQL (IS NULL,
# IS NOT NULL, ...), so `is None` must NOT be used here.
def is_null(arg1, arg2=None):
    # Matches rows where the column IS NULL; valid for every column type.
    return arg1 == None  # NOQA


def is_not_null(arg1, arg2=None):
    return arg1 != None  # NOQA


@requires_types(types.Boolean)
def is_true(arg1, arg2=None):
    return arg1 == True  # NOQA


@requires_types(types.Boolean)
def is_false(arg1, arg2=None):
    return arg1 == False  # NOQA


@requires_types(types.Integer, types.String, types.DateTime)
@convert_generic
def equals(arg1, arg2):
    return arg1 == arg2


@requires_types(types.Integer, types.String, types.DateTime)
@convert_generic
def not_equals(arg1, arg2):
    return arg1 != arg2


@requires_types(types.String)
@convert_generic
def ignore_case_equals(arg1, arg2):
    # Case-insensitive equality: lower-case both the column and the value.
    return func.lower(arg1) == arg2.lower()


@requires_types(types.Integer, types.DateTime)
@convert_generic
def greater_than(arg1, arg2):
    return arg1 > arg2


@requires_types(types.Integer, types.DateTime)
@convert_generic
def greater_than_equals(arg1, arg2):
    return arg1 >= arg2


@requires_types(types.Integer, types.DateTime)
@convert_generic
def less_than(arg1, arg2):
    return arg1 < arg2


@requires_types(types.Integer, types.DateTime)
@convert_generic
def less_than_equals(arg1, arg2):
    return arg1 <= arg2


@requires_types(types.String)
@convert_generic
def like(arg1, arg2):
    # SQL LIKE; the caller supplies any % wildcards in the value.
    return arg1.like(arg2)


@requires_types(types.String)
@convert_generic
def not_like(arg1, arg2):
    return ~arg1.like(arg2)


@requires_types(types.Integer, types.String)
@convert_list
def in_(arg1, arg2):
    # Membership test against a comma-separated list of values.
    return arg1.in_(arg2)


@requires_types(types.Integer, types.String)
@convert_list
def not_in(arg1, arg2):
    return ~arg1.in_(arg2)
# Operators that take no value from the query string.
# NOTE(review): "UNRAY" is a typo for "UNARY"; kept because query() below
# references this exact name.
UNRAY_OPERATORS = ['is_null', 'is_not_null', 'is_true', 'is_false']
# Maps the operator suffix in the query-string key to its implementation.
OPERATORS = {
    # Unary operators.
    'is_null': is_null,
    'is_not_null': is_not_null,
    'is_true': is_true,
    'is_false': is_false,
    # Binary operators.
    'eq': equals,
    'ne': not_equals,
    'ieq': ignore_case_equals,
    'gt': greater_than,
    'lt': less_than,
    'gte': greater_than_equals,
    'lte': less_than_equals,
    'like': like,
    'not_like': not_like,
    'in': in_,
    'not_in': not_in,
}
def split_operator(param):
    """Split a query-string key into ``(field_name, operator)``.

    ``'field__op'`` yields ``('field', 'op')``; a bare ``'field'`` defaults to
    the ``'eq'`` operator. Raises ValueError when the field-name part is empty.
    """
    name, sep, op = param.rpartition('__')
    if not sep:
        name, op = param, 'eq'
    else:
        op = op.lower()
    if name == '':
        raise ValueError("No valid parameter provided")
    return (name, op)
def build_filters(query):
    """Translate a query dict into a list of filter dicts (name/op/val)."""
    filters = []
    for key, value in query.items():
        field, op = split_operator(key)
        filters.append({"name": field, "op": op, "val": value})
    return filters
def get_column(s, name):
    """Return the column of selectable *s* matching *name* case-insensitively.

    Raises KeyError when no column matches.
    """
    wanted = name.lower()
    match = next((col for col in s.columns if col.name.lower() == wanted), None)
    if match is None:
        raise KeyError("column {} not found".format(name))
    return match
def query(selectable, filters, limit=None, offset=None, order=None, asc=True):
    """add filters to an sqlalchemy selectable

    :param selectable: the select statements
    :param filters: a dictionary with filters
    :param limit: the limit (capped at 10000)
    :param offset: the offset
    :param order: the order field
    :param asc: boolean if sorting should be ascending
    :raises KeyError: if key is not available in query
    :raises ValueError: if value cannot be converted to Column Type
    :raises TypeError: if filter is not available for SQLAlchemy Column Type
    :return: a selectable with the filters, offset and order applied
    """
    restrictions = []
    alias = selectable.alias("query")
    for f in filters:
        col = get_column(alias, f["name"])
        # Unary operators take no value from the query string.
        if f["op"] in UNRAY_OPERATORS:
            restrictions.append(OPERATORS[f["op"]](col))
        else:
            restrictions.append(OPERATORS[f["op"]](col, f["val"]))
    if restrictions:
        sel = select([alias], whereclause=and_(*restrictions))
    else:
        sel = select([alias])
    # Always cap the page size at 10000.
    if limit is None:
        limit = 10000
    else:
        limit = min(int(limit), 10000)
    sel = sel.limit(limit)
    if offset:
        sel = sel.offset(offset)
    if order:
        order_col = get_column(alias, order)
        # NOTE(review): get_column raises KeyError instead of returning None,
        # so this fallback branch appears unreachable -- confirm.
        if order_col is None:
            order_col = list(alias.columns)[0]
        if not asc:
            order_col = order_col.desc()
        sel = sel.order_by(order_col)
    return sel
|
from pycorenlp import StanfordCoreNLP
# You have to download the latest StanfordCoreNLP model from https://stanfordnlp.github.io/CoreNLP/index.html#download and call it with the following command, adjusting the path accordingly
# java -mx4g -cp "D:\Felix\Downloads\stanford-corenlp-4.2.0\\*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000
# Client for a locally running CoreNLP server (see the command above).
nlp = StanfordCoreNLP('http://localhost:9000')
def resolve(corenlp_output):
    """ Transfer the word form of the antecedent to its associated pronominal anaphor(s) """
    for chain in corenlp_output['corefs'].values():
        # The antecedent starts as the first mention of the coreference chain
        # and is replaced by any later nominal mention with a different text.
        antecedent = chain[0]
        for mention in chain[1:]:
            if mention['type'] == 'NOMINAL' and mention['text'] != antecedent['text']:
                antecedent = mention
            if mention['type'] == 'PRONOMINAL':
                # Locate the pronoun token and overwrite it in place with the
                # antecedent's word form (sentNum/startIndex are 1-based).
                sent_idx = mention['sentNum'] - 1
                tok_idx = mention['startIndex'] - 1
                corenlp_output['sentences'][sent_idx]['tokens'][tok_idx]['word'] = antecedent['text']
def get_resolved(text):
    """Annotate *text* with CoreNLP dcoref, resolve pronouns and rebuild the text."""
    corenlp_output = nlp.annotate(text, properties={
        'timeout': '50000',
        'annotators': 'dcoref',
        'outputFormat': 'json',
        'ner.useSUTime': 'false',
    })
    resolve(corenlp_output)
    possessives = {'hers', 'his', 'their', 'theirs', 'ours', 'yours'}
    pieces = []
    for sentence in corenlp_output['sentences']:
        for token in sentence['tokens']:
            word = token['word']
            # check lemmas as well as tags for possessive pronouns in case of tagging errors
            if token['lemma'] in possessives or token['pos'] == 'PRP$':
                word += "'s"  # add the possessive morpheme
            pieces.append(word + token['after'])
    return ''.join(pieces)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for dark_current / flat_field correction"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import numpy
import logging
import time
import sys
import fabio
# When executed directly, extend the package path so the relative import
# below resolves as if this module lived inside pyFAI.test.
if __name__ == '__main__':
    import pkgutil, os
    __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
# presumably utilstest imported pyFAI as a side effect -- TODO confirm
pyFAI = sys.modules["pyFAI"]
from pyFAI.opencl import ocl
if logger.getEffectiveLevel() <= logging.INFO:
    import pylab  # interactive plotting only when verbose logging is enabled
class TestFlat1D(unittest.TestCase):
    """1D integration of (flat + dark) must be flat (== 1) after dark/flat correction."""

    shape = 640, 480
    flat = 1 + numpy.random.random(shape)
    dark = numpy.random.random(shape)
    raw = flat + dark  # synthetic raw image: flat field plus dark current
    eps = 1e-6
    ai = pyFAI.AzimuthalIntegrator()
    ai.setFit2D(directDist=1, centerX=shape[1] // 2, centerY=shape[0] // 2, pixelX=1, pixelY=1)
    bins = 500

    def test_no_correct(self):
        """Without correction the integrated profile must NOT be flat."""
        r, I = self.ai.integrate1d(self.raw, self.bins, unit="r_mm", correctSolidAngle=False)
        logger.info("1D Without correction Imin=%s Imax=%s <I>=%s std=%s" % (I.min(), I.max(), I.mean(), I.std()))
        self.assertNotAlmostEqual(I.mean(), 1, 2, "Mean should not be 1")
        self.assertFalse(I.max() - I.min() < self.eps, "deviation should be large")

    def test_correct(self):
        """With dark/flat correction every available method must yield a flat profile of 1."""
        all_methods = ["numpy", "cython", "splitbbox", "splitpix", "lut", "csr"]
        if ocl:
            # Add the OpenCL variants for every device type actually present.
            for device in ["cpu", "gpu", "acc"]:
                if ocl.select_device(dtype=device):
                    all_methods.append("lut_ocl_%s" % device)
                    all_methods.append("csr_ocl_%s" % device)
        # Fix: deprecated TestCase.assert_ replaced by its assertTrue alias.
        for meth in all_methods:
            r, I = self.ai.integrate1d(self.raw, self.bins, unit="r_mm", method=meth, correctSolidAngle=False, dark=self.dark, flat=self.flat)
            logger.info("1D method:%s Imin=%s Imax=%s <I>=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
            self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
            self.assertTrue(I.max() - I.min() < self.eps, "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
        for meth in ["xrpd_numpy", "xrpd_cython", "xrpd_splitBBox", "xrpd_splitPixel"]:  # , "xrpd_OpenCL" ]: bug with 32 bit GPU and request 64 bit integration
            r, I = self.ai.__getattribute__(meth)(self.raw, self.bins, correctSolidAngle=False, dark=self.dark, flat=self.flat)
            logger.info("1D method:%s Imin=%s Imax=%s <I>=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
            self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
            self.assertTrue(I.max() - I.min() < self.eps, "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
        if ocl and pyFAI.opencl.ocl.select_device("gpu", extensions=["cl_khr_fp64"]):
            # Direct OpenCL method requires double-precision support on the GPU.
            meth = "xrpd_OpenCL"
            r, I = self.ai.__getattribute__(meth)(self.raw, self.bins, correctSolidAngle=False, dark=self.dark, flat=self.flat)
            logger.info("1D method:%s Imin=%s Imax=%s <I>=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
            self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
            self.assertTrue(I.max() - I.min() < self.eps, "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
class TestFlat2D(unittest.TestCase):
    """2D integration of (flat + dark) must be flat (== 1) after dark/flat correction."""

    shape = 640, 480
    flat = 1 + numpy.random.random(shape)
    dark = numpy.random.random(shape)
    raw = flat + dark  # synthetic raw image: flat field plus dark current
    eps = 1e-6
    ai = pyFAI.AzimuthalIntegrator()
    ai.setFit2D(directDist=1, centerX=shape[1] // 2, centerY=shape[0] // 2, pixelX=1, pixelY=1)
    bins = 500
    azim = 360

    def test_no_correct(self):
        """Without correction the integrated image must NOT be flat."""
        I, _ , _ = self.ai.integrate2d(self.raw, self.bins, self.azim, unit="r_mm", correctSolidAngle=False)
        I = I[numpy.where(I > 0)]  # ignore empty bins
        logger.info("2D Without correction Imin=%s Imax=%s <I>=%s std=%s" % (I.min(), I.max(), I.mean(), I.std()))
        self.assertNotAlmostEqual(I.mean(), 1, 2, "Mean should not be 1")
        self.assertFalse(I.max() - I.min() < self.eps, "deviation should be large")

    def test_correct(self):
        """With dark/flat correction every available 2D method must yield a flat image of 1."""
        # Per-method tolerance on (max - min).
        test2d = {"numpy": self.eps,
                  "cython": self.eps,
                  "splitbbox": self.eps,
                  "splitpix": self.eps,
                  "lut": self.eps,
                  }
        if ocl:
            for device in ["cpu", "gpu", "acc"]:
                if ocl.select_device(dtype=device):
                    test2d["lut_ocl_%s" % device] = self.eps
                    test2d["csr_ocl_%s" % device] = self.eps
        test2d_direct = {"xrpd2_numpy": 0.3,  # histograms are very noisy in 2D
                         "xrpd2_histogram": 0.3,  # histograms are very noisy in 2D
                         "xrpd2_splitBBox": self.eps,
                         "xrpd2_splitPixel": self.eps}
        # Fix: deprecated TestCase.assert_ replaced by its assertTrue alias.
        for meth in test2d:
            logger.info("About to test2d %s" % meth)
            try:
                I, _, _ = self.ai.integrate2d(self.raw, self.bins, self.azim, unit="r_mm", method=meth, correctSolidAngle=False, dark=self.dark, flat=self.flat)
            except (MemoryError, pyFAI.opencl.pyopencl.MemoryError):
                logger.warning("Got MemoryError from OpenCL device")
                continue
            I = I[numpy.where(I > 0)]
            logger.info("2D method:%s Imin=%s Imax=%s <I>=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
            self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
            self.assertTrue(I.max() - I.min() < test2d[meth], "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
        for meth in test2d_direct:
            logger.info("About to test2d_direct %s" % meth)
            I, _, _ = self.ai.__getattribute__(meth)(self.raw, self.bins, self.azim, correctSolidAngle=False, dark=self.dark, flat=self.flat)
            I = I[numpy.where(I > 0)]
            logger.info("1D method:%s Imin=%s Imax=%s <I>=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
            self.assertTrue(abs(I.mean() - 1) < test2d_direct[meth], "Mean should be 1 in %s" % meth)
            self.assertTrue(I.max() - I.min() < test2d_direct[meth], "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
def test_suite_all_Flat():
    """Assemble the flat/dark correction test suite."""
    suite = unittest.TestSuite()
    for case in (TestFlat1D("test_no_correct"), TestFlat1D("test_correct"),
                 TestFlat2D("test_no_correct"), TestFlat2D("test_correct")):
        suite.addTest(case)
    return suite
if __name__ == '__main__':
    # Build and run the full flat/dark test suite with the text runner.
    mysuite = test_suite_all_Flat()
    runner = unittest.TextTestRunner()
    runner.run(mysuite)
|
# -*- coding: utf-8 -*-
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
"""
Tests for the Openbeans Dyno Docker integration
"""
from pytest import mark, raises
from unittest import mock
from flask import url_for
import dyno.app.api.docker as dkr
# Fuzzed container names: every entry ends in 'foo', so a lookup by the
# suffix 'foo' is deliberately ambiguous (exercises _normalize_name).
CONTAINER_NAME_FUZZ = ['a_foo', 'b__foo', '_c_foo']
@mark.parametrize('container_fuzz', CONTAINER_NAME_FUZZ)
@mock.patch('dyno.app.api.docker.container_list', return_value={'containers': CONTAINER_NAME_FUZZ})
def test_normalize_name_multiple(cl, container_fuzz):
    """
    GIVEN multiple containers with names which end in `foo`
    WHEN the name ending in `foo` is passed into the _normalize_name function
    THEN the function raises an exception
    """
    # `cl` is the patched container_list mock; every fuzzed name ends in
    # 'foo', so the suffix lookup cannot resolve to a single container.
    with raises(Exception, match="more than one"):
        dkr._normalize_name('foo')
@mock.patch('dyno.app.api.docker.container_list', return_value={'containers': CONTAINER_NAME_FUZZ})
def test_normalize_name_multiple_not_found(cl):
    """
    GIVEN no containers which end in `baz`
    WHEN a name ending in `baz` is passed into the _normalize_name func
    THEN an exception is raised
    """
    # None of the fuzzed container names end in 'baz'.
    with raises(Exception, match="not found"):
        dkr._normalize_name('baz')
@mock.patch('dyno.app.api.docker.client')
def test_list(docker_mock, client):
    """
    GIVEN an HTTP call to /docker/list
    WHEN the results are returned
    THEN the results contain a list of running containers
    """
    # Fake a single running container. `name` must be assigned after
    # construction because Mock(name=...) configures the mock's own name.
    fake_container = mock.Mock()
    fake_container.name = 'fake_container'
    list_mock = mock.Mock(return_value=[fake_container], name='list_mock')
    docker_mock.containers.list = list_mock
    ret = client.get(url_for('docker.container_list'))
    assert ret.json == {'containers': ['fake_container']}
@mock.patch('dyno.app.api.docker._normalize_name', return_value='fake_container_name')
def test_query(fake_container_patch, docker_inspect, client):
    """
    GIVEN an HTTP call to /docker/query
    WHEN the results are returned
    THEN the results contain info about the CPU and memory
    """
    # `docker_inspect` is a fixture supplying a canned inspect payload
    # (presumably defined in conftest.py — not visible here).
    with mock.patch.object(dkr.low_client, 'inspect_container', return_value=docker_inspect):
        ret = client.get(url_for('docker.query'), query_string={'c': 'fake_container_name'})
    assert ret.json['CPU'] == 1000
    assert ret.json['Mem'] == 200
@mock.patch('dyno.app.api.docker.client', name='docker_mock')
@mock.patch('dyno.app.api.docker._normalize_name', return_value='fake_container_name', name='normalize_mock')
def test_update(fake_container_patch, docker_mock, client):
    """
    GIVEN an HTTP call to /docker/update
    WHEN the call contains settings to be updated
    THEN the settings are updated
    """
    fake_container = mock.Mock(name='fake_container')
    fake_container.name = 'fake_container'
    get_mock = mock.Mock(return_value=fake_container, name='get_mock')
    docker_mock.containers.get = get_mock
    # A CPU value of 100 is expected to be translated to a quota of 25990.
    client.get(url_for('docker.update'), query_string={'c': 'opbeans-python', 'component': 'CPU', 'val': 100})
    fake_container.update.assert_called_with(cpu_quota=25990)
# FIXME This is marked as xfail pending a centralization of the normalization functions
@mark.xfail
@mark.parametrize('val', range(1,101, 10))
@mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1,10]}))
def test_normalize(val):
    """
    GIVEN values between 1-100
    WHEN the value is sent to be normalized
    THEN the correct normalized value is returned
    """
    got = dkr._normalize_value('cpu', val)
    # Expected inverse linear mapping: 1 -> 10.0 down to 91 -> 1.0.
    want = (101 - val) / 10
    assert got == want
# FIXME This is marked as xfail pending a centralization of the normalization functions
@mark.xfail
@mark.parametrize('val', range(1,10))
@mock.patch('dyno.app.api.control._range', mock.Mock(return_value={'Fr': [1,10]}))
def test_denormalize(val):
    """
    GIVEN values between 1-10
    WHEN the value is sent to be denormalized
    THEN the correct denormalized value is returned
    """
    got = dkr._denormalize_value('cpu', val)
    # Expected inverse linear mapping: 1 -> 90 down to 9 -> 10.
    want = 100 - (val * 10)
    assert got == want
|
"""
Generic plot functions based on matplotlib
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import int
try:
## Python 2
basestring
except:
## Python 3
basestring = str
import pylab
import matplotlib
import matplotlib.ticker
import matplotlib.dates as mpl_dates
__all__ = ['plot_ax_frame']
# Named font sizes understood by matplotlib, smallest to largest.
MPL_FONT_SIZES = ['xx-small', 'x-small', 'small', 'medium',
                  'large', 'x-large', 'xx-large']
# Rank of each date unit (year=0 ... second=6); only referenced by the
# commented-out interval logic in _create_date_locator.
MPL_INTERVAL_DICT = {'Y': 0, 'M': 1, 'W': 2, 'D': 3, 'h': 4, 'm': 5, 's': 6}
# Date-unit character -> matplotlib date locator class.
# NOTE(review): weekday is keyed 'd' here but 'W' in MPL_INTERVAL_DICT —
# confirm which spelling callers actually pass.
MPL_DATE_LOCATOR_DICT = {'Y': mpl_dates.YearLocator,
                         'M': mpl_dates.MonthLocator,
                         'd': mpl_dates.WeekdayLocator,
                         'D': mpl_dates.DayLocator,
                         'h': mpl_dates.HourLocator,
                         'm': mpl_dates.MinuteLocator,
                         's': mpl_dates.SecondLocator}
def _create_date_locator(tick_interval):
    """
    Create matplotlib date locator from tick interval specification

    :param tick_interval:
        - 0 (= no ticks)
        - None (= automatic ticks)
        - string XXY, with XX interval and Y time unit:
            'Y', 'M', 'D', 'd', 'h', 'm', 's'
            (year|month|day|weekday|hour|minute|second)

    :return:
        matplotlib date locator object
    """
    if tick_interval == 0:
        return matplotlib.ticker.NullLocator()
    if tick_interval is None:
        return mpl_dates.AutoDateLocator(interval_multiples=True)
    # Split a string spec such as '2M' into value and unit; a bare number
    # is interpreted as a year interval.
    if isinstance(tick_interval, basestring):
        val = int(tick_interval[:-1])
        tick_unit = tick_interval[-1:]
    else:
        val, tick_unit = tick_interval, 'Y'
    # YearLocator takes 'base'; every other date locator takes 'interval'.
    kwarg_name = 'base' if tick_unit == 'Y' else 'interval'
    return MPL_DATE_LOCATOR_DICT[tick_unit](**{kwarg_name: val})
ax_frame_doc = """
Frame arguments:
:param xscaling:
str, scaling to use for X axis ('lin' or 'log')
Prepend '-' to invert orientation of X axis
(default: 'lin')
:param yscaling:
str, scaling to use for Y axis ('lin' or 'log')
Prepend '-' to invert orientation of Y axis
(default: 'lin')
:param xmin:
float, start value for X axis
Note that, if X values of :param:`datasets` are datetimes,
this should be datetime also
(default: None, let matplotlib decide)
:param xmax:
float, end value for X axis
Note that, if X values of :param:`datasets` are datetimes,
this should be datetime also
(default: None, let matplotlib decide)
:param ymin:
float, start value for Y axis
Note that, if Y values of :param:`datasets` are datetimes,
this should be datetime also
(default: None, let matplotlib decide)
:param ymax:
float, end value for Y axis
Note that, if Y values of :param:`datasets` are datetimes,
this should be datetime also
(default: None, let matplotlib decide)
:param xlabel:
str, label for X axis
(default: '')
:param ylabel:
str, label for Y axis
(default: '')
:param ax_label_fontsize:
int or str, font size to use for axis labels
(default: 'large')
:param xticks:
list or array, X axis tick positions
Note that, if X values of :param:`datasets` are datetimes,
these should be datetimes also
(default: None, let matplotlib decide)
:param xtick_labels:
X axis tick labels, either:
- None (= automatic labels)
- list of labels corresponding to :param:`xticks`
- matplotlib Formatter object
- format string (for dates or scalars)
- '' or [] (= no tick labels)
(default: None, let matplotlib decide)
:param xtick_interval:
X axis tick interval specification
single value (major ticks only) or tuple (major/minor ticks) of:
- matplotlib Locator object
- None (= automatic ticks)
- 0 (= no ticks)
- int (= integer tick interval)
- str (= tick interval for dates, where last char is in YMDdhms
(year|month|day|weekday|hour|minute|second)
(default: None)
:param xtick_rotation:
float, rotation angle for X axis tick labels
(default: 0)
:param xtick_direction:
str, X axis tick direction: 'in', 'out' or 'both'
(default: '')
:param xtick_side:
str, on which side of the plot X ticks should be drawn:
'bottom', 'top', 'both' or 'none'
(default: '')
:param xlabel_side:
str, on which side of the plot X tick labels should be drawn:
'bottom', 'top', 'both' or 'none'
(default: '', will take same value as :param:`xtick_side`)
:param yticks:
list or array, Y axis tick positions
Note that, if Y values of :param:`datasets` are datetimes,
these should be datetimes also
(default: None, let matplotlib decide)
:param ytick_labels:
Y axis tick labels
See :param:`xtick_labels` for options
:param ytick_interval:
Y axis tick interval specification
see :param:`xtick_interval` for options
:param ytick_rotation:
float, rotation angle for Y axis tick labels
(default: 0)
:param ytick_direction:
str, Y axis tick direction: 'in', 'out' or 'both'
(default: '')
:param ytick_side:
str, on which side of the plot Y ticks should be drawn:
'left', 'right', 'both' or 'none'
(default: '')
:param ylabel_side:
str, on which side of the plot Y tick labels should be drawn:
'left', 'right', 'both' or 'none'
(default: '', will take same value as :param:`ytick_side`)
:param tick_label_fontsize:
int or str, font size to use for axis tick labels
(default: 'medium')
:param tick_params:
dict, containing keyword arguments for :func:`ax.tick_params`,
that will be applied to both the X and Y axes
(default: {})
:param title:
str, plot title
(default: '')
	:param title_fontsize:
		int or str, font size to use for plot title
		(default: 'large')
:param xgrid:
int, 0/1/2/3 = draw no/major/minor/major+minor X grid lines
(default: 0)
:param ygrid:
int, 0/1/2/3 = draw no/major/minor/major+minor Y grid lines
(default: 0)
:param aspect_ratio:
float, vertical-to-horizontal aspect ratio in data units
or str ('equal', 'auto')
(default: None)
:param hlines:
[y, xmin, xmax] list of arrays (of same length) or scalars
If xmin or xmax are None, limits of X axis will be used
(default: [])
:param hline_args:
dict, containing keyword arguments understood by :func:`pylab.hlines`
(e.g., 'colors', 'linestyles', 'linewidth', 'label')
(default: {})
:param vlines:
[x, ymin, ymax] list of arrays (of same length) or scalars
If ymin or ymax are None, limits of Y axis will be used
(default: [])
:param vline_args:
dict, containing keyword arguments understood by :func:`pylab.vlines`
(e.g., 'colors', 'linestyles', 'linewidth', 'label')
(default: {})
"""
def plot_ax_frame(ax, x_is_date=False, y_is_date=False,
                  xscaling='lin', yscaling='lin',
                  xmin=None, xmax=None, ymin=None, ymax=None,
                  xlabel='', ylabel='', ax_label_fontsize='large',
                  xticks=None, xtick_labels=None, xtick_interval=None, xtick_rotation=0,
                  xtick_direction='', xtick_side='', xlabel_side='',
                  yticks=None, ytick_labels=None, ytick_interval=None, ytick_rotation=0,
                  ytick_direction='', ytick_side='', ylabel_side='',
                  tick_label_fontsize='medium', tick_params={},
                  title='', title_fontsize='large',
                  xgrid=0, ygrid=0, aspect_ratio=None,
                  hlines=[], hline_args={}, vlines=[], vline_args={}):
    """
    Plot ax frame

    :param ax:
        matplotlib Axes instance, in which frame will be drawn
    :param x_is_date:
        bool, whether or not X axis contains datetimes
        (default: False)
    :param y_is_date:
        bool, whether or not Y axis contains datetimes
        (default: False)

    :return:
        None
    """
    # NOTE(review): the mutable default arguments ({} / []) are shared across
    # calls; they are never mutated here, but None defaults would be safer.
    ## Axis scaling
    # A leading '-' in the scaling spec inverts the axis orientation.
    if xscaling[0] == '-':
        xscaling = xscaling[1:]
        ax.invert_xaxis()
    xscaling = {'lin': 'linear', 'log': 'log'}[xscaling[:3]]
    ax.set_xscale(xscaling)
    if yscaling[0] == '-':
        yscaling = yscaling[1:]
        ax.invert_yaxis()
    yscaling = {'lin': 'linear', 'log': 'log'}[yscaling[:3]]
    ax.set_yscale(yscaling)
    ## Vertical / horizontal aspect ratio (in data units)
    if aspect_ratio is not None:
        ax.set_aspect(aspect_ratio)
    ## Axis limits (should come after axis scaling!)
    # Only the limits the caller specified are overridden; the other side
    # keeps matplotlib's automatic value.
    if not (xmin is None and xmax is None):
        _xmin, _xmax = ax.get_xlim()
        xmin = _xmin if xmin is None else xmin
        xmax = _xmax if xmax is None else xmax
        ax.set_xlim(xmin, xmax)
    if not (ymin is None and ymax is None):
        _ymin, _ymax = ax.get_ylim()
        ymin = _ymin if ymin is None else ymin
        ymax = _ymax if ymax is None else ymax
        ax.set_ylim(ymin, ymax)
    ## Axis labels
    if xlabel:
        ax.set_xlabel(xlabel, fontsize=ax_label_fontsize)
    if ylabel:
        ax.set_ylabel(ylabel, fontsize=ax_label_fontsize)
    ## Horizontal / vertical lines
    # Missing extents (None) default to the current axis limits.
    if hlines:
        y, xmin, xmax = hlines
        _xmin, _xmax = ax.get_xlim()
        xmin = _xmin if xmin is None else xmin
        xmax = _xmax if xmax is None else xmax
        ax.hlines(y, xmin, xmax, **hline_args)
    if vlines:
        x, ymin, ymax = vlines
        _ymin, _ymax = ax.get_ylim()
        ymin = _ymin if ymin is None else ymin
        ymax = _ymax if ymax is None else ymax
        ax.vlines(x, ymin, ymax, **vline_args)
    ## X ticks
    if xticks is not None:
        ax.set_xticks(xticks)
    #elif xtick_interval is not None:
    else:
        # The interval spec may be a single (major) value or a
        # (major, minor) tuple.
        if isinstance(xtick_interval, tuple) and len(xtick_interval) == 2:
            major_tick_interval, minor_tick_interval = xtick_interval
        else:
            major_tick_interval, minor_tick_interval = xtick_interval, None
        if isinstance(major_tick_interval, matplotlib.ticker.Locator):
            major_loc = major_tick_interval
        elif x_is_date:
            major_loc = _create_date_locator(major_tick_interval)
        elif major_tick_interval:
            major_loc = matplotlib.ticker.MultipleLocator(major_tick_interval)
        elif major_tick_interval is None:
            if xscaling[:3] == 'log':
                major_loc = matplotlib.ticker.LogLocator()
            else:
                major_loc = matplotlib.ticker.AutoLocator()
        else:
            # Interval of 0 means: no ticks.
            major_loc = matplotlib.ticker.NullLocator()
        if major_loc:
            ax.xaxis.set_major_locator(major_loc)
            if isinstance(major_loc, mpl_dates.DateLocator):
                if xtick_labels is None:
                    ax.xaxis.set_major_formatter(mpl_dates.AutoDateFormatter(locator=major_loc))
        if isinstance(minor_tick_interval, matplotlib.ticker.Locator):
            minor_loc = minor_tick_interval
        elif x_is_date:
            minor_loc = _create_date_locator(minor_tick_interval)
        elif minor_tick_interval:
            minor_loc = matplotlib.ticker.MultipleLocator(minor_tick_interval)
        elif minor_tick_interval is None:
            if xscaling[:3] == 'log':
                minor_loc = None
            else:
                minor_loc = matplotlib.ticker.AutoMinorLocator()
        else:
            minor_loc = matplotlib.ticker.NullLocator()
        if minor_loc:
            ax.xaxis.set_minor_locator(minor_loc)
        ## Note: no formatter for minor ticks, as we don't print them
    ## X ticklabels
    if xscaling[:3] == 'log' and xtick_labels is None:
        ## Do not use log notation for small exponents
        _xmin, _xmax = ax.get_xlim()
        xmin = _xmin if xmin is None else xmin
        xmax = _xmax if xmax is None else xmax
        if xmin > 1E-4 and xmax < 1E+4:
            xtick_labels = matplotlib.ticker.FormatStrFormatter('%g')
        #else:
        #    xtick_labels = matplotlib.ticker.LogFormatter()
    if isinstance(xtick_labels, matplotlib.ticker.Formatter):
        ax.xaxis.set_major_formatter(xtick_labels)
    elif isinstance(xtick_labels, basestring):
        # '' = no labels; otherwise a date or scalar format string.
        if xtick_labels == '':
            major_formatter = matplotlib.ticker.NullFormatter()
        elif x_is_date:
            major_formatter = mpl_dates.DateFormatter(xtick_labels)
        else:
            major_formatter = matplotlib.ticker.FormatStrFormatter(xtick_labels)
        ax.xaxis.set_major_formatter(major_formatter)
    elif xtick_labels is not None:
        ax.set_xticklabels(xtick_labels)
    ## Y ticks
    if yticks is not None:
        ax.set_yticks(yticks)
    #if ytick_interval is not None:
    else:
        # Mirror of the X-tick interval handling above.
        if isinstance(ytick_interval, tuple) and len(ytick_interval) == 2:
            major_tick_interval, minor_tick_interval = ytick_interval
        else:
            major_tick_interval, minor_tick_interval = ytick_interval, None
        if isinstance(major_tick_interval, matplotlib.ticker.Locator):
            major_loc = major_tick_interval
        elif y_is_date:
            major_loc = _create_date_locator(major_tick_interval)
        elif major_tick_interval:
            major_loc = matplotlib.ticker.MultipleLocator(major_tick_interval)
        elif major_tick_interval is None:
            if yscaling[:3] == 'log':
                major_loc = matplotlib.ticker.LogLocator()
            else:
                major_loc = matplotlib.ticker.AutoLocator()
        else:
            major_loc = matplotlib.ticker.NullLocator()
        if major_loc:
            ax.yaxis.set_major_locator(major_loc)
            if isinstance(major_loc, mpl_dates.DateLocator):
                if ytick_labels is None:
                    ax.yaxis.set_major_formatter(mpl_dates.AutoDateFormatter(locator=major_loc))
        if isinstance(minor_tick_interval, matplotlib.ticker.Locator):
            minor_loc = minor_tick_interval
        elif y_is_date:
            minor_loc = _create_date_locator(minor_tick_interval)
        elif minor_tick_interval:
            minor_loc = matplotlib.ticker.MultipleLocator(minor_tick_interval)
        elif minor_tick_interval is None:
            if yscaling[:3] == 'log':
                minor_loc = None
            else:
                minor_loc = matplotlib.ticker.AutoMinorLocator()
        else:
            minor_loc = matplotlib.ticker.NullLocator()
        if minor_loc:
            ax.yaxis.set_minor_locator(minor_loc)
        ## Note: no formatter for minor ticks, as we don't print them
    ## Y tick labels
    if yscaling[:3] == 'log' and ytick_labels is None:
        _ymin, _ymax = ax.get_ylim()
        ymin = _ymin if ymin is None else ymin
        ymax = _ymax if ymax is None else ymax
        ## Do not use log notation for small exponents
        if ymin > 1E-4 and ymax < 1E+4:
            ytick_labels = matplotlib.ticker.FormatStrFormatter('%g')
        #else:
        #    ytick_labels = matplotlib.ticker.LogFormatterExponent()
    if isinstance(ytick_labels, matplotlib.ticker.Formatter):
        ax.yaxis.set_major_formatter(ytick_labels)
    elif isinstance(ytick_labels, basestring):
        if ytick_labels == '':
            major_formatter = matplotlib.ticker.NullFormatter()
        elif y_is_date:
            major_formatter = mpl_dates.DateFormatter(ytick_labels)
        else:
            major_formatter = matplotlib.ticker.FormatStrFormatter(ytick_labels)
        ax.yaxis.set_major_formatter(major_formatter)
    elif ytick_labels is not None:
        ax.set_yticklabels(ytick_labels)
    ## Tick label size and rotation
    pylab.setp(ax.get_xticklabels(), fontsize=tick_label_fontsize)
    pylab.setp(ax.get_yticklabels(), fontsize=tick_label_fontsize)
    if xtick_rotation:
        pylab.setp(ax.get_xticklabels(), ha='right', rotation=xtick_rotation)
    if ytick_rotation:
        pylab.setp(ax.get_yticklabels(), ha='right', rotation=ytick_rotation)
    ## Tick aspect
    if tick_params:
        ax.tick_params(axis='both', **tick_params)
    if xtick_direction:
        ax.tick_params(axis='x', direction=xtick_direction)
    if xtick_side:
        # Label sides default to the tick sides.
        if not xlabel_side:
            xlabel_side = xtick_side
        side_kwargs = {}
        if xtick_side in ('top', 'both'):
            side_kwargs['top'] = True
        if xtick_side in ('bottom', 'both'):
            side_kwargs['bottom'] = True
        if xtick_side == 'none':
            side_kwargs['top'] = side_kwargs['bottom'] = False
        ax.tick_params(axis='x', **side_kwargs)
    if xlabel_side:
        side_kwargs = {}
        if xlabel_side == 'bottom':
            side_kwargs['labeltop'] = False
            side_kwargs['labelbottom'] = True
        elif xlabel_side == 'top':
            side_kwargs['labeltop'] = True
            side_kwargs['labelbottom'] = False
        elif xlabel_side == 'both':
            side_kwargs['labeltop'] = side_kwargs['labelbottom'] = True
        elif xlabel_side == 'none':
            side_kwargs['labeltop'] = side_kwargs['labelbottom'] = False
        ax.tick_params(axis='x', **side_kwargs)
    if ytick_direction:
        ax.tick_params(axis='y', direction=ytick_direction)
    if ytick_side:
        if not ylabel_side:
            ylabel_side = ytick_side
        side_kwargs = {}
        if ytick_side in ('left', 'both'):
            side_kwargs['left'] = True
        if ytick_side in ('right', 'both'):
            side_kwargs['right'] = True
        if ytick_side == 'none':
            side_kwargs['left'] = side_kwargs['right'] = False
        ax.tick_params(axis='y', **side_kwargs)
    if ylabel_side:
        side_kwargs = {}
        if ylabel_side == 'left':
            side_kwargs['labelleft'] = True
            side_kwargs['labelright'] = False
        elif ylabel_side == 'right':
            side_kwargs['labelleft'] = False
            side_kwargs['labelright'] = True
        elif ylabel_side == 'both':
            side_kwargs['labelleft'] = side_kwargs['labelright'] = True
        elif ylabel_side == 'none':
            side_kwargs['labelleft'] = side_kwargs['labelright'] = False
        ax.tick_params(axis='y', **side_kwargs)
    ## Grid
    if xgrid:
        which = {1: 'major', 2: 'minor', 3: 'both'}[xgrid]
        ax.grid(True, which=which, axis='x')
    if ygrid:
        which = {1: 'major', 2: 'minor', 3: 'both'}[ygrid]
        ax.grid(True, which=which, axis='y')
    ## Title
    if title:
        ax.set_title(title, fontsize=title_fontsize)
# Append the shared frame-argument documentation to plot_ax_frame's docstring.
plot_ax_frame.__doc__ += ax_frame_doc
|
from aocd import data
from itertools import repeat, starmap
def remove_matchy_matchy(d):
    """Collapse the first adjacent pair of opposite-case copies of the same
    letter; return d unchanged if no such pair exists."""
    for idx, (left, right) in enumerate(zip(d, d[1:])):
        if left.lower() == right.lower() and left.islower() != right.islower():
            return d[:idx] + d[idx + 2:]
    return d
def stripped(d, l):
    """Fully react polymer d after removing every occurrence of unit l
    (both cases); return the resulting length."""
    # l is expected lowercase (part_b passes letters from set(d.lower())).
    n = d.replace(l, '').replace(l.upper(), '')
    return part_a(n)
def part_a(d):
    """Repeatedly react the polymer until it is stable; return its final length."""
    previous = None
    while d != previous:
        previous, d = d, remove_matchy_matchy(d)
    return len(d)
def part_b(d):
    """Shortest fully-reacted length achievable by deleting one unit type."""
    return min(stripped(d, unit) for unit in set(d.lower()))
def part_a_faster(units):
    # Stack-based single-pass reaction from reddit, kept for posterity —
    # far more efficient than the repeated-scan version above.
    # https://www.reddit.com/r/adventofcode/comments/a3912m/2018_day_5_solutions/eb4iuqo
    stack = []
    for unit in units:
        # ASCII upper/lower pairs differ by exactly 32.
        if stack and abs(ord(unit) - ord(stack[-1])) == 32:
            stack.pop()
        else:
            stack.append(unit)
    return len(stack)
# Worked example from the Advent of Code 2018 day 5 problem statement.
ex1 = 'dabAcCaCBAcCcaDA'
assert part_a(ex1) == 10
print("A: {}".format(part_a(data)))
assert part_b(ex1) == 4
print("B: {}".format(part_b(data)))
# -*- coding: utf-8 -*-
# @Time : 2020/2/27 14:40
# @Author : Deng Wenxing
# @Email : dengwenxingae86@163.com
# @File : baseAsyncIOCoroutine.py
# @Software: PyCharm
from asyncio import Queue
import asyncio
async def add(store, name):
    """Producer: put the integers 0-4 into *store*, one per second, logging
    the queue size after each put."""
    for item in range(5):
        await asyncio.sleep(1)
        await store.put(item)
        print('{} add one {} >>>> size {}'.format(name, item, store.qsize()))
async def reduce(store):
    """Consumer: take ten items from *store*, logging each item and the
    remaining queue size."""
    for _ in range(10):
        item = await store.get()
        print('reduce one {} <<<< size {}'.format(item, store.qsize()))
def demo():
    """Run two producers ('a' and 'b') and one consumer over a shared queue.

    Bug fix: the original used the deprecated
    get_event_loop()/run_until_complete()/close() pattern and created the
    Queue before a loop was running, which breaks on Python 3.10+ where
    asyncio.get_event_loop() is deprecated outside a running loop.
    asyncio.run() creates and closes the loop for us, and the queue is
    created inside the running loop.
    """
    async def _run():
        q = Queue()
        await asyncio.gather(add(q, 'a'), add(q, 'b'), reduce(q))
    asyncio.run(_run())
# Entry point: run the producer/consumer demo.
if __name__ == "__main__":
    demo()
"""
author @shashanknp
created @2020-09-13 01:30:51
"""
def findInfected(i, time, V, current, pos):
    """Depth-first search marking in `pos` every person reachable from
    person i through meetings strictly later than `current`.

    Mutates `pos` in place; returns it for the outermost call (and None
    when i was already visited)."""
    if pos[i] == 1:
        return  # already visited
    pos[i] = 1
    for j in range(len(V)):
        meeting = time[i][j]
        if meeting > current:
            findInfected(j, time, V, meeting, pos)
    return pos
# Read T test cases from stdin; for each, compute the min and max number of
# people that end up infected depending on who is patient zero.
T = int(input())
for it in range(T):
    N = int(input())
    V = list(map(int, input().split()))
    # time[i][j] = moment persons i and j meet (0 = never / already together).
    time = [[0]*N for i in range(N)]
    for i in range(N):
        for j in range(i, N):
            if i == j:
                time[i][j] = 0
            else:
                if V[i] == V[j]:
                    # Equal speeds: they never meet.
                    time[i][j], time[j][i] = 0, 0
                else:
                    # Assumes person k starts at position k with speed V[k]:
                    # i + V[i]*t == j + V[j]*t  =>  t = (i-j)/(V[j]-V[i]).
                    # TODO confirm against the original problem statement.
                    t = (i-j)/(V[j]-V[i])
                    if t > 0:
                        time[i][j], time[j][i] = t, t
                    else:
                        # Meeting would be in the past: treat as never.
                        time[i][j], time[j][i] = 0, 0
    infected = []
    for i in range(N):
        # Infection count if person i is patient zero at time 0.
        infected.append(findInfected(i, time, V, 0, [0 for i in range(N)]).count(1))
    print(min(infected), max(infected))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 19:04:29 2021
@author: USER
"""
import random
# Draw 6 distinct numbers from 1..49 (a lottery-style pick; no repeats).
ans = random.sample(range(1,50),6)
print(ans)
# -*- coding: utf-8 -*-
"""
Updated Jan 21, 2018
The primary goal of this file is to demonstrate a simple unittest implementation
@author: jrr
@author: rk
"""
import unittest
from triangle import classify_triangle
# This code implements the unit test functionality
# https://docs.python.org/3/library/unittest.html has a nice description of the framework
class TestTriangles(unittest.TestCase):
    """Unit tests for triangle.classify_triangle covering each
    classification result and the invalid-input paths."""
    # define multiple sets of tests as functions with names that begin
    def testRightTriangleA(self):
        self.assertEqual(classify_triangle(3, 4, 5), 'Right', '3,4,5 is a Right triangle')
    def testRightTriangleB(self):
        # Same right triangle with the sides given in a different order.
        self.assertEqual(classify_triangle(5, 3, 4), 'Right', '5,3,4 is a Right triangle')
    def testEquilateralTriangles(self):
        self.assertEqual(classify_triangle(1, 1, 1), 'Equilateral', '1,1,1 should be equilateral')
    def testIsoscelesTriangles(self):
        self.assertEqual(classify_triangle(5, 5, 8), 'Isosceles', '5,5,8 should be isosceles')
    def testScaleneTriangles(self):
        self.assertEqual(classify_triangle(4, 7, 5), 'Scalene', '4,7,5 should be scalene')
    def testInvalidTriangles(self):
        # Violates the triangle inequality (3 + 3 < 15).
        self.assertEqual(classify_triangle(3, 3, 15), 'NotATriangle', '3,3,15 is not a triangle')
    def testOutUpperBoundTriangles(self):
        # NOTE(review): presumably classify_triangle enforces an upper bound
        # on side lengths — confirm the limit in triangle.py.
        self.assertEqual(classify_triangle(344, 300, 199), 'InvalidInput', 'data is outside upper bound')
    def testOutLowerBoundTriangles(self):
        self.assertEqual(classify_triangle(0, -1, 2), 'InvalidInput', 'data is outside lower bound')
    def testInvalidDataTriangles(self):
        # Non-numeric input is expected to raise TypeError.
        with self.assertRaises(TypeError):
            self.assertEqual(classify_triangle('x', 'y', 'z'), 'InvalidInput', 'x,y,z is not valid input')
# Allow running this module directly as a test script.
if __name__ == '__main__':
    print('Running unit tests')
    unittest.main()
|
"""This program will determine the cost of painting a wall"""
# 1. Define
import math
def gather_wall_sq_ft():
    """Gather the number of walls, each wall's dimensions, and the number of
    coats from the user; return a list with the square footage of paint
    coverage needed for each wall (width * height * coats).

    Returns:
        list[int]: square feet to paint, one entry per wall.
    """
    walls_to_paint = int(input('Hello, how many walls will you be painting: '))
    list_of_walls_sq_ft = []
    wall_number = 1
    while walls_to_paint > 0:
        # Bug fix: the original concatenated an int into the prompt with a
        # malformed string literal; use an explicit wall counter instead.
        current_wall_width_feet = int(input('Width of wall #' + str(wall_number) + ' in feet: '))
        current_wall_height_feet = int(input('Height of wall in feet: '))
        num_coats = int(input('How many coats would you like to put on your wall: '))
        current_wall_sq_ft = current_wall_width_feet * current_wall_height_feet
        current_wall_sq_ft_to_paint = current_wall_sq_ft * num_coats
        # Bug fix: append to the list instead of overwriting it each pass.
        list_of_walls_sq_ft.append(current_wall_sq_ft_to_paint)
        wall_number += 1
        walls_to_paint -= 1
    return list_of_walls_sq_ft
def generate_total_cost(total_sq_feet_to_paint):
    """Ask the user for the price of a gallon of paint and return the total
    cost in dollars of covering `total_sq_feet_to_paint` square feet,
    buying whole gallons only."""
    GALLON_PAINT_COVERAGE_IN_SQ_FT = 400
    paint_cost_dollars = float(input('Cost of a gallon of paint, in dollars: '))
    # Paint is sold by the whole gallon, so round the requirement up.
    gallons_to_purchase = math.ceil(total_sq_feet_to_paint / GALLON_PAINT_COVERAGE_IN_SQ_FT)
    return float(gallons_to_purchase) * paint_cost_dollars
def generate_output_statement(total_cost_dollars, gallons_to_purchase=None):
    """Build the human-readable job summary.

    Bug fix: the original referenced `gallons_to_purchase`, a name that is
    not defined in this scope, so every call raised NameError. The gallon
    count is now an optional parameter; when omitted the message reports
    only the cost, which keeps the existing call site working.

    :param total_cost_dollars: total job cost in dollars
    :param gallons_to_purchase: optional gallon count to include in the message
    :return: summary sentence as a string
    """
    cost = round(total_cost_dollars)
    # Singular 'dollar' only when the rounded cost is exactly 1.
    unit = 'dollar' if cost == 1 else 'dollars'
    if gallons_to_purchase is None:
        return 'This job will cost ' + str(cost) + ' ' + unit
    return ('This job will require ' + str(gallons_to_purchase) +
            ' gallons of paint and cost ' + str(cost) + ' ' + unit)
# 2. Main
def main():
    """Drive the program: gather wall data, compute the total job cost, and
    print (and return) the summary statement."""
    list_of_walls_sq_ft = gather_wall_sq_ft()
    total_sq_feet_to_paint = sum(list_of_walls_sq_ft)
    total_cost_dollars = generate_total_cost(total_sq_feet_to_paint)
    output_statement = generate_output_statement(total_cost_dollars)
    print(output_statement)
    return output_statement
main()
# 3. Input
# 4. Transform
# 5. Output
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 07:52:43 2016
@author: OffermanTW1
"""
from pandas import Series, DataFrame
#Import regular operators and comparisons
from operator import lt, le, eq, ne, gt, ge
from operator import add, sub, truediv, mul
#Import custom operators and comparison
from operator import or_
from functools import reduce
from collections import Counter
def approx_local_ratio():
    """Placeholder for the fuzzy-match ('fuzzymatch met') operator; not yet
    implemented."""
    return None
class EvaluationEngine():
    """Static lookup tables translating expression symbols (partly Dutch)
    to Python callables, plus classmethod helpers to query them."""
    comparisons = {'<': lt,
                   '<=': le,
                   '==': eq,
                   '!=': ne,
                   '>': gt,
                   '>=': ge,
                   'zit in': None,
                   'zit niet in': None}
    operators = {'+': add,
                 '-': sub,
                 '/': truediv,
                 '*': mul,
                 'fuzzymatch met': approx_local_ratio}
    # Lower number = tighter binding.
    precedence = {'**': 0,
                  '*': 1, '/': 1,
                  '+': 2, '-': 2, 'fuzzymatch met': 2,
                  '<=': 3, '<': 3, '>': 3, '>=': 3,
                  '==': 4, '!=': 4,
                  'in': 5, 'not in': 5}
    # Words linking a column name to a frame name ('price from first row').
    connectors = {'from', 'van'}
    # Dutch frame designators: first row / second row / all rows.
    possible_frames = {'eerste regel', 'tweede regel', 'alle regels'}
    known_elements = reduce(or_, [set(operators),
                                  set(comparisons),
                                  connectors,
                                  possible_frames])
    @classmethod
    def isoperator(cls, operator):
        """Return True if `operator` is a known operator symbol."""
        return operator in cls.operators
    @classmethod
    def iscomparison(cls, comparison):
        """Return True if `comparison` is a known comparison symbol."""
        return comparison in cls.comparisons
    @classmethod
    def getcomparison(cls, comparison):
        """Return the callable for a comparison symbol; NameError if unknown."""
        try:
            return cls.comparisons[comparison]
        except KeyError:  # bug fix: was a bare except, masking real errors
            raise NameError("This comparison is not supported by the engine: " + str(comparison))
    @classmethod
    def getoperator(cls, operator):
        """Return the callable for an operator symbol; NameError if unknown."""
        try:
            return cls.operators[operator]
        except KeyError:  # bug fix: was a bare except
            raise NameError("This operator is not supported by the engine: " + str(operator))
    @classmethod
    def getprecedence(cls, symbol):
        """Return the precedence rank of a symbol; NameError if unknown."""
        try:
            return cls.precedence[symbol]
        except KeyError:  # bug fix: was a bare except
            raise NameError("This operator/comparison is not supported by the engine: " + str(symbol))
class StatementElement():
    """Base class for a binary expression node. Subclasses set self.action
    (the callable to apply) and self.action_obj (the symbol wrapper)."""
    def __init__(self, element1, element2):
        self.element1 = element1
        self.element2 = element2
    def eval_op(self, operand, from_obj):
        """Recursively evaluate one operand against the context `from_obj`."""
        return operand.evaluate_statement(from_obj)
    def evaluate_statement(self, from_obj):
        """Evaluate both operands, then apply self.action, placing a pandas
        structure (Series/DataFrame) first when only one side is one."""
        elements = [self.element1, self.element2]
        op1, op2 = [self.eval_op(n, from_obj) for n in elements]
        op1_pandas_struct = isinstance(op1, Series) or isinstance(op1, DataFrame)
        op2_pandas_struct = isinstance(op2, Series) or isinstance(op2, DataFrame)
        #It is important to change the syntax if one of the ops is a single value
        #It should then be: Pandas Structure -operator- single value
        if op1_pandas_struct and op2_pandas_struct: #Both are pandas struct
            return self.action(op1, op2)
        elif op1_pandas_struct:
            return self.action(op1, op2)
        elif op2_pandas_struct:
            # NOTE(review): swapping the operands inverts the result for
            # non-commutative actions (e.g. '<', '-', '/'); pandas handles
            # scalar-first arguments via reflected operators, so the swap
            # looks unnecessary — confirm before changing.
            return self.action(op2, op1)
        else: #None are a pandas struct
            return self.action(op1, op2)
    def return_variables(self):
        """Return the set of Variable descriptions used anywhere in this subtree."""
        def get_var_from_operand(operand):
            # A nested statement is recursed into; a Variable contributes itself.
            variables = set([])
            if isinstance(operand, StatementElement):
                variables = operand.return_variables()
            elif isinstance(operand, Variable):
                variables = {operand.return_description()}
            return variables
        var1 = get_var_from_operand(self.element1)
        var2 = get_var_from_operand(self.element2)
        variables = var1.union(var2)
        return variables
    def return_frames(self):
        """Return the set of frame names used anywhere in this subtree."""
        def get_frames_from_operand(operand):
            frames = set([])
            if isinstance(operand, FrameWithColumn):
                frames = {operand.return_description()}
            elif isinstance(operand, StatementElement):
                frames = operand.return_frames()
            return frames
        frame1 = get_frames_from_operand(self.element1)
        frame2 = get_frames_from_operand(self.element2)
        frames = frame1.union(frame2)
        return frames
    def return_abstract_repr(self):
        """Abstract representation of this node's operator/comparison symbol."""
        return self.action_obj.return_abstract_repr()
class OperatorStatement(StatementElement):
    """Binary node applying an arithmetic/fuzzy operator (e.g. '+', '*')."""
    def __init__(self, operator, operand1, operand2):
        self.action_obj = operator
        # Resolve the symbol to its callable once, at construction time.
        self.action = EvaluationEngine.getoperator(str(operator))
        super(OperatorStatement, self).__init__(operand1, operand2)
class ComparisonStatement(StatementElement):
    """Binary node applying a comparison (e.g. '<', '==')."""
    def __init__(self, operator,operand1, operand2):
        self.action_obj = operator
        # Resolve the symbol to its callable once, at construction time.
        self.action = EvaluationEngine.getcomparison(str(operator))
        super(ComparisonStatement, self).__init__(operand1, operand2)
class FrameWithColumn():
    """Reference to a column of a named frame attribute on the evaluation
    context object; spaces in the frame name are stripped."""
    def __init__(self, framename, column):
        self.frame = framename.replace(' ', '')
        self.column = column
    def evaluate_statement(self, from_obj):
        """Return the column from getattr(from_obj, self.frame).

        Raises NameError when the frame attribute or the column is missing.
        Bug fix: the bare `except:` clauses were narrowed so unrelated
        failures (including KeyboardInterrupt) are no longer masked.
        """
        try:
            frame = getattr(from_obj, self.frame)
        except AttributeError:
            raise NameError("Could not retrieve this frame: " + str(self.frame))
        try:
            return frame[self.column]
        except (KeyError, IndexError, TypeError):
            raise NameError("This column is not in the frame: " + str(self.column))
    def return_description(self):
        """The frame name (whitespace removed)."""
        return self.frame
    def return_abstract_repr(self):
        """Abstract representation: '<column> Frame'."""
        return '{0} Frame'.format(self.column)
class Variable:
    """Reference to a plain attribute on the evaluation context object."""
    def __init__(self, description):
        self.description = description
    def evaluate_statement(self, from_obj):
        """Return getattr(from_obj, self.description).

        Bug fix: catch only AttributeError instead of a bare except, so
        unrelated failures are not silently converted to NameError.
        """
        try:
            return getattr(from_obj, self.description)
        except AttributeError:
            raise NameError("This variable is not defined")
    def return_description(self):
        """The attribute name this variable refers to."""
        return self.description
    def return_abstract_repr(self):
        """Abstract representation: just the variable name."""
        return self.return_description()
class OperCompar():
    """Wrapper for an operator/comparison symbol parsed out of an expression."""
    def __init__(self, symbol):
        self.symbol = symbol
    def __str__(self):
        return self.symbol
    def getprecedence(self):
        """Precedence rank from EvaluationEngine (lower binds tighter)."""
        return EvaluationEngine.getprecedence(self.symbol)
    def return_description(self):
        return self.symbol
    def return_abstract_repr(self):
        """Abstract representation: the symbol itself."""
        return self.return_description()
class Operator(OperCompar):
    """Marker subclass: an arithmetic/fuzzy operator symbol."""
    pass
class Comparison(OperCompar):
    """Marker subclass: a comparison symbol."""
    pass
class Expression():
    def __init__(self, string):
        """Parse `string` into element objects and build the expression tree."""
        self.object_elements = self.parse_expression(string)
        if len(self.object_elements) == 0:
            raise NameError('No object elements found')
        expression = self.construct_expression(self.object_elements)
        self.raw_expression = string
        # construct_expression returns a one-element list holding the root node.
        self.expression = expression[0]
    def parse_expression(self, string):
        """Tokenize `string` into Variable / FrameWithColumn / Operator /
        Comparison objects.

        Known symbols are first fenced with ' # ' markers, then the string
        is split on those markers. Connector words ('from'/'van') fold the
        neighbouring column and frame tokens into one FrameWithColumn.
        """
        for known_ in EvaluationEngine.known_elements:
            string = string.replace(" {0} ".format(known_), " # {0} # ".format(known_))
        parsed_elements = []
        # Pad with spaces so symbols at either end of the input also match.
        string = " " + string + " "
        elements = [n.strip() for n in string.split(" # ")]
        elements = [n for n in elements if n != '']
        last_index = len(elements) - 1
        for index, element in enumerate(elements):
            if element in EvaluationEngine.connectors:
                # A connector must sit between a column name and a frame name.
                assert 0 < index < last_index
                column, frame = elements[index-1], elements[index+1]
                parsed_elements.append(FrameWithColumn(frame, column))
            elif element in EvaluationEngine.comparisons:
                parsed_elements.append(Comparison(element))
            elif element in EvaluationEngine.operators:
                parsed_elements.append(Operator(element))
            elif element in EvaluationEngine.possible_frames:
                # Frame names are consumed by the connector branch above.
                error_string = "{0} not found in connectors, check hashtags".format(elements[index-1])
                assert elements[index-1] in EvaluationEngine.connectors, error_string
            elif index != last_index and elements[index+1] in EvaluationEngine.connectors:
                # Column name belonging to an upcoming connector: skip it here.
                continue
            else:
                parsed_elements.append(Variable(element))
        return parsed_elements
def construct_expression(self, object_elements):
object_elements = list(object_elements) #So original stays the same
precedences = []
for index, obj in enumerate(object_elements):
if isinstance(obj, OperCompar):
precedences.append(index)
indices = sorted(precedences, key = lambda x: object_elements[x].getprecedence())
for i in range(len(indices)):
index = indices[i]
operand1, opcompar, operand2 = object_elements[index-1:index+2]
if isinstance(opcompar, Comparison):
obj = ComparisonStatement(opcompar, operand1, operand2)
elif isinstance(opcompar, Operator):
obj = OperatorStatement(opcompar, operand1, operand2)
object_elements[index] = obj
object_elements.pop(index+1)
object_elements.pop(index-1)
for remaining_index in range(i, len(indices[i:]) + i):
if indices[remaining_index] > index:
indices[remaining_index] -= 2
return object_elements
def evaluate(self, from_obj):
return self.expression.evaluate_statement(from_obj)
def return_frames(self):
e = 'return_frames only works on statements, not frames or variables'
assert isinstance(self.expression, StatementElement), e
return self.expression.return_frames()
def return_variables(self):
e = 'return_variables only works on statements, not frames or variables'
assert isinstance(self.expression, StatementElement), e
return self.expression.return_variables()
def return_abstract_representation(self):
abstr_string = ""
from collections import defaultdict
abstr_dict = defaultdict(set)
#We use set because it will make it easier to determine whether
#the same variable/column is mentioned in the frames
for obj in self.object_elements:
abstr_repr = obj.return_abstract_repr()
abstr_string += '{0} '.format(abstr_repr)
abstr_dict[type(obj)].add(abstr_repr)
return abstr_string.strip(), abstr_dict
def compare_with_other_expression(self, other):
self_str, self_dict = self.return_abstract_expression()
other_str, other_dict = self.return_abstract_representation()
#TO DO: make this
raise NameError('This function is still under construction')
|
from pprint import pprint
from fb_mcbot.models import FBUser, Conversation, StudentSociety, Admin, Major, Course
class Question:
    """Registry mapping conversation question names to their numeric codes."""
    question_type = {'NOTHING': 0, 'USER_TYPE': 1, 'AUTHENTICATE': 2, 'CHANGE_STATUS': 9, 'EVENT_TYPES':10}
    def get_question_type(question):
        """Return the numeric code for *question*, or None (after logging)
        when the name is unknown."""
        try:
            # Bug fix: dict.get() never raises KeyError, so the original
            # except-branch was dead code and unknown names silently returned
            # None; indexing makes the miss path actually fire.
            return Question.question_type[question]
        except KeyError:
            pprint("Internal Error! " + question + " is not a question type!")
            return None
class UserService:
    """Data-access helpers around the FBUser, Conversation, StudentSociety,
    Admin, Major and Course models.

    Methods take no 'self' parameter and are called directly on the class,
    mirroring the original API.
    """

    def getUser(userid):
        """Return the FBUser with the given user_id, or None if absent."""
        try:
            return FBUser.objects.get(user_id=userid)
        except FBUser.DoesNotExist:
            pprint("User id not found in db, the user does not exist.")
            return None

    def getAllUsers():
        """Return all FBUser rows, or None on lookup failure."""
        try:
            return FBUser.objects.all()
        except FBUser.DoesNotExist:
            pprint("DataBase Error")
            return None

    def getStudentsInCourse(courseName):
        """Return all users enrolled in the named course; re-raises when the
        course does not exist."""
        try:
            course = Course.objects.get(name=courseName)
        except Course.DoesNotExist:
            raise
        return course.fbuser_set.all()

    def create_new_user(user_info, user_id):
        """Create and persist a new FBUser from a profile-info dict."""
        pprint("Creating new user")
        new_user = FBUser(
            first_name=user_info['first_name'],
            last_name=user_info['last_name'],
            user_id=user_id,
            timezone=user_info['timezone'],
        )
        new_user.save()
        return new_user

    def create_new_conversation(fbuser):
        """Create a conversation for *fbuser* starting at the USER_TYPE question."""
        pprint("Creating new conversation")
        conversation = Conversation()
        conversation.fbuser = fbuser
        # default question USER_TYPE
        conversation.question = Question.get_question_type('USER_TYPE')
        conversation.save()
        return conversation

    def get_conversation(fbuser):
        """Return the user's conversation, or None when none exists."""
        try:
            return Conversation.objects.get(fbuser=fbuser)
        except Conversation.DoesNotExist:
            pprint("Conversation with " + fbuser.user_id + " not found in db")
            return None

    def get_student_society(fbuser):
        """Return the user's student society, or None when none exists."""
        try:
            return StudentSociety.objects.get(fbuser=fbuser)
        except StudentSociety.DoesNotExist:
            pprint("Student StudentSociety with " + fbuser.user_id + " not found in db")
            return None

    def is_admin(fbuser):
        """True when an Admin row exists for *fbuser*."""
        try:
            Admin.objects.get(fbuser=fbuser)
        except Admin.DoesNotExist:
            return False
        return True

    def get_major(fbuser):
        """True when the user has a major set (also prints the user for debugging)."""
        print(fbuser)
        return bool(fbuser.major)

    def set_major(fbuser, major):
        """Set the user's major by name and return a confirmation prompt."""
        fbuser.major = Major.objects.get(name=major)
        fbuser.save()
        return ("You've set your major to " + fbuser.major.name + ". What courses are you taking?")

    def add_courses(fbuser, courses):
        """Add courses (by primary key) to the user and return the updated list."""
        for course_pk in courses:
            fbuser.courses.add(Course.objects.get(pk=course_pk))
        fbuser.save()
        listing = "".join(c.name + " ," for c in fbuser.courses.all())
        return ("Your courses are now: " + listing)
|
import mnist_loader

# Load MNIST as (training, validation, test) splits of (input, label) pairs
# and materialize the training generator so it can be iterated repeatedly.
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
# print(len(training_data))
# print(training_data[0][0].shape)
# x, y = training_data[0]
# print("Training data shape")
# print(x.shape)
# print(y.shape)
# Display the image
# from matplotlib import pyplot as plt
# plt.imshow(training_data[1000][0].reshape((28,28)), interpolation='nearest',cmap='gray')
# plt.show()
import dnn

# Network layout: 784 input pixels -> 30 hidden units -> 10 output classes.
net = dnn.DNN([784, 30, 10])
# print(net.feedForward(training_data[1000][0]))
# Train with stochastic gradient descent; eta is the learning rate and the
# validation split is used as the per-epoch evaluation set.
net.sgd(training_data=training_data, epochs=30, mini_batch_size=10, eta=10.0, test_data=validation_data)
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
import RPi.GPIO as GPIO
import time
# Create your views here.
def index(request):
    """Render the SmartOutlet home page."""
    return render(request, 'SmartOutlet/home.html', context={'key': 'value'})
def outlets(request):
    """Render the outlets overview page."""
    return render(request, 'SmartOutlet/outlets.html', context={'key': 'value'})
def toggle_outlet(request):
    """AJAX endpoint driving the outlet relay on BCM pin 18.

    Query parameter 'command': "on" energizes the pin; any other value
    (including missing) de-energizes it. Always returns a JSON status.
    """
    # Bug fix: is_ajax is a method — the bare attribute reference was always
    # truthy, so the check never actually filtered non-AJAX requests.
    if request.is_ajax():
        command = request.GET.get('command', None)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(18, GPIO.OUT)
        GPIO.output(18, command == "on")
        # NOTE(review): GPIO.cleanup() resets the channel, which may release
        # the output level just written — confirm the relay latches, or move
        # cleanup to application shutdown.
        GPIO.cleanup()
    return JsonResponse({"error": "", "status": 200})
|
from tkinter import *
import sys
import os
# Grid dimensions from the command line: argv[1] = columns (x), argv[2] = rows (y).
columns = int(sys.argv[1]) #x
rows = int(sys.argv[2]) #y
# Intercept of the diagonal y = (-rows/columns) * x + rows drawn by redraw().
b = rows
print("line equation is y= ",-rows/columns, "x + ", b)
print("point({}:0)".format(columns))
print("point(0:{})".format(rows))
print("row = {}\ncolumn = {}".format(rows,columns))
root = Tk()
# NOTE(review): w and h are never read again; redraw() uses its own locals.
w = 0
h = 0
#------------------------------------------------------------------------
def redraw(event):
    """Redraw the grid lines and the diagonal on every canvas resize."""
    canvas.delete('all')
    cell_w = event.width / columns
    cell_h = event.height / rows
    # Vertical grid lines, one per interior column boundary.
    for c in range(1, columns):
        x = cell_w * c
        canvas.create_line(x, 0, x, event.height, fill="white", width=2)
    # Horizontal grid lines, one per interior row boundary.
    for r in range(1, rows):
        y = cell_h * r
        canvas.create_line(0, y, event.width, y, fill="white", width=2)
    # Diagonal from the top-left to the bottom-right corner.
    canvas.create_line(0, 0, event.width, event.height, fill="white", width=2)
#------------------------------------------------------------------------
canvas = Canvas(root, width = 400, height = 400, background='grey')
canvas.pack(fill = BOTH,expand=1)
#canvas.update()
# Redraw the grid whenever the canvas is resized (fires once at startup too).
canvas.bind("<Configure>",redraw)
mainloop()
|
# coding: utf-8
from collections.abc import MutableSequence, MutableMapping, MutableSet
from types import SimpleNamespace
import copy, itertools, uuid, pprint
import sys, io
import importlib
import threading
from contextlib import contextmanager
import dictdiffer
from tinysync.wrappers import *
from tinysync.persistence import *
from tinysync.util import *
from tinysync.sync import *
class NoNameNoPersistence:
    """Sentinel default for track(name=...): marks that no name was given,
    which disables persistence for the tracked structure."""
    def __str__(self):
        return "<No name - no persistence>"
def track(
    target,
    name=NoNameNoPersistence(),
    persist=None,
    sync=False,
    change_callback=None,
    history=False,
    conflict_callback=None,
    path_prefix=None,
    dot_access=None,
):
    """ Main function to start tracking changes to structures.
    Give it a structure consisting of dicts,
    lists, sets and contained objects, and
    it returns an object that looks much the
    same but tracks changes.
    Parameters:
    * `target`: The data structure to track.
    * `name`: Name is used for persistence or synchronization, e.g. as the file name.
      It is also included in change notifications.
    * `persist`: Optional - Overrides class default persistence setting as defined by
      the `Tracker` class `default_persistence` attribute.
    * `sync`: Optional - Conduit used to sync this data structure with other devices.
    * `change_callback`: Optional - Function that is called every time
      the tracked structure is changed.
    * `history`: Optional - True for unbounded undo history, or an int capacity.
    * `conflict_callback`: Optional - Function called to resolve conflicts
      between the latest changes and the persisted values.
    * `path_prefix`: Optional - Path prefix as a list of segments.
    * `dot_access`: Optional - True or False to indicate whether you want this
      tracked structure's dict values to be accessible with the attribute-like
      dot notation. Default is False unless changed globally by calling the
      `dot_off` function.
    """
    tracked = None
    persistence = None
    path_prefix = path_prefix if path_prefix is not None else []
    # Already-tracked structures are returned unchanged.
    if istracked(target):
        return target
    # Persistence applies only when a real name was given and persist was not
    # explicitly disabled with False.
    if not isinstance(name, NoNameNoPersistence) and persist is not False:
        if persist is not None and persist is not True:
            # persist may be a ready Persistence instance or a Persistence class.
            if isinstance(persist, Persistence):
                persistence = persist
            elif issubclass(persist, Persistence):
                persistence = persist(name)
        elif (
            Handler.persistence_default is not None
        ): # issubclass(Handler.persistence_default, Persistence):
            persistence = Handler.persistence_default(name)
    # If a persisted copy exists, it replaces the given target.
    initial = True
    if persistence is not None:
        loaded_target = persistence.load()
        if loaded_target is not None:
            target = loaded_target
            initial = False
    handler = Handler(
        target,
        name,
        persistence,
        sync,
        change_callback,
        history,
        conflict_callback,
        path_prefix,
        dot_access,
    )
    # First-time structures are written out immediately.
    if persistence is not None and initial:
        persistence.dump(handler.root)
    return handler.root
@contextmanager
def atomic(tracked, remote=False):
    """ Context manager used to ensure that
    all the changes are applied to the tracked data structure, or none.
    Delays any change notifications, saves and syncs until successful
    completion of the context. """
    with tracked:
        # Suspend change tracking; take a snapshot to roll back to on failure.
        handler(tracked).track = False
        backup_copy = copy.deepcopy(tracked)
        try:
            yield #transient_copy
        except:
            # Restore the snapshot and re-wrap it before re-raising.
            tracked.__subject__ = backup_copy
            handler(tracked).start_to_track(tracked, [], force=True)
            handler(tracked).track = True
            raise
        else:
            # Success: emit one aggregate change notification for everything.
            all_changes = list(dictdiffer.diff(backup_copy, tracked))
            handler(tracked).track = True
            handler(tracked).on_change(tracked, all_changes, remote)
        #finally:
        #    tracked._tracker.handler.tracked = True
'''
handler.save_changes = False
handler.sync_on = False
handler.save()
handler.save_changes = True
handler.sync_on = True
if handler.sync is not None:
handler.sync.update_others()
'''
class Handler:
    """Coordinator for one tracked structure: wraps the subject in tracker
    wrappers, receives change notifications, and fans them out to history,
    callbacks, sync and persistence."""
    persistence_default = SafeYamlFile
    dot_access_default = False
    def __init__(
        self,
        subject,
        name,
        persist,
        sync_conduit,
        change_callback,
        history,
        conflict_callback,
        path_prefix,
        dot_access,
    ):
        self.lock = threading.RLock()
        self.name = name
        self.persist = persist
        self.change_callback = change_callback
        self.conflict_callback = conflict_callback
        self.path_prefix = path_prefix
        #self.change_paths = ChangePathItem()
        self.save_changes = True
        self.track = True
        # history=True means unbounded history (capacity 0); an int caps it.
        self.history = None if not history else History(self, 0 if history is True else history)
        dot_access_on = (
            dot_access if dot_access is not None else self.dot_access_default
        )
        self.trackable_types = trackable_types.copy()
        if dot_access_on:
            self.trackable_types[MutableMapping] = DictWrapper_Dot
        self.root = self.start_to_track(subject, path_prefix)
        if sync_conduit is not False:
            sync_name = 'default' if type(name) is not str else name
            self.sync = Sync(
                {},
                content=self.root,
                data_id=sync_name,
                conduit=sync_conduit,
            )
            self.sync_on = True
        else:
            self.sync = None
            self.sync_on = False
    def on_change(self, target, changes, remote=False):
        """Dispatch a change set: upgrade new content, record history, notify
        the callback, sync, and persist."""
        if not self.track:
            return
        self.make_updates(target)
        #self.record_change_footprint(target._tracker.path)
        #if self.change_action:
        #    self.change_action()
        if self.history is not None:
            self.history.new_entry(changes)
        # Bug fix: change_data was previously created only inside the
        # change_callback branch, so persist.change_advisory() below raised
        # NameError whenever persistence was on but no callback was set.
        change_data = SimpleNamespace(
            name=self.name,
            root=self.root,
            path=target._tracker.path,
            target=target,
            changes=changes
        )
        if self.change_callback:
            self.change_callback(change_data)
        if not remote and self.sync_on and self.sync is not None:
            self.sync.update_others()
        if self.persist is not None:
            self.persist.change_advisory(change_data)
            if self.save_changes:
                self.save()
    def save(self):
        """Dump the whole root through the persistence backend, if any."""
        if self.persist is not None:
            self.persist.dump(self.root, self, self.conflict_callback)
    def load(self, key, path):
        """Lazily load one key from persistence and wrap it for tracking."""
        value = self.persist.load_specific(key)
        tracked_value = self.start_to_track(value, path + [key])
        return tracked_value
    def start_to_track(self, target, path, force=False):
        """Wrap *target* in the tracker wrapper matching its type.

        Raises TypeError for values that are neither trackable containers
        nor objects with a __dict__.
        """
        if not force and (
            istracked(target) or isinstance(target, LazyLoadMarker)
        ):
            return target
        tracked = None
        for abc in self.trackable_types:
            if isinstance(target, abc):
                tracked = self.trackable_types[abc](target, path, self)
        if tracked is None and hasattr(target, "__dict__"):
            tracked = CustomWrapper(target, path, self)
        if tracked is not None:
            self.make_updates(tracked)
            return tracked
        raise TypeError(
            "'%s' does not have a trackable type: %s" % (target, type(target))
        )
    def make_updates(self, node):
        """ Checks to see if some of the changed node's contents now need to
        be tracked.
        """
        to_upgrade = []
        for key, value in self.get_iterable(node):
            if self.should_upgrade(value):
                to_upgrade.append((key, value))
            else:
                # Keep child paths in sync with their current position.
                if istracked(value):
                    value._tracker.path = node._tracker.path + [key]
        for key, value in to_upgrade:
            self.set_value(
                node.__subject__,
                key,
                value,
                self.start_to_track(value, node._tracker.path + [key]),
            )
    def should_upgrade(self, contained):
        """True when *contained* is a trackable value not yet wrapped."""
        if istracked(contained):
            return False
        for abc in trackable_types.keys():
            if isinstance(contained, abc):
                return True
        if hasattr(contained, "__dict__"):
            return True
        if hasattr(contained, "__hash__"):
            return False
        raise TypeError("Not a trackable or hashable type: " + str(contained))
    def get_iterable(self, obj):
        """ Returns a (key, value) iterator regardless of object type. """
        if isinstance(obj, MutableSequence):
            return list(enumerate(obj))
        elif isinstance(obj, MutableMapping):
            return list(obj.items())
        elif isinstance(obj, MutableSet):
            return [(value, value) for value in obj]
        elif hasattr(obj, "__dict__"):
            return list(obj.__dict__.items())
        else:
            raise TypeError("Cannot return an iterator for type " +
            str(type(obj)))
    def set_value(self, obj, key, old_value, new_value):
        """Replace old_value with new_value in *obj*, whatever its type."""
        if isinstance(obj, MutableSequence) or isinstance(obj, MutableMapping):
            obj[key] = new_value
        elif isinstance(obj, MutableSet):
            obj.remove(old_value)
            obj.add(new_value)
        elif hasattr(obj, "__dict__"):
            # Bug fix: 'object.setattr' does not exist and always raised
            # AttributeError; use the setattr builtin instead.
            setattr(obj, key, new_value)
        else:
            raise TypeError("Cannot set value for type " + str(type(obj)))
    def get_value(self, obj, key):
        """Fetch *key* from *obj*, handling sets and attribute access."""
        if isinstance(obj, MutableSet):
            return key
        elif isinstance(key, str) and hasattr(obj, key):
            return getattr(obj, key)
        else:
            return obj[key]
    def at(self, path):
        """Walk *path* (a list of keys) down from the root and return the node."""
        current = self.root
        for key in path:
            current = self.get_value(current, key)
        return current
    def set(self, path, value):
        """Set the node at *path* to *value*; the root itself cannot be set."""
        assert isinstance(path, list)
        if len(path) == 0:
            raise ValueError("Empty path, cannot set root")
        current = self.root
        for key in path[:-1]:
            current = self.get_value(current, key)
        key = path[-1]
        old_value = self.get_value(current, key)
        self.set_value(current, path[-1], old_value, value)
    '''
    def record_change_footprint(self, change_path):
        change_id = str(uuid.uuid4())[-12:]
        current = self.change_paths
        for node in change_path:
            current.change_id = change_id
            current = current.setdefault(node, ChangePathItem())
        current.change_id = change_id
        current.end_id = change_id
        # Any older, more detailed changes are no longer interesting
        current.clear()
    '''
    def copy(self):
        """ Returns a deep copy of the handler but not of the handled. """
        root = self.root
        self.root = None
        new_me = copy.deepcopy(self)
        self.root = root
        return new_me
'''
class ChangePathItem(dict):
"Class to enable adding a change ID to change path items."
'''
def handler(tracked_object):
    """Return the Handler behind a tracked object.

    The handler exposes the tracker's key configuration:
    name (read-only), persist, change_callback, conflict_callback,
    save_changes.

    Raises TypeError when the argument is not tracked.
    """
    if istracked(tracked_object):
        return tracked_object._tracker.handler
    raise TypeError(
        "Cannot return a handler for non-tracked object of type %s"
        % type(tracked_object)
    )
def config(tracked_object, **kwargs):
    """Set handler options on a tracked object by keyword.

    Each keyword must name an existing handler attribute; unknown names
    raise AttributeError.
    """
    if not istracked(tracked_object):
        raise TypeError(
            "Cannot configure a non-tracked object of type %s" % type(tracked_object)
        )
    target_handler = tracked_object._tracker.handler
    for key, value in kwargs.items():
        if not hasattr(target_handler, key):
            raise AttributeError("%s is not an available option to set" % key)
        setattr(target_handler, key, value)
def istracked(obj):
    """Return True if *obj* is already wrapped by a tinysync tracker."""
    return issubclass(type(obj), TrackerWrapper)
class History(list):
    """Undo/redo stack of dictdiffer change sets, newest first.

    `active` is the index of the next entry to undo (0 = fully redone).
    `capacity` of 0 means unbounded.
    """
    def __init__(self, handler, capacity):
        super().__init__()
        self.handler = handler
        self.capacity = capacity
        self.active = 0
    def new_entry(self, delta):
        """Record a new change set, discarding any entries that were undone."""
        del self[:self.active]
        self.insert(0, delta)
        self.active = 0
        if self.capacity > 0:
            del self[self.capacity:]
    def undo(self):
        """Revert the next change set and return the new `active` index.

        NOTE(review): the guard also prevents undoing the oldest entry
        (active + 1 == len) — confirm that is intentional.
        """
        if self.active + 1 >= len(self):
            return self.active
        delta = self[self.active]
        # Tracking is suspended so the revert itself is not recorded.
        with self.handler.root:
            self.handler.track = False
            dictdiffer.revert(delta, self.handler.root, in_place=True)
            self.handler.track = True
        self.active += 1
        return self.active
    def redo(self):
        """Re-apply the most recently undone change set."""
        if self.active == 0:
            return self.active
        self.active -= 1
        delta = self[self.active]
        # Tracking is suspended so the patch itself is not recorded.
        with self.handler.root:
            self.handler.track = False
            dictdiffer.patch(delta, self.handler.root, in_place=True)
            self.handler.track = True
        return self.active
def deepcopy_tracked(obj):
    """Deep-copy a tracked structure, returning a new tracked copy."""
    if not istracked(obj):
        raise TypeError("Cannot copy a non-tracked type: " + str(type(obj)))
    return _track_and_copy_meta(copy.deepcopy(obj), obj)
def _track_and_copy_meta(content, source_tracker):
    """Track *content* with a fresh handler and carry metadata over from the
    source object's handler."""
    old_handler = source_tracker._tracker.handler
    tracked = track(content, old_handler.name)
    new_handler = tracked._tracker.handler
    # 'change_paths' exists only when the (currently disabled) change-footprint
    # feature is on; copy it only if present to avoid an AttributeError.
    if hasattr(old_handler, 'change_paths'):
        new_handler.change_paths = copy.deepcopy(old_handler.change_paths)
    # Bug fix: Handler stores the notification hook as 'change_callback';
    # there is no 'callback' attribute.
    new_handler.change_callback = old_handler.change_callback
    return tracked
'''
def diff_paths(earlier_version, later_version=None):
if later_version is not None:
earlier_version = earlier_version._tracker.handler.change_paths
later_version = later_version._tracker.handler.change_paths
else:
earlier_version = ChangePathItem()
later_version = earlier_version._tracker.handler.change_paths
paths = []
def get_paths(earlier, later, path):
if earlier is not None and later.change_id == earlier.change_id:
return
if hasattr(later, "end_id"):
if not hasattr(earlier, "end_id") or later.end_id != earlier.end_id:
paths.append(path)
return
for key in later:
new_earlier = earlier
if new_earlier is not None:
new_earlier = earlier.get(key, None)
get_paths(new_earlier, later[key], path + [key])
get_paths(earlier_version, later_version, [])
return paths
'''
'''
def diff(earlier_version, later_version):
paths = diff_paths(earlier_version, later_version)
results = []
for path in paths:
earlier = earlier_version._tracker.handler.at(path)
later = later_version._tracker.handler.at(path)
results.append(dictdiffer.diff(earlier, later, node=path))
return itertools.chain(*results)
'''
'''
def patch(changes, target, in_place=False):
if not istracked(target):
raise TypeError(
"This method is intended to patch a tracked type, not " + str(type(obj))
)
if not in_place:
target = deepcopy_tracked(target)
dictdiffer.patch(changes, target, in_place=True)
return target
'''
'''
def revert(changes, target, in_place=False):
if not istracked(target):
raise TypeError(
"This method is intended to revert a tracked type, not " + str(type(obj))
)
if not in_place:
target = deepcopy_tracked(target)
dictdiffer.revert(changes, target, in_place=True)
return target
'''
def dot_off():
    """Globally default new trackers to plain dict access (no dot notation)."""
    Handler.dot_access_default = False
    # trackable_types[MutableMapping] = DictWrapper_Not
def dot_on():
    """Globally default new trackers to attribute-style dict access."""
    Handler.dot_access_default = True
    # trackable_types[MutableMapping] = DictWrapper_Dot
class HandlerProxy:
    """ Object returned by the `handler` function.
    Provides restricted access to key handler properties:
    name is read-only; persist, sync, sync_on, change_callback,
    conflict_callback and save_changes are read/write passthroughs. """
    def __init__(self, tracked_object):
        # The real Handler instance all properties delegate to.
        self.handler = tracked_object._tracker.handler
    @property # read only
    def name(self):
        return self.handler.name
    @property
    def persist(self):
        return self.handler.persist
    @persist.setter
    def persist(self, value):
        self.handler.persist = value
    @property
    def sync(self):
        return self.handler.sync
    @sync.setter
    def sync(self, value):
        self.handler.sync = value
    @property
    def sync_on(self):
        return self.handler.sync_on
    @sync_on.setter
    def sync_on(self, value):
        self.handler.sync_on = value
    @property
    def change_callback(self):
        return self.handler.change_callback
    @change_callback.setter
    def change_callback(self, value):
        self.handler.change_callback = value
    @property
    def conflict_callback(self):
        return self.handler.conflict_callback
    @conflict_callback.setter
    def conflict_callback(self, value):
        self.handler.conflict_callback = value
    @property
    def save_changes(self):
        return self.handler.save_changes
    @save_changes.setter
    def save_changes(self, value):
        self.handler.save_changes = value
if __name__ == "__main__":
class TestCatcher:
change = "No change"
def cb(self, change):
for key in change.__dict__:
self.__dict__[key] = change.__dict__[key]
import doctest
extraglobs = {"catcher": TestCatcher()}
# Handler.persistence_default = None
doctest.testmod(extraglobs=extraglobs)
extraglobs.update(importlib.import_module("tracker").__dict__)
CouchDB.server_address = track({}, "couchdb-conf").couchdb_url
doctest.testfile("README.md", extraglobs=extraglobs)
# Remove temporary example files
import os, glob
os.remove("example-config.yaml")
for f in glob.glob("example-dbm.dbm.*"):
os.remove(f)
"""
l = [0, 2]
m = track(l, persist=False)
with m:
m[0] = { 'a': {'b': {'c', 'd'}}}
m.append(3)
m.append({4: 5})
m[3][6] = 7
assert m._tracker.handler.change_paths == {3: {}}
back_to_l = copy.deepcopy(m)
assert type(back_to_l[3]) == dict
n = deepcopy_tracked(m)
assert type(n[3]) == DictWrapper
n[3][6] = { 7: 8 }
n[3][6][7] = 9
assert n._tracker.handler.change_paths == {3: {6: {}}}
assert diff_paths(m, n) == [[3]]
o = deepcopy_tracked(n)
o[0]['a']['b'].add('e')
d = diff(m, o)
res = patch(d, m)
assert o == res
assert type(res) == type(m)
g = { 'a': SimpleNamespace(b=1)}
catcher = TestCatcher()
h = track(g, callback=catcher.cb)
h.a.b = 'new value'
assert catcher.target.b == 'new value'
h.a._tracker.foobar = 'blaa'
"""
|
input = open("input", "r")
inputLines = input.readlines()
input.close()
inputNums = []
for line in inputLines:
inputNums.append(int(line.rstrip()))
inputNums.sort()
# targetJolts = inputNums[-1] + 3
oneDiffs = 0
threeDiffs = 0
inputNums.insert(0, 0) # insert 0 at beginning for charging input
for n in range(0, len(inputNums)):
if n + 1 == len(inputNums):
threeDiffs += 1
break
currentAdapter = inputNums[n]
nextAdapter = inputNums[n + 1]
adapterDifference = nextAdapter - currentAdapter
if adapterDifference == 1:
oneDiffs += 1
elif adapterDifference == 3:
threeDiffs += 1
print(oneDiffs * threeDiffs) |
import psycopg2
class PostgresConnection:
    """Thin wrapper owning one psycopg2 connection and a shared cursor.

    Both are closed automatically when the wrapper is garbage-collected.
    """
    def __init__(self, _host, _database, _user, _pass):
        self.conn = psycopg2.connect(
            host=_host,
            database=_database,
            user=_user,
            password=_pass)
        self.conn.set_client_encoding('UTF8')
        self.cur = self.conn.cursor()
    def get_cursor(self):
        """Return the shared cursor."""
        return self.cur
    def get_conn(self):
        """Return the underlying connection."""
        return self.conn
    def __del__(self):
        # Bug fix: if psycopg2.connect() raised inside __init__, 'conn'/'cur'
        # were never set and the original __del__ raised AttributeError during
        # garbage collection.
        cur = getattr(self, 'cur', None)
        if cur is not None:
            cur.close()
        conn = getattr(self, 'conn', None)
        if conn is not None:
            conn.close()
|
__author__ = 'leebird'
import re
import codecs
import os
import random
import string
class TextProcessor(object):
    """Utilities for stripping <...> and {...} tag spans from text."""

    # in text there may be {not_tag} <not_tag>
    # may be a more strict pattern for bionex tags
    pattern_bracket = re.compile(r'<[^<>]*?>')
    pattern_brace = re.compile(r'\{[^{}]*?\}')
    pattern_open_bracket = re.compile(r'<([^<>/]*?)>')
    pattern_close_bracket = re.compile(r'</([^<>/]*?)>')

    def __init__(self):
        pass

    @classmethod
    def remove_bracket(cls, text):
        """Strip every <...> span from *text*."""
        return cls.pattern_bracket.sub('', text)

    @classmethod
    def remove_brace(cls, text):
        """Strip every {...} span from *text*."""
        return cls.pattern_brace.sub('', text)

    @classmethod
    def remove_tags(cls, text):
        """Strip both brace and bracket spans from *text*."""
        return cls.remove_bracket(cls.remove_brace(text))
class FileProcessor(object):
    """Small helpers for UTF-8 file I/O."""

    @staticmethod
    def read_file(filepath):
        """Return the file's full text, or None if *filepath* is not a regular file."""
        if not os.path.isfile(filepath):
            # Explicit None (the original fell off the end implicitly).
            return None
        # 'with' guarantees the handle is closed even if read() raises.
        with codecs.open(filepath, 'r', 'utf-8') as f:
            return f.read()

    @staticmethod
    def open_file(filepath):
        """Return an open UTF-8 file object (caller must close it), or None if missing."""
        if os.path.isfile(filepath):
            return codecs.open(filepath, 'r', 'utf-8')
        return None

    @staticmethod
    def write_file(filepath, content, flag='w'):
        """Write *content* to *filepath*; *flag* is the open mode ('w' or 'a')."""
        # 'with' guarantees the handle is closed even if write() raises.
        with codecs.open(filepath, flag, 'utf-8') as f:
            f.write(content)
class RandomGenerator(object):
    """Helpers for generating random identifiers."""

    @staticmethod
    def random_id(length=10):
        """Return a random lowercase-alphanumeric string of *length* characters.

        Based on http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
        """
        alphabet = string.ascii_lowercase + string.digits
        return ''.join(random.choice(alphabet) for _ in range(length))
"""
Write a code to take the user input
a name of the pair & display the pair formed.
For e.g. Pair of Bat is:
User Input: Ball
Output : So the Pair is Bat & Ball.
"""
print("Welcome!! Here I am Gonna Write the Name of a Pair of a Thing & you Have to Guess the Another Pair ")
user = input("Pair of Bat is : ")
if user=="Ball":
print("So the Pair is Bat and " + user ) |
from typing import Dict
from typing import List
from . import dependency
from . import test_job
from .. import docker
import logging
import time
class SingleContainerCLISuiteJob(test_job.TestJob):
    """Test job that runs a suite of CLI tests against a single Docker
    container (the device under test, DUT)."""
    def __init__(self, steps: List[test_job.CLITest], docker_image_under_test: str | dependency.Dependency, cmd_to_run_in_dut: str, dut_port_mappings: Dict[int, int]) -> None:
        """Store the DUT image (name or Dependency), the command to run in
        it, and the host-to-container port mappings."""
        super().__init__(artifacts=[], steps=steps)
        self.dut = docker_image_under_test
        self.cmd_to_run_in_dut = cmd_to_run_in_dut
        self.dut_port_mappings = dut_port_mappings
        # Set by setup(); stopped again in teardown().
        self._dut_container = None
    def setup(self, args):
        """
        Set up the DUT by using this object's `docker_cmd`.
        """
        super().setup(args)
        # The DUT may be given as a Dependency (resolved at run time) or as
        # a plain image name to be expanded by the docker helper.
        if issubclass(type(self.dut), dependency.Dependency):
            docker_image_name = self.dut.evaluate(args).item
        else:
            docker_image_name = str(docker.construct_docker_image_name(args, self.dut))
        kwargs = {'environment': {'ARTIE_RUN_MODE': 'unit'}, 'ports': self.dut_port_mappings}
        self._dut_container = docker.start_docker_container(docker_image_name, self.cmd_to_run_in_dut, **kwargs)
        # Let each test step know which container id backs the image name.
        for step in self.steps:
            step.link_pids_to_expected_outs(args, {docker_image_name: self._dut_container.id})
        # Give some time for the container to initialize before we start testing it
        logging.info("Waiting for DUT to come online...")
        time.sleep(min(args.test_timeout_s / 3, 10))
    def teardown(self, args):
        """
        Shutdown any Docker containers still at large.
        """
        super().teardown(args)
        logging.info(f"Tearing down. Stopping docker container...")
        try:
            self._dut_container.stop()
        except docker.docker_errors.NotFound:
            pass # Container already stopped
|
class Solution:
    def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
        """Return the key held the longest; ties go to the larger key.

        releaseTimes[i] is when keysPressed[i] was released; each press
        starts at the previous release (0 for the first key).
        """
        best_duration = 0
        best_key = keysPressed[0]
        previous_release = 0
        for key, release in zip(keysPressed, releaseTimes):
            duration = release - previous_release
            if duration > best_duration or (duration == best_duration and key > best_key):
                best_duration = duration
                best_key = key
            previous_release = release
        return best_key
|
# encoding: utf-8
import numpy as np
import glob
import time
import cv2
import os
from torch.utils.data import Dataset
from cvtransforms import *
import torch
import glob
import re
import copy
import json
import random
import math
import editdistance
from torch.utils.data import Dataset, DataLoader
class MyDataset(Dataset):
    """Lip-reading dataset: pairs cropped mouth-image sequences with their
    transcriptions, read from per-utterance annotation files.

    Samples whose transcription contains characters outside `letters`
    are dropped during construction.
    """
    # Vocabulary; indices are offset by 3 in txt2arr (0=pad, 1=SOS, 2=EOS).
    letters = [' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    def __init__(self, vid_path, anno_path, vid_pad, txt_pad, programs_txt, phase):
        """Collect (transcript, image dir, (start, end)) tuples for every
        annotated utterance of the programs listed in *programs_txt*.

        vid_pad / txt_pad: fixed lengths the video and text are padded to.
        phase: 'train' enables augmentation in __getitem__.
        """
        self.vid_path = vid_path
        self.anno_path = anno_path
        self.vid_pad = vid_pad
        self.txt_pad = txt_pad
        self.phase = phase
        self.programs = programs_txt
        # NOTE(review): this file handle is never closed — harmless here but
        # worth confirming.
        programs_list = open(self.programs, 'r')
        programs = programs_list.readlines()
        print(programs)
        self.annos = []
        self.program_files = glob.glob(os.path.join(anno_path, '*', '*'))
        # Keep only annotation files belonging to the requested programs;
        # program[:-2] drops the trailing newline (and one more character).
        for program in programs:
            for pro_file in self.program_files:
                #print(program, pro_file)
                if program[:-2] in pro_file:
                    # print(program)
                    # print(pro_file)
                    self.annos.extend(glob.glob(os.path.join(pro_file, '*', '*', '*.txt')))
        # print(self.annos)
        #self.anno = glob.glob(os.path.join(anno_path, '*', '*', '*', '*', '*.txt'))
        self.imgs = glob.glob(os.path.join(vid_path, '*', '*', '*', '*'))
        #self.imgs = filter(lambda x: not os.listdir(x) is None, self.imgs)
        self.data = []
        for anno in self.annos:
            f = open(anno, 'r')
            lines = f.readlines()
            #items = anno.split(os.path.sep)
            # Map the annotation path to its corresponding cropped-image dir:
            # .../utterances/<x>.txt -> .../crop/<x> (last 10 chars dropped).
            items1 = anno.split('utterances')
            images_path = items1[0] + 'crop' + items1[1][:-10]
            #print(len(os.listdir(images_path)))
            # if images_path not in self.imgs:
            #     print(anno, images_path)
            if images_path in self.imgs and len(os.listdir(images_path)) != 0:
                #print(len(os.listdir(images_path)))
                #if program in images_path:
                # Lines 2/3 hold the start time and duration (in seconds).
                st = float(lines[2].split(' ')[1])
                ed = st + float(lines[3].split(' ')[1])
                anno = lines[6].rstrip('\n')
                #print((lines[6], images_path, (st, ed)))
                #anno_list = []
                #print(anno)
                anno_list = list(anno)
                #l = len(anno_list)
                # Count out-of-vocabulary characters; keep the sample only
                # when there are none.
                n = 0
                #print(anno_list)
                for ch in anno_list:
                    if ch.upper() not in MyDataset.letters:
                        #print(ch.upper())
                        n += 1
                if n == 0:
                    self.data.append((anno, images_path, (st, ed)))
                    #print(self.data)
        #print(self.data)
    def __getitem__(self, idx):
        """Return the padded video tensor and encoded transcript for one sample."""
        (anno, images_path, time) = self.data[idx]
        vid = self._load_vid(images_path)
        vid = self._load_boundary(vid, time)
        #print(anno)
        # if '\n' not in anno:
        #     anno = anno
        # else:
        #     anno = anno[:-2]
        anno = self._load_anno(anno)
        #print(anno)
        #print(anno)
        #print('anno: ', anno)
        #anno_len = anno.shape[0]
        vid = self._padding(vid, self.vid_pad)
        anno = self._padding(anno, self.txt_pad)
        # Augmentation only for the training phase.
        if(self.phase == 'train'):
            vid = HorizontalFlip(vid)
            vid = FrameRemoval(vid)
        vid = ColorNormalize(vid)
        #print(anno)
        # Video is transposed to (channels, frames, height, width).
        return {'encoder_tensor': torch.FloatTensor(vid.transpose(3, 0, 1, 2)),
            'decoder_tensor': torch.LongTensor(anno)}
    def __len__(self):
        return len(self.data)
    def _load_vid(self, p):
        """Load and resize all frames in directory *p*, sorted numerically."""
        #files = sorted(os.listdir(p))
        files = sorted(os.listdir(p), key=lambda x:int(x.split('.')[0]))
        array = [cv2.imread(os.path.join(p, file)) for file in files]
        # Drop frames cv2 failed to read.
        array = list(filter(lambda im: not im is None, array))
        # array = [cv2.resize(im, (50, 100)).reshape(50, 100, 3) for im in array]
        array = [cv2.resize(im, (100, 50)) for im in array]
        #print(len(array))
        array = np.stack(array, axis=0)
        return array
    def _load_boundary(self, arr, time):
        """Slice frames to the utterance's (start, end) window, assuming 25 fps."""
        st = math.floor(time[0] * 25)
        ed = math.ceil(time[1] * 25)
        return arr[st: ed]
    def _load_anno(self, name):
        """Encode the transcript string into an index array (with SOS/EOS)."""
        # with open(name, 'r') as f:
        #     lines = [line.strip().split(' ') for line in f.readlines()]
        #     txt = [line[2] for line in lines]
        #txt = list(name)
        #print(txt)
        txt = name
        # Drop silence/space markers before encoding.
        txt = list(filter(lambda s: not s.upper() in ['SIL', 'SP'], txt))
        #print(txt)
        return MyDataset.txt2arr(''.join(txt).upper(), 1)
    def _padding(self, array, length):
        """Zero-pad *array* along the first axis up to *length* entries."""
        array = [array[_] for _ in range(array.shape[0])]
        #print(len(array))
        size = array[0].shape
        for i in range(length - len(array)):
            array.append(np.zeros(size))
        return np.stack(array, axis=0)
    @staticmethod
    def txt2arr(txt, SOS=False):
        # SOS: 1, EOS: 2, P: 0, OTH: 3+x
        # NOTE(review): 'arr' is never used; 'tensor' carries the result.
        arr = []
        if(SOS):
            tensor = [1]
        else:
            tensor = []
        for c in list(txt):
            tensor.append(3 + MyDataset.letters.index(c))
        tensor.append(2)
        return np.array(tensor)
    @staticmethod
    def arr2txt(arr):
        # (B, T)
        """Decode a batch of index tensors back to strings (pad/SOS/EOS skipped)."""
        result = []
        n = arr.size(0)
        T = arr.size(1)
        for i in range(n):
            text = []
            for t in range(T):
                c = arr[i,t]
                if(c >= 3):
                    text.append(MyDataset.letters[c - 3])
            text = ''.join(text)
            result.append(text)
        return result
    @staticmethod
    def ctc_arr2txt(arr, start):
        """CTC-style decode: collapse repeats and drop indices below *start*."""
        pre = -1
        txt = []
        for n in arr:
            if(pre != n and n >= start):
                txt.append(MyDataset.letters[n - start])
            pre = n
        return ''.join(txt)
    @staticmethod
    def wer(predict, truth):
        """Mean word error rate between predicted and true transcripts."""
        word_pairs = [(p[0].split(' '), p[1].split(' ')) for p in zip(predict, truth)]
        wer = [1.0*editdistance.eval(p[0], p[1])/len(p[1]) for p in word_pairs]
        return np.array(wer).mean()
    @staticmethod
    def cer(predict, truth):
        """Mean character error rate between predicted and true transcripts."""
        cer = [1.0*editdistance.eval(p[0], p[1])/len(p[1]) for p in zip(predict, truth)]
        return np.array(cer).mean()
# import options as opt
# def data_from_opt(vid_path, programs_txt, phase):
# dataset = MyDataset(vid_path,
# opt.anno_path,
# opt.vid_pad,
# opt.txt_pad,
# programs_txt=programs_txt,
# phase=phase)
# print('vid_path:{},num_data:{}'.format(vid_path,len(dataset.data)))
# loader = DataLoader(dataset,
# batch_size=opt.batch_size,
# num_workers=1,
# drop_last=False,
# shuffle=True)
# return (dataset, loader)
# train_datasets, dataloaders = data_from_opt(opt.vid_path, programs_txt=opt.trn_programs_txt, phase='train')
# val_datasets, dataloaders = data_from_opt(opt.vid_path, programs_txt=opt.val_programs_txt, phase='val')
# tst_datasets, dataloaders = data_from_opt(opt.vid_path, programs_txt=opt.tst_programs_txt, phase='tst')
# for idx, batch in enumerate(dataloaders):
# arr = batch['decoder_tensor']
# print(arr)
# print(MyDataset.arr2txt(arr))
|
from django.db import models
from datetime import datetime
from django.contrib.auth.models import User
class Band(models.Model):
    """A musical band; members are attached through the UserHasBand join table."""
    name = models.CharField(max_length=100)
    # Explicit through-model so extra per-membership data (user_level_id) can be stored.
    members = models.ManyToManyField( User, through='UserHasBand')
    # NOTE(review): datetime.now as a callable default is evaluated at save time;
    # auto_now_add/auto_now would be the conventional Django choice — confirm intent.
    created_at = models.DateTimeField( default=datetime.now, blank=True )
    updated_at = models.DateTimeField( default=datetime.now, blank=True )
    class Meta:
        db_table = "bands"
    def __unicode__(self):
        # Python 2-era string representation (legacy Django).
        return self.name
class UserHasBand(models.Model):
    """Join table linking a User to a Band with a per-membership level."""
    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax —
    # required positional argument from Django 2.0 onward.
    user = models.ForeignKey(User)
    band = models.ForeignKey(Band)
    # Membership level within the band; 0 is the default/base level.
    user_level_id = models.IntegerField(default=0)
    class Meta:
        db_table = "users_has_bands"
__author__ = 'ash'
import numpy as np
from sets import Set
import sys
#import pulp
import Node
class Task:
    """A workload description: VM dependencies plus storage and public-network
    priorities used by the scheduler."""

    def __init__(self, vm_dep_list, storage_priority, public_priority):
        self.vm_dep_list = vm_dep_list
        self.storage_priority = storage_priority
        self.public_priority = public_priority

    @staticmethod
    def example_task():
        """Return a small hard-coded sample task for manual experiments."""
        # Dependency format: (<compute_node_with_vm_id>, <priority>)
        deps = [(7, 3), (6, 4), (13, 5), (9, 4)]
        return Task(deps, 4, 4)
class Scheduler:
    """Places a task on a compute node using all-pairs shortest paths over the
    topology graph.

    NOTE(review): Python 2 code — relies on xrange, sets.Set, sys.maxint,
    dict.has_key and a print statement; will not run unmodified on Python 3.
    """
    def __init__(self,node_list,edges_list):
        self.node_list = node_list
        self.edge_list = edges_list
        self.dim = len(node_list)     # number of vertices in the topology
        self.infinity = 10000         # sentinel weight for "no edge"
        self.undefined = -1           # sentinel for "no predecessor yet"
    def make_adjacency_matrix(self):
        """Build a dim x dim matrix: 1 where an edge exists, `infinity` otherwise."""
        # matrix = np.matrix(np.zeros((self.dim,self.dim),dtype=np.int))
        matrix = [[self.infinity for x in xrange(self.dim)] for y in xrange(self.dim)]
        # test = matrix[0][1]
        for edge in self.edge_list:
            i,j = edge.node_pair
            test = matrix[i][j]
            # Undirected graph: set both directions to unit weight.
            matrix[i][j] = int(1)
            matrix[j][i] = int(1)
        return matrix
    def min_distance(self,dist,q):
        """
        Finds in dist minimal distance with indexes from the queue q
        """
        # NOTE(review): local `min` shadows the builtin; harmless here but
        # worth renaming if this ever moves to Python 3.
        min = sys.maxint
        minind = -1
        for elem in q:
            if (dist[elem] < min):
                min = dist[elem]
                minind = elem
        return minind
    def dijkstra(self,matrix,src):
        """
        Standard Dijkstra algorithm. For source finds shortest pathes to every other node.

        Returns (dist, route_list): dist[j] is the shortest distance src->j and
        route_list[j] the node sequence of that path (src first).
        """
        dist = [self.infinity for x in xrange(self.dim)]
        previous = [self.undefined for x in xrange(self.dim)]
        route_list = [[] for x in xrange(self.dim)]
        dist[src] = 0
        # previous[src] = src
        q = Set()
        for i in range(0,self.dim):
            q.add(i)
        while (len(q) > 0):
            # First iteration always settles the source itself.
            if (len(q) == self.dim):
                u = src
            else:
                u = self.min_distance(dist,q)
            q.remove(u)
            # Rebuild the route for the just-settled node u by walking the
            # predecessor chain back to src, then reversing it.
            target = u
            path_node = u
            while previous[path_node] != self.undefined:
                route_list[target].append(path_node)
                path_node = previous[path_node]
            route_list[target].append(src)
            route_list[target].reverse() # as we aggregate it reverse
            # Relax all edges out of u.
            for j in range(0,self.dim):
                if j == u:
                    continue
                alt = dist[u] + matrix[u][j]
                if alt < dist[j]:
                    dist[j] = alt
                    previous[j] = u
        return (dist,route_list)
    def calc_routes(self):
        """
        With dijkstra algorithm builds the route matrix in the whole topology
        (route_matrix[i][j] is a Route with distance and path from i to j).
        """
        matrix = self.make_adjacency_matrix()
        route_matrix = [] #np.matrix((self.dim,self.dim),dtype=Route)
        for i in range(0,self.dim):
            #previous = np.zeros((1,self.dim),dtype=np.int)
            (dist, route_list) = self.dijkstra(matrix,i)
            # print previous
            route_matrix.append([])
            for j in range(0,self.dim):
                rt = Route(dist[j],route_list[j])
                route_matrix[i].append(rt)
        return route_matrix
    @staticmethod
    def build_distances(bw_hist):
        """
        Takes the information about the weights on edges
        and builds the matrix of distances between nodes.

        Distance i->j is the sum of average bandwidth (avgbw) over every edge
        on the precomputed shortest route between i and j.
        """
        # assuming that edge_list has changed after TrafficGen
        route_matrix = bw_hist.route_matrix
        edge_dict = bw_hist.edge_dict
        dim = len(route_matrix)
        dist = [[0 for x in range(0,dim)] for y in range(0,dim)]
        for i in range(0,dim):
            for j in range(0,dim):
                route = route_matrix[i][j].route
                route_sum = 0
                for k in range(0,len(route)-1):
                    (v1,v2) = (route[k],route[k+1])
                    # Edges are undirected; the dict may key either orientation.
                    if edge_dict.has_key((v1,v2)):
                        route_sum += edge_dict[(v1,v2)].avgbw
                    else:
                        route_sum += edge_dict[(v2,v1)].avgbw
                dist[i][j] = route_sum
        return dist
    @staticmethod
    def prepare_priority_list(task,node_list):
        """
        Takes the information about the task
        And constructs the list of pairs : (<node>,<priority>)
        covering VM dependencies, storage nodes and public network nodes.
        """
        # construct (<storage>,<priority> list)
        st_dep_list = []
        for x in node_list:
            if type(x) is Node.Storage:
                st_dep_list.append((x.id,task.storage_priority))
        # construct public priority list
        pub_dep_list = []
        for x in node_list:
            if type(x) is Node.NetworkNode:
                pub_dep_list.append((x.id,task.public_priority))
        # append to vm dep_list
        priorities = []
        priorities.extend(task.vm_dep_list)
        priorities.extend(st_dep_list)
        priorities.extend(pub_dep_list)
        return priorities
    @staticmethod
    def schedule(dist,task,node_list):
        """
        Simple scheduler. For every appropriate node (Compute node)
        finds the sum to the prior nodes

        Minimizes, over compute nodes, the worst (maximum) priority-weighted
        route traffic to any prioritized node; returns the chosen node id.
        """
        priorities = Scheduler.prepare_priority_list(task,node_list)
        # NOTE(review): min_dist is assigned but never used below.
        min_dist = sys.maxint
        min_glob = sys.maxint
        min_id = -1
        for node in node_list:
            if not isinstance(node,Node.ComputeNode):
                continue
            max_route = 0
            for prior in priorities:
                traf = dist[node.id][prior[0]]*prior[1]
                if traf > max_route: # We are searching for maximum traffic on route link
                    max_route = traf
            if max_route < min_glob:
                min_glob = max_route
                min_id = node.id
        return min_id
    def print_route(self, route_matrix):
        """Dump every pairwise distance and route to stdout (debug helper)."""
        for i in range(0,self.dim):
            for j in range(0,self.dim):
                sys.stdout.write("From " + str(i) + " to " + str(j) + " dist " + str(route_matrix[i][j].dist) + " Route: ")
                print route_matrix[i][j].route
class Route:
    """Shortest-path result: total distance plus the visited node sequence."""

    def __init__(self, dist, route):
        self.dist = dist
        self.route = route
|
import sys
import math
from PIL import Image

# Halve every RGB channel of the input image (darken by 50%) and save as Q2.png.
if len(sys.argv) < 2:
    print('Not enough arguments')
    # FIX: previously execution fell through to sys.argv[1] and raised IndexError.
    sys.exit(1)
filename = sys.argv[1]
im = Image.open(filename)
pixelMap = im.load()

img = Image.new(im.mode, im.size)
pixelsNew = img.load()
for i in range(img.size[0]):
    for j in range(img.size[1]):
        # Integer-halve each channel; floor keeps values as ints in 0..255.
        c1 = math.floor(pixelMap[i, j][0] / 2)
        c2 = math.floor(pixelMap[i, j][1] / 2)
        c3 = math.floor(pixelMap[i, j][2] / 2)
        pixelsNew[i, j] = (c1, c2, c3)
img.save('Q2.png')
|
from django.apps import AppConfig
class KairosConfig(AppConfig):
    """Django application configuration for the 'kairos' app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'kairos'
|
# ==============================================================================================================
# The following snippet demonstrates how to manipulate kaolin's camera.
# ==============================================================================================================
import torch
from kaolin.render.camera import Camera

# Build a CUDA camera looking from (0, 0, -1) at the origin with +Y up,
# an 800x600 viewport and a 1.0 rad field of view.
camera = Camera.from_args(
    eye=torch.tensor([0.0, 0.0, -1.0]),
    at=torch.tensor([0.0, 0.0, 0.0]),
    up=torch.tensor([0.0, 1.0, 0.0]),
    width=800, height=600,
    fov=1.0,
    device='cuda'
)
# Extrinsic rigid transformations managed by CameraExtrinsics
camera.move_forward(amount=10.0)    # Translate forward in world coordinates (this is wisp's mouse zoom)
camera.move_right(amount=-5.0)      # Translate left in world coordinates
camera.move_up(amount=5.0)          # Translate up in world coordinates
camera.rotate(yaw=0.1, pitch=0.02, roll=1.0)    # Rotate the camera
# Intrinsic lens transformations managed by CameraIntrinsics
# Zoom in to decrease field of view - for Orthographic projection the internal implementation differs
# as there is no actual fov or depth concept (hence we use a "made up" fov distance parameter, see the projection matrix)
camera.zoom(amount=0.5)
|
import textwrap
def wrap(string, max_width):
    """Split `string` into max_width-sized chunks, each terminated by '\\n'.

    Returns '' for an empty input; the result always ends with a newline
    otherwise.
    """
    chunks = [string[i:i + max_width] + '\n'
              for i in range(0, len(string), int(max_width))]
    return ''.join(chunks)
if __name__ == '__main__':
    # Read the text and the wrap width from stdin, then print the wrapped text.
    # NOTE(review): wrap() already ends with '\n', so print() emits a trailing
    # blank line — confirm that matches the expected output format.
    string, max_width = input(), int(input())
    result = wrap(string, max_width)
    print(result)
|
import numpy as np
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms
def get_random_shift(scale=2, image_size=28):
    """Build a transform that randomly shifts a digit on a larger canvas.

    Pads every side by image_size*(scale-1), then randomly crops back to
    image_size + padding, so the digit lands at a random offset. With
    scale=1 the padding is zero and no shift occurs. Output is a
    normalized tensor (standard MNIST mean/std).
    """
    padding = image_size * (scale - 1)
    target_size = image_size + padding
    return transforms.Compose([
        transforms.Pad(padding, fill=0),
        transforms.RandomCrop(target_size),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
def get_train_valid_loader(task, batch_size, random_seed, valid_size=0.2):
    """Return (train_loader, valid_loader, num_classes) for MNIST.

    Splits the training set deterministically (seeded shuffle of indices);
    the train portion is rounded down to a whole number of batches.
    Only task == 'MNIST' is supported.
    """
    assert ((valid_size >= 0) and (valid_size <= 1)), "[!] valid_size should be in the range [0, 1]."
    assert task.upper() == 'MNIST', "only MNIST is supported"
    # scale=1 -> zero padding: effectively no random shift on these loaders.
    random_shift = get_random_shift(scale=1)
    # This first loader exists only to materialize the dataset and read its size.
    data_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=random_shift),
        batch_size=batch_size, shuffle=True)
    all_indices = np.arange(0, data_loader.dataset.data.shape[0])
    np.random.seed(random_seed)
    np.random.shuffle(all_indices)
    # Round the train split down to a multiple of batch_size.
    train_amount = (int(len(all_indices) * (1 - valid_size)) // batch_size) * batch_size
    train_sampler = SubsetRandomSampler(all_indices[:train_amount])
    valid_sampler = SubsetRandomSampler(all_indices[train_amount:])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=random_shift),
        batch_size=batch_size, sampler=train_sampler)
    valid_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=True, download=True,
                       transform=random_shift),
        batch_size=batch_size, sampler=valid_sampler)
    classes = 10
    return train_loader, valid_loader, classes
def get_test_loader(task, batch_size):
    """Return a shuffled MNIST test-set loader with the default random shift."""
    if task.upper() != 'MNIST':
        raise RuntimeError()
    shift = get_random_shift()
    test_set = datasets.MNIST('data', train=False, download=True,
                              transform=shift)
    return torch.utils.data.DataLoader(test_set,
                                       batch_size=batch_size, shuffle=True)
|
import os
from bllipparser import RerankingParser
from bllipparser.ModelFetcher import download_and_install_model
import logging
# Verbose log format (file, line, padded function name) for easier tracing.
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
def importArticles(corpusFileName):
    """Read a corpus file from the CWD and split it into articles.

    Articles are separated by lines of '~~~~~'. Each sentence line is
    assumed to be wrapped in 4-character start/stop tags, which are
    stripped. Returns a list of articles, each a list of sentence strings.
    """
    articles = []
    path = os.getcwd()
    with open(path + '/' + corpusFileName, "r") as f:
        lines = f.readlines()
    article = []
    for line in lines:
        line = line.rstrip()
        if line == "~~~~~":
            if article:
                articles.append(article)
            article = []
        else:
            # Removes the start/stop tags around the sentence
            # (assumes exactly 4 characters on each side — TODO confirm).
            line = line[4:]
            line = line[:-4]
            line = line.rstrip()
            article.append(line)
    # FIX: only keep a trailing article if it has content; previously an empty
    # list was appended whenever the file ended with a '~~~~~' separator.
    if article:
        articles.append(article)
    return articles
def getFakeGood(labelsFileName):
    """Read one integer label per line from a file in the current directory."""
    path = os.getcwd()
    with open(path + '/' + labelsFileName, "r") as f:
        return [int(raw.rstrip()) for raw in f.readlines()]
def _score_article(parser, article, kind):
    """Parse each sentence of `article` with the reranking parser.

    Returns (scores, average): the best parser score per sentence and their
    mean (0.0 for an empty article). `kind` is only used in log messages.
    """
    scores = []
    for sentence in article:
        logging.debug("Looking into %s sentence: %s", kind, sentence)
        sentence_parses = parser.parse(sentence, 1)
        best_score = sentence_parses[0].parser_score
        logging.debug("Score for %s sentence: %s", kind, best_score)
        scores.append(best_score)
    total = 0.0
    for s in scores:
        total += float(s)
    # Guard against empty articles (previously a ZeroDivisionError).
    average = total / len(scores) if scores else 0.0
    return scores, average

def main():
    """Score every training article with the BLLIP parser and write results
    (article, per-sentence scores, average) to goodArticlesBllip.txt /
    badArticlesBllip.txt according to its label (1 = good, 0 = bad)."""
    # model_dir = download_and_install_model('WSJ', '/tmp/models')
    model_dir = download_and_install_model('WSJ+Gigaword-v2', '/tmp/models')
    parser = RerankingParser.from_unified_model_dir(model_dir)
    goodArticles = []
    badArticles = []
    articles = importArticles('trainingSet.dat')
    labels = getFakeGood('trainingSetLabels.dat')
    # Context managers guarantee the output files are closed even if the
    # parser raises (previously fg/fb leaked on exception).
    with open('goodArticlesBllip.txt', 'w') as fg, open('badArticlesBllip.txt', 'w') as fb:
        for i, label in enumerate(labels):
            if label == 1:
                goodArticles.append(articles[i])
                scores, average = _score_article(parser, articles[i], "good")
                fg.write("%s, %s, %f\n" % (articles[i], scores, average))
            if label == 0:
                badArticles.append(articles[i])
                scores, average = _score_article(parser, articles[i], "bad")
                fb.write("%s, %s, %f\n" % (articles[i], scores, average))
if __name__ == "__main__": main()
|
from Functions.MyViews import ItemView, ItemsView
from .. import models
from Functions.DynamicSer import DynamicSerializer
MyModel = models.Availablity
class ModelSer(DynamicSerializer):
    """Serializer exposing every field of the Availablity model."""
    class Meta:
        model = MyModel
        fields = '__all__'
class Views(ItemsView):
    """List/create endpoint for Availablity items."""
    queryset = MyModel.objects.all()
    serializer_class = ModelSer
    def post(self,*args,**kwargs):
        """
        Create an availability slot. Expected request body:
        var params = {
            title: "<str>",
            description: '<str>',
            start: '<yyyy-mmm-dddTHH:MM:SS.FF>',
            end: '<yyyy-mmm-dddTHH:MM:SS.FF>',
            recurrence: ['str','stre'],
            user:<id>,
        }
        """
        return super().post(*args,**kwargs)
class View(ItemView):
    """Detail endpoint for a single Availablity item."""
    MyModel = MyModel
    queryset = MyModel.objects.all()
    serializer_class = ModelSer
    # TODO test this
    def get_queryset(self,pk,request,*args,**kwargs):
        # Mark whether the requester owns the object before delegating.
        # NOTE(review): despite its name this method delegates to super().get()
        # (not get_queryset), and MyModel.objects.get(id=pk) raises DoesNotExist
        # for unknown ids — confirm both are intended.
        setattr(request,'is_owner',MyModel.objects.get(id=pk).user == request.user)
        return super().get(pk,request,*args,**kwargs)
|
# -*- coding: utf-8 -*-
from gallery.utils import db_cli
class BaseResource(object):
    """Abstract gallery resource persisted to the shared resource collection.

    Subclasses must implement `_save_disk` and `_to_document`.
    """

    def __init__(self, type):
        # NOTE: parameter `type` shadows the builtin; kept for caller compatibility.
        self._db = db_cli.resource
        self.type = type
        self.tags = []
        self.visit_log = []

    def save(self):
        """Persist the payload to disk, then insert its document into the DB."""
        self._save_disk()
        document = self._to_document()
        self._db.insert_one(document)

    def _save_disk(self):
        """Write the resource payload to disk (subclass hook)."""
        raise NotImplementedError

    def _to_document(self):
        """Serialize the resource to a DB document (subclass hook)."""
        raise NotImplementedError
|
from django.db import models
class SUBJECT(models.Model):
    """
    The patient class for MIMIC-III.
    "ROW_ID","SUBJECT_ID","GENDER","DOB","DOD","DOD_HOSP","DOD_SSN","EXPIRE_FLAG"
    we keep all fields except row-IDs.
    """
    # TODO: 1. Determine if the pre-pop-processing will:
    # TODO:    - integerize static patients vars (GENDER, ETHNICITY)
    SUBJECT_ID = models.IntegerField(default=None, primary_key=True)
    GENDER = models.CharField(max_length=10)
    DOB = models.DateTimeField(default=None, max_length=20)  # date of birth
    DOD = models.DateTimeField(default=None, max_length=20)  # date of death
    DOD_HOSP = models.DateTimeField(default=None, max_length=20)  # death recorded in hospital
    DOD_SSN = models.DateTimeField(default=None, max_length=20) # ssn = stationary? TODO confirm source
    EXPIRE_FLAG = models.BooleanField(default=None)  # TODO: is there a default?
class ADMISSION(models.Model):
    """
    Holds the information for a single admission period.
    "ROW_ID","SUBJECT_ID","HADM_ID","ADMITTIME","DISCHTIME","DEATHTIME","ADMISSION_TYPE",
    "ADMISSION_LOCATION","DISCHARGE_LOCATION","INSURANCE","LANGUAGE","RELIGION","MARITAL_STATUS",
    "ETHNICITY","EDREGTIME","EDOUTTIME","DIAGNOSIS","HOSPITAL_EXPIRE_FLAG","HAS_CHARTEVENTS_DATA"
    """
    ADMISSION_CHOICES = ( # TODO: since we're storing real data now: do we still want these?
        ('Elective',
            (('NB', 'newborn'),
            ('EL', 'elective')
        )),
        ('Non-elective',
            (('UR', 'urgent'),
            ('EM', 'emergency')
        )),
    )
    # meta
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    HADM_ID = models.IntegerField(default=None, primary_key=True)  # hospital admission id
    # adm_time = models.CharField(default=None, max_length=20, null=True, blank=True)
    ADMITTIME = models.DateTimeField(null=True, blank=True)
    # disch_time = models.CharField(default=None, max_length=20, null=True, blank=True)
    DISCHTIME = models.DateTimeField(null=True, blank=True)
    # death_time = models.CharField(default=None, max_length=20, null=True, blank=True)
    DEATHTIME = models.DateTimeField(default=None, null=True, blank=True)
    ADMISSION_TYPE = models.CharField(choices=ADMISSION_CHOICES, default=None, max_length=40)
    ADMISSION_LOCATION = models.CharField(default=None, max_length=40)  # TODO: get choices for this?
    DISCHARGE_LOCATION = models.CharField(default=None, max_length=40)  # TODO: get choices for this?
    INSURANCE = models.CharField(default=None, max_length=15)  # TODO: get choices for this?
    LANGUAGE = models.CharField(default=None, max_length=10, blank=True, null=True)  # may be `nan` (still str)
    RELIGION = models.CharField(default=None, max_length=30, blank=True, null=True)  # free-text in source data
    MARITAL_STATUS = models.CharField(default=None, max_length=50, blank=True, null=True)
    ETHNICITY = models.CharField(default=None, max_length=150)
    EDREGTIME = models.DateTimeField(default=None, null=True, blank=True)  # emergency-dept registration
    EDOUTTIME = models.DateTimeField(default=None, null=True, blank=True)  # emergency-dept exit
    DIAGNOSIS = models.CharField(default=None, max_length=300)  # some of them are really detailed in description -> definitively better to use codes
    HOSPITAL_EXPIRE_FLAG = models.BooleanField(default=None)  # TODO find out what this is
    HAS_CHARTEVENTS_DATA = models.BooleanField(default=None)
class ICUSTAY(models.Model):
    """
    The class holding ICU stays:
    "ROW_ID","SUBJECT_ID","HADM_ID","ICUSTAY_ID","DBSOURCE","FIRST_CAREUNIT","LAST_CAREUNIT", \
    "FIRST_WARDID","LAST_WARDID","INTIME","OUTTIME","LOS"
    """
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    ADMISSION = models.ForeignKey('ADMISSION', on_delete=models.CASCADE)
    ICUSTAY_ID = models.IntegerField(default=None, primary_key=True)
    DBSOURCE = models.CharField(default=None, max_length=10)  # TODO we most certainly want a choice here
    FIRST_CAREUNIT = models.CharField(default=None, max_length=10)
    LAST_CAREUNIT = models.CharField(default=None, max_length=10)
    FIRST_WARDID = models.IntegerField(default=None)
    LAST_WARDID = models.IntegerField(default=None)
    INTIME = models.DateTimeField(default=None)  # important field.
    OUTTIME = models.DateTimeField(default=None)  # important field.
    LOS = models.IntegerField(default=None)  # length of stay — TODO confirm unit (days?)
class CHARTEVENTVALUE(models.Model):
    """
    A single charted observation.
    "ROW_ID","SUBJECT_ID","HADM_ID","ICUSTAY_ID","ITEMID","CHARTTIME","STORETIME","CGID","VALUE",
    "VALUENUM","VALUEUOM","WARNING","ERROR","RESULTSTATUS","STOPPED"
    This holds a single lab value
    itemID
    timestamps
    value
    unit (valueuom)
    """
    # keys
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    ADMISSION = models.ForeignKey('ADMISSION', on_delete=models.CASCADE)
    ICUSTAY = models.ForeignKey('ICUSTAY', on_delete=models.CASCADE)
    ITEM = models.ForeignKey('CHARTITEM', on_delete=models.CASCADE)
    # Fields:
    CHARTTIME = models.DateTimeField(default=None) #, max_length=20)
    STORETIME = models.DateTimeField(default=None, null=True, blank=True) #, max_length=20, null=True, blank=True)
    CGID = models.CharField(default=None, max_length=10, null=True, blank=True)  # caregiver id
    VALUE = models.CharField(default=None, max_length=210)
    VALUENUM = models.CharField(max_length=25, default=None, null=True, blank=True)  # TODO check if float is safe here
    VALUEUOM = models.CharField(max_length=50, default=None, null=True, blank=True)  # unit of measure
    WARNING = models.CharField(default=None, max_length=25, null=True, blank=True)
    ERROR = models.CharField(default=None, max_length=25, null=True, blank=True)
    RESULTSTATUS = models.CharField(default=None, max_length=50, null=True, blank=True)  # contained only nans the top 1 Mio rows
    STOPPED = models.CharField(default=None, max_length=50, null=True, blank=True)  # contained only nans the top 1 Mio rows
class LABEVENTVALUE(models.Model):
    """
    This holds a single lab value
    itemID
    timestamps
    value
    unit (valueuom)
    "ROW_ID","SUBJECT_ID","HADM_ID","ITEMID","CHARTTIME","VALUE","VALUENUM","VALUEUOM","FLAG"
    """
    # keys
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    ADMISSION = models.ForeignKey('ADMISSION', on_delete=models.CASCADE)
    ITEM = models.ForeignKey('LABITEM', on_delete=models.CASCADE)#, primary_key=True) # TODO sure? it might actually be smart to have squential keys...
    # Fields:
    CHARTTIME = models.DateTimeField(default=None, blank=True, null=True)
    VALUE = models.CharField(default=None, max_length=50, blank=True, null=True)
    VALUENUM = models.FloatField(default=None, null=True, blank=True)  # TODO check if float is safe here
    VALUEUOM = models.CharField(max_length=50, default=None, null=True, blank=True)  # unit of measure
    UNIT = models.CharField(max_length=50, null=True, blank=True)  # NOTE(review): overlaps with VALUEUOM — confirm which is canonical
    FLAG = models.CharField(default=None, max_length=8, null=True, blank=True)  # abnormal or normal for lab values
class SERVICE(models.Model):
    """
    Holds information on the service (transfer between care services).
    "SUBJECT_ID","HADM_ID","TRANSFERTIME","PREV_SERVICE","CURR_SERVICE"
    """
    # keys
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    ADMISSION = models.ForeignKey('ADMISSION', on_delete=models.CASCADE)
    # fields:
    TRANSFERTIME = models.CharField(default=None, max_length=20)
    # TODO: standardize through choices?
    PREV_SERVICE = models.CharField(default=None, max_length=10)
    CURR_SERVICE = models.CharField(default=None, max_length=10)
class CHARTITEM(models.Model):
    """
    Dictionary entry for charted items (D_ITEMS).
    "ROW_ID","ITEMID","LABEL","ABBREVIATION","DBSOURCE","LINKSTO","CATEGORY","UNITNAME","PARAM_TYPE","CONCEPTID"
    """
    ITEMID = models.IntegerField(primary_key=True, default=None)
    LABEL = models.CharField(default=None, max_length=100, null=True, blank=True)
    ABBREVIATION = models.CharField(default=None, max_length=100, null=True, blank=True)
    DBSOURCE = models.CharField(default=None, max_length=100, null=True, blank=True)
    LINKSTO = models.CharField(default=None, max_length=100, null=True, blank=True)
    CATEGORY = models.CharField(default=None, max_length=100, null=True, blank=True)
    UNITNAME = models.CharField(default=None, max_length=100, null=True, blank=True)
    PARAM_TYPE = models.CharField(default=None, max_length=100, null=True, blank=True)
    CONCEPTID = models.CharField(default=None, max_length=100, null=True, blank=True)
class LABITEM(models.Model):
    """
    Dictionary entry for lab items (D_LABITEMS).
    "ROW_ID","ITEMID","LABEL","FLUID","CATEGORY","LOINC_CODE"
    """
    ITEMID = models.IntegerField(primary_key=True, default=None)
    LABEL = models.CharField(default=None, max_length=100, null=True, blank=True)
    FLUID = models.CharField(default=None, max_length=100, null=True, blank=True)
    CATEGORY = models.CharField(default=None, max_length=100, null=True, blank=True)
    LOINC_CODE = models.CharField(default=None, max_length=100, null=True, blank=True)
class DIAGNOSIS(models.Model):
    """
    Holds information on the diagnosis.
    "ROW_ID","SUBJECT_ID","HADM_ID","SEQ_NUM","ICD9_CODE"
    """
    # keys
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    ADMISSION = models.ForeignKey('ADMISSION', on_delete=models.CASCADE)
    # no ICU here
    # fields
    SEQ_NUM = models.CharField(default=None, max_length=20, null=True, blank=True)  # e.g. the rank of the diagnosis in the end of the admission
    ICD9_CODE = models.CharField(default=None, max_length=20, null=True, blank=True)
    ICD_CLASS = models.CharField(default=None, max_length=20, null=True, blank=True)
class PRESCRIPTION(models.Model):
    """
    Holds information about a drug prescribed during an ICU stay.
    """
    DRUG_TYPE_CHOICES = (
        ('M', 'MAIN'),
        ('A', 'ADDITIVE'),
        ('B', 'BASE')
    )
    ROUTE_CHOICES = (
        # TODO: implement
    )
    # keys
    SUBJECT = models.ForeignKey('SUBJECT', on_delete=models.CASCADE)
    ADMISSION = models.ForeignKey('ADMISSION', on_delete=models.CASCADE)
    ICUSTAY = models.ForeignKey('ICUSTAY', on_delete=models.CASCADE)
    # fields
    STARTDATE = models.CharField(default=None, max_length=20, null=True)
    ENDDATE = models.CharField(default=None, max_length=20, null=True)
    DRUG_TYPE = models.CharField(default=None, max_length=20, null=True)
    DRUG = models.CharField(default=None, max_length=100, null=True)#, primary_key=True) # TODO: check if we want primary key here
    DRUG_NAME_POE = models.CharField(default=None, max_length=100, null=True)
    DRUG_NAME_GENERIC = models.CharField(default=None, max_length=100, null=True)
    FORMULARY_DRUG_CD = models.CharField(default=None, max_length=100, null=True)
    GSN = models.FloatField(default=None, null=True, blank=True)  # this is mostly INTs but some NaNs disallow intfield.
    NDC = models.FloatField(default=None, null=True, blank=True)  # national drug code — TODO confirm
    PROD_STRENGTH = models.CharField(default=None, max_length=100, null=True)
    DOSE_VAL_RX = models.CharField(default=None, max_length=100, null=True)  # can't take float here as there are ranges sometimes
    DOSE_UNIT_RX = models.CharField(default=None, max_length=100, null=True)
    FORM_VAL_DISP = models.CharField(default=None, max_length=100, null=True)  # can't take float here as there are ranges sometimes
    FORM_UNIT_DISP = models.CharField(default=None, max_length=100, null=True)
    ROUTE = models.CharField(default=None, max_length=100, null=True)  # TODO: establish a CHOICE set here that is hierarchical!
|
# Print each ':' together with its following character, skipping colons that
# are last in the text or followed by a space.
text = input()
for idx, ch in enumerate(text):
    if ch == ":" and idx + 1 < len(text) and text[idx + 1] != " ":
        print(f"{ch}{text[idx + 1]}")
|
import random
#H - represents Heads
#T - represents Tails
class Game:
    """Coin game: 20 flips at P(heads)=probHeads, -250 buy-in, and +100 every
    time the pattern T, T, H completes."""

    def Simulate(self, probHeads):
        """Play one 20-flip game and return the net reward."""
        payout = -250
        two_back, one_back = "", ""
        for _ in range(1, 21):
            flip = 'H' if random.random() <= probHeads else 'T'
            if two_back == 'T' and one_back == 'T' and flip == 'H':
                payout += 100
            two_back, one_back = one_back, flip
        return payout
# Monte-Carlo estimate: average reward of the T-T-H game over 1000 plays
# with P(heads) = 0.4.
g = Game()
sumOfRewards = 0
for i in range(0, 1000):
    rew = g.Simulate(0.4)
    sumOfRewards = sumOfRewards + rew
print("Average of 1000 realizations is {}".format( sumOfRewards/1000))
|
import asyncio
import json
import csv
from os import pardir, pipe
from binance import AsyncClient
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException, BinanceOrderException
# SECURITY(review): hard-coded Binance API credentials committed to source.
# These should be revoked immediately and loaded from environment variables
# or a secrets manager instead.
api_key = '8VLvEFhyBTmZOp8XZDLHhuRy0WpFHAlKzp9RLGRN5laRvPB4lmuuC3kDoeK9a14q'
api_secret = 'cZdRIINvixBiMsD4JR81rNYURthJQIjuXvMmIkZaDtno3q1MbHjY9WAuoGZ95KBj'
#выдача актуальных для торговли пар=======================
async def get_pair(client):
    """Build candidate trading pairs from every non-zero wallet asset against
    the major quote currencies, write them to pair.txt and print them.

    Errors are appended to errors/errors.txt.
    """
    try:
        avg_price = await client.get_account()
        arr = []
        slovar = ["USDT", "BUSD", "BNB", "BTC", "TRY"]  # quote currencies
        pair = []
        # Collect assets with a non-zero free balance.
        for key in avg_price["balances"]:
            if key['free'] != '0.00000000' and key['free'] != '0.00':
                arr.append(key['asset'])
        # Form the base pairs: each held asset against each quote currency.
        for key in arr:
            for slovo in slovar:
                if key != slovo:
                    pair.append(key + slovo)
                else:
                    continue
        # Format the answer as "[PAIR1], [PAIR2], ... [PAIRN]".
        index = 0
        string = ""
        for key in pair:
            if index == len(pair) - 1:
                string += f"[{key}]"
            else:
                string += f"[{key}], "
            index += 1
        # Write the answer to file.
        with open('pair.txt', 'w') as d:
            d.write(f'{string}')
        print(string)
    except Exception as e:
        with open('errors/errors.txt', 'a') as d:
            d.write(f'{e}\n')
        print(e)
#выдача актуальных для торговли пар=======================
#выдача активных ордеров или всех в принципе=======================
async def get_orders_func(client, func):
    """Collect and print orders for every pair that involves a non-zero asset.

    func == 0 -> full trade history (get_my_trades);
    func == 1 -> currently open orders (get_open_orders).
    Errors are appended to errors/errors.txt.
    """
    try:
        '''
        avg_price = await client.get_account()
        arr = []
        slovar = ["USDT", "BUSD", "BNB", "BTC", "TRY"]
        pair = []
        for key in avg_price["balances"]:
            if key['free'] != '0.00000000' and key['free'] != '0.00':
                arr.append(key['asset'])
        #сформировываем основные пары
        for key in arr:
            for slovo in slovar:
                if key != slovo:
                    pair.append(key + slovo)
                else:
                    continue
        '''
        # Fetch all pairs relevant to us (assets with non-zero balance).
        avg_price = await client.get_account()
        arr = []
        pair = []
        for key in avg_price["balances"]:
            if key['free'] != '0.00000000' and key['free'] != '0.00':
                arr.append(key['asset'])
        print(arr)
        products = await client.get_products()
        # Parse the exchange product list: keep symbols whose base asset we hold.
        for key in products["data"]:
            for symbol in arr:
                if symbol == key["b"]:
                    pair.append(key["s"])
                    print(key["s"])
        # End of relevant-pair collection.
        # Fetch the full trade history per pair.
        collector = []
        if func == 0:
            for key in pair:
                print(key)
                # NOTE(review): bare except silently drops API errors per pair.
                try:
                    depth = await client.get_my_trades(symbol=key)
                    if depth == []:
                        continue
                    #print(json.dumps(depth, indent=2))
                    collector.append(depth)
                except:
                    pass
        # Fetch currently open orders per pair.
        elif func == 1:
            for key in pair:
                print(key)
                # NOTE(review): bare except silently drops API errors per pair.
                try:
                    depth = await client.get_open_orders(symbol=key)
                    if depth == []:
                        continue
                    #print(json.dumps(depth, indent=2))
                    collector.append(depth)
                except:
                    pass
        print("===========")
        for key in collector:
            print(key)
        '''
        #формируем нормальный ответ файл
        index = 0
        string = ""
        for key in pair:
            if index == len(pair) - 1:
                string += f"[{key}]"
            else:
                string += f"[{key}], "
            index += 1
        #Вывод в файл ответа
        with open('pair.txt', 'w') as d:
            d.write(f'{string}')
        print(string)
        '''
    except Exception as e:
        with open('errors/errors.txt', 'a') as d:
            d.write(f'{e}\n')
        print(e)
#выдача и запись в файлы данных о кошельке
async def get_account(client):
    """Fetch wallet balances; write BTC/RVN entries to btc_bars2.csv and all
    non-zero balances to btc_bars3.csv (one 'ASSET, amount' line each)."""
    avg_price = await client.get_account()
    arr = []
    arr1 = []
    for key in avg_price["balances"]:
        #print(i)
        #print(key['asset'])
        if key['asset'] == 'BTC':
            #print(key['free'])
            arr.append(key)
        if key['asset'] == 'RVN':
            #print(key['free'])
            arr.append(key)
        # Any asset with a non-zero free balance goes into the second file.
        if key['free'] != '0.00000000' and key['free'] != '0.00':
            arr1.append(key)
    for key in arr:
        print(key)
    with open('btc_bars2.csv', 'w') as d:
        for line in arr:
            d.write(f'{line["asset"]}, {line["free"]}\n')
    with open('btc_bars3.csv', 'w') as d:
        for line in arr1:
            d.write(f'{line["asset"]}, {line["free"]}\n')
#универсальная функция, осзволяющая ставить лимитный ордер на покупку или продажу
async def create_order(client, symbol, side, type, qua, price):
    """Place a GTC limit order (buy or sell) and log the exchange reply.

    NOTE(review): parameter `type` shadows the builtin; kept for caller
    compatibility — it is forwarded verbatim to the Binance API.
    """
    # Place the order (buy or sell depending on `side`).
    try:
        buy_limit = await client.create_order(
            symbol = symbol,
            side = side,
            type = type,
            timeInForce = 'GTC',
            quantity = qua,
            price = price)
        await writeFileOutput(buy_limit)
        print(buy_limit)
    except BinanceAPIException as e:
        # error handling goes here
        print(e)
    except BinanceOrderException as e:
        # error handling goes here
        print(e)
#запрос текущего курса по не пустым криптовалютам
async def get_currency(client):
    """Print every non-zero wallet balance as 'ASSET amount' and write the
    list to the output file. Exceptions are printed, not re-raised."""
    try:
        avg_price = await client.get_account()
        balances = []
        for key in avg_price["balances"]:
            if key['free'] != '0.00000000' and key['free'] != '0.00':
                # FIX: local variable was named `str`, shadowing the builtin.
                entry = f"{key['asset']} {key['free']}"
                balances.append(entry)
        for entry in balances:
            print(entry)
        await writeFileOutput(balances)
    except Exception as e:
        print(e)
#получение обфчного файла
async def readFile(name):
    """Return the full text contents of the file `name`."""
    with open(name, "r") as handle:
        text = handle.read()
    return text
#получение json файла
async def readJSONFile(name):
    """Load the file `name` as JSON and return the parsed object."""
    with open(name, 'r') as handle:
        parsed = json.load(handle)
    return parsed
'''
# чтение файла input
async def readFileINPUT():
with open("input.txt", 'r') as file:
inputTXT = file.read()
accArr = []
inputTXT = inputTXT.split("\n")
i = 0
for n in inputTXT:
accArr.append([i])
n = n.split(" ")
j = 0
for key in n:
if j == 4:
key = float(key)
accArr[i].append(key)
j += 1
i += 1
for key in accArr:
print(key)
return accArr
'''
# чтение файла input
async def readFileINPUT(client):
    """Parse the command file input.txt line by line.

    Each line starts with an opcode: '0' queues an order spec (field 4 is
    coerced to float); '1' requests pair history — fields 3 and 4 are
    '|'-separated timestamps that are normalized and passed straight to
    get_info_param. Returns the accumulated command list.
    """
    with open("input.txt", 'r') as file:
        inputTXT = file.read()
    accArr = []
    inputTXT = inputTXT.split("\n")
    i = 0
    # Read the file line by line as strings.
    for n in inputTXT:
        j = 0
        # Inspect the very first character of the line (the opcode).
        if n[0] == "0":
            # Split on spaces.
            n = n.split(" ")
            # Add the record index.
            accArr.append([i])
            for key in n:
                # Field 4 is the quantity: convert it to float.
                if j == 4:
                    key = float(key)
                accArr[i].append(key)
                j += 1
        # Opcode 1: report pair information over a given time interval.
        elif n[0] == "1":
            # Split on spaces.
            n = n.split(" ")
            # Add the record index.
            accArr.append([i])
            for key in n:
                # Fields 3 and 4 hold the time interval bounds.
                if (j == 3) or (j == 4):
                    # Replace the '|' separators with spaces.
                    arr_to_str = key.replace("|", " ")
                    # Rebuild as a plain string.
                    # NOTE(review): local `str` shadows the builtin within this loop.
                    key = ""
                    for str in arr_to_str:
                        key += str
                    # Append to the record.
                    accArr[i].append(key)
                j += 1
            # Fetch the history for this specific pair.
            await get_info_param(client, accArr[i][2], accArr[i][3], accArr[i][4], accArr[i][5])
        i += 1
    '''
        n = n.split(" ")
        #print(n)
        accArr.append([i])
        j = 0
        #если 0, то устанавливаем ордер
        if n[0] == "0":
            print("-----0------")
            for key in n:
                if j == 4:
                    key = float(key)
                accArr[i].append(key)
                j += 1
        #если 1, то выдача информации по паре за определенный промежуток времени
        elif n[0] == "1":
            #print(n)
            print("-----1------")
            print(n[0], n[1], n[2], n[3])
        i += 1
    '''
    for key in accArr:
        print(key)
    return accArr
'''
#блок добления на условия для дальнейшего последовательноо сценария
for key in getArrInput:
#если 0, то устанавливаем ордер
if key[1] == '0':
#await create_order(client, key[2], key[3], key[4], key[5], key[6])
await create_order(client, "RVNBNB", "SELL", "LIMIT", 195.0, "0.00034")
#если 1, то выдача информации по паре за определенный промежуток времени
elif key[1] == '1':
await get_info_param(client, "RVNBTC", "1h", "1 June, 2021", "23 July, 2021")
'''
# truncate the input file
async def clearFile():
    """Empty out input.txt (truncates it to zero length)."""
    with open('input.txt', 'w') as target:
        target.write("")
# write the response into the output file
async def writeFileOutput(mess):
    """Serialize *mess* as JSON and write it to perem/output.txt."""
    payload = json.dumps(mess)
    with open('perem/output.txt', 'w') as target:
        target.write(payload)
# request the kline (candlestick) history for a specific trading pair
async def get_info_param(client, symbol, interval, start, end):
    """Fetch historical klines for *symbol* and dump them.

    Prints the raw kline list as JSON and writes one kline per line to
    spot_margin.txt (overwriting any previous contents).

    NOTE(review): *start*/*end* look like human-readable date strings
    (e.g. "1 June, 2021") passed straight to the client — confirm against
    the binance client's get_historical_klines contract.
    """
    klines = await client.get_historical_klines(symbol, interval, start, end)
    print(json.dumps(klines, indent=2))
    with open('spot_margin.txt', 'w') as d:
        for line in klines:
            d.write(f'{line}\n')
#"1 June, 2021", "30 June, 2021"
'''
Это все часть асинхронного запроса
get_account(**params) достать из этой функции весь баланс
'''
async def main():
    """Entry point: create an authenticated Binance AsyncClient, run the
    currently enabled action (get_orders_func) and close the connection.

    The large commented-out and triple-quoted sections below are earlier
    experiments kept for reference (balance queries, order placement,
    kline history, CSV dumps); they are not executed.
    """
    client = await AsyncClient.create(api_key, api_secret)
    #print(client.response.headers)
    #await get_account(client)
    #await create_order(client, 'RVNBTC', 'BUY', 'LIMIT', 200, '0.000001')
    #contents = await readJSONFile("create_order.json")
    #print(type(contents))
    #for key in contents:
    #    print(key)
    #await get_info_param(client, "RVNBTC", "1h", "1 June, 2021", "23 July, 2021")
    #==============================
    #getArrInput = await readFileINPUT(client)
    #==============================
    #orders = await client.get_all_orders(symbol='')
    #print(json.dumps(orders, indent=2))
    #await get_pair(client)
    # The only live action: list orders via the helper defined elsewhere.
    await get_orders_func(client, 0)
    '''
    #получение всех актуальных пар для нас
    avg_price = await client.get_account()
    arr = []
    pair = []
    for key in avg_price["balances"]:
        if key['free'] != '0.00000000' and key['free'] != '0.00':
            arr.append(key['asset'])
    products = await client.get_products()
    #распарсиваем ответ от сервера
    for key in products["data"]:
        for symbol in arr:
            if symbol == key["b"]:
                pair.append(key["s"])
                print(key["s"])
    #получение всех актуальных пар для нас
    '''
    #print(json.dumps(products, indent=2))
    #for mass in products:
    #    for key in mass:
    #        print(key)
    '''
    symb = "RVNBTC"
    depth = await client.get_my_trades(symbol=symb)
    print(json.dumps(depth, indent=2))
    '''
    '''
    #Выдача ордеров на данный момент
    try:
        depth = await client.get_order_book(symbol='BNBBTC')
        print(json.dumps(depth, indent=2))
    except Exception as e:
        print(e)
    '''
    '''
    #блок добления на условия для дальнейшего последовательноо сценария
    for key in getArrInput:
        #если 0, то устанавливаем ордер
        if key[1] == '0':
            #await create_order(client, key[2], key[3], key[4], key[5], key[6])
            await create_order(client, "RVNBNB", "SELL", "LIMIT", 195.0, "0.00034")
        #если 1, то выдача информации по паре за определенный промежуток времени
        elif key[1] == '1':
            await get_info_param(client, "RVNBTC", "1h", "1 June, 2021", "23 July, 2021")
    '''
    #==============================
    #await get_currency(client)
    '''
    #get_all_tickers() → List[Dict[str, str]]
    #список -> словарь
    avg_price = await client.get_account()
    arr = []
    arr1 = []
    for key in avg_price["balances"]:
        #print(i)
        #print(key['asset'])
        if key['asset'] == 'BTC':
            #print(key['free'])
            arr.append(key)
        if key['asset'] == 'RVN':
            #print(key['free'])
            arr.append(key)
        if key['free'] != '0.00000000' and key['free'] != '0.00':
            arr1.append(key)
    for key in arr:
        print(key)
    with open('btc_bars2.csv', 'w') as d:
        for line in arr:
            d.write(f'{line["asset"]}, {line["free"]}\n')
    with open('btc_bars3.csv', 'w') as d:
        for line in arr1:
            d.write(f'{line["asset"]}, {line["free"]}\n')
    '''
    '''
    for keys,values in avg_price.items():
        if values == "MLN":
            print(keys)
            print(values)
        if keys == "MLN":
            print(keys)
            print(values)
    '''
    '''
    #покупка
    try:
        buy_limit = await client.create_order(
            symbol='RVNBTC',
            side='BUY',
            type='LIMIT',
            timeInForce='GTC',
            quantity=200,
            price='0.000001')
    except BinanceAPIException as e:
        # error handling goes here
        print(e)
    except BinanceOrderException as e:
        # error handling goes here
        print(e)
    print(buy_limit)
    '''
    '''
    #выдать все открытые ордера
    orders = await client.get_open_orders(symbol='RVNBTC')
    print(json.dumps(orders, indent=2))
    #выдать ордер по конкретному id
    order = await client.get_order(symbol='RVNBTC', orderId=131134106)
    print(json.dumps(order, indent=2))
    #отменить заказ
    result = await client.cancel_order(symbol='RVNBTC', orderId=131134106)
    print(json.dumps(result, indent=2))
    #tickers = await client.get_ticker()
    #print(json.dumps(tickers, indent=2))
    '''
    # Always release the underlying aiohttp session.
    await client.close_connection()
if __name__ == "__main__":
    # asyncio.run() creates, runs and cleanly closes the event loop;
    # get_event_loop()/run_until_complete() is deprecated for this use.
    asyncio.run(main())
    # Keep the console window open until the user presses Enter.
    input()
"""
You've got much data to manage and of course you use
zero-based and non-negative ID's to make each data item unique!
Therefore you need a method, which returns the smallest
unused ID for your next new data item...
Note: The given array of used IDs may be unsorted. For
test reasons there may be duplicate IDs, but you don't
have to find or remove them!
Go on and code some pure awesomeness!
"""
def next_id(arr):
    """Return the smallest non-negative integer not present in *arr*.

    The input may be unsorted and may contain duplicates; neither needs
    to be fixed up first.
    """
    # A set gives O(1) membership tests, making the scan O(n) overall
    # instead of O(n^2) with repeated list lookups.
    used = set(arr)
    candidate = 0
    while candidate in used:
        candidate += 1
    return candidate
print("Tests:")
print(next_id([0,1,2,3,4,5,6,7,8,9,10]))
print(next_id([5,4,3,2,1]))
print(next_id([0,1,2,3,5]))
print(next_id([0,0,0,0,0,0]))
print(next_id([]))
print(next_id([0,0,1,1,2,2]))
print(next_id([0,1,1,1,3,2]))
print(next_id([0,1,0,2,0,3]))
print(next_id([9,8,0,1,7,6]))
print(next_id([9,8,7,6,5,4])) |
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import logging
import paleomix
import paleomix.common.logging
import paleomix.resources
import paleomix.yaml
from paleomix.pipeline import Pypeline
from paleomix.nodes.samtools import FastaIndexNode
from paleomix.nodes.bwa import BWAIndexNode
from paleomix.nodes.bowtie2 import Bowtie2IndexNode
from paleomix.nodes.validation import ValidateFASTAFilesNode
from paleomix.pipelines.bam.makefile import MakefileError, read_makefiles
from paleomix.pipelines.bam.parts import Reads
import paleomix.pipelines.bam.parts as parts
import paleomix.pipelines.bam.config as bam_config
import paleomix.pipelines.bam.mkfile as bam_mkfile
def build_pipeline_trimming(config, makefile):
    """Build only the nodes required to produce trimmed reads.

    Walking the Targets hierarchy keeps the makefile requirements to a
    minimum: every Raw/Trimmed record contributes its Reads nodes and
    everything else is skipped.
    """
    nodes = []
    for samples in makefile["Targets"].values():
        for libraries in samples.values():
            for barcodes in libraries.values():
                for record in barcodes.values():
                    if record["Type"] not in ("Raw", "Trimmed"):
                        continue
                    offset = record["Options"]["QualityOffset"]
                    nodes.extend(Reads(config, record, offset).nodes)
    return nodes
def build_pipeline_full(config, makefile, return_nodes=True):
    """Build the complete BAM-pipeline node graph for one makefile.

    Walks the Targets hierarchy (target -> prefix -> sample -> library ->
    lane), wrapping each level in the corresponding `parts` helper and
    dropping any level that ends up empty (e.g. lanes removed through
    ExcludeReads).  Returns a flat list of nodes when *return_nodes* is
    True, otherwise a list of parts.Target objects.
    """
    result = []
    features = makefile["Options"]["Features"]
    for (target_name, sample_records) in makefile["Targets"].items():
        prefixes = []
        for (_, prefix) in makefile["Prefixes"].items():
            samples = []
            for (sample_name, library_records) in sample_records.items():
                libraries = []
                for (library_name, barcode_records) in library_records.items():
                    lanes = []
                    for (barcode, record) in barcode_records.items():
                        lane = parts.Lane(config, prefix, record, barcode)
                        # ExcludeReads settings may exclude entire lanes
                        if lane.bams:
                            lanes.append(lane)
                    if lanes:
                        libraries.append(
                            parts.Library(
                                config=config,
                                target=target_name,
                                prefix=prefix,
                                lanes=lanes,
                                name=library_name,
                            )
                        )
                if libraries:
                    samples.append(
                        parts.Sample(
                            config=config,
                            prefix=prefix,
                            libraries=libraries,
                            name=sample_name,
                        )
                    )
            if samples:
                prefixes.append(
                    parts.Prefix(
                        config=config,
                        prefix=prefix,
                        samples=samples,
                        features=features,
                        target=target_name,
                    )
                )
        if prefixes:
            target = parts.Target(config, prefixes, target_name)
            # Construct coverage, depth-histogram, and summary nodes, etc.
            parts.add_statistics_nodes(config, makefile, target)
            if return_nodes:
                # Extra tasks (e.g. coverage, depth-histograms, etc.)
                result.extend(target.nodes)
                # Output BAM files
                result.extend(target.bams.values())
            else:
                result.append(target)
    return result
def index_references(config, makefiles):
    """Create validation and indexing nodes for every unique reference FASTA.

    Each prefix in each makefile gets three node tuples attached:
    "Nodes" (validation + faidx), "Nodes:BWA" and "Nodes:Bowtie2" (the same
    plus the aligner-specific index).  Nodes are shared between prefixes and
    makefiles that point at the same FASTA path.
    """
    references = {}
    references_bwa = {}
    references_bowtie2 = {}
    for makefile in makefiles:
        for subdd in makefile["Prefixes"].values():
            reference = subdd["Path"]
            if reference not in references:
                # Validation of the FASTA file; not blocking for the other
                # steps, as it is only expected to fail very rarely, but will
                # block subsequent analyses depending on the FASTA.
                valid_node = ValidateFASTAFilesNode(
                    input_file=reference,
                    output_file=reference + ".validated",
                )
                # Indexing of FASTA file using 'samtools faidx'
                faidx_node = FastaIndexNode(reference)
                # Indexing of FASTA file using 'bwa index'
                bwa_node = BWAIndexNode(
                    input_file=reference, dependencies=(valid_node,)
                )
                # Indexing of FASTA file using 'bowtie2-build'
                bowtie2_node = Bowtie2IndexNode(
                    input_file=reference, dependencies=(valid_node,)
                )
                references[reference] = (valid_node, faidx_node)
                references_bwa[reference] = (valid_node, faidx_node, bwa_node)
                references_bowtie2[reference] = (valid_node, faidx_node, bowtie2_node)
            subdd["Nodes"] = references[reference]
            subdd["Nodes:BWA"] = references_bwa[reference]
            subdd["Nodes:Bowtie2"] = references_bowtie2[reference]
def run(config, pipeline_variant):
    """Execute the BAM ("bam") or trimming ("trim") pipeline variant.

    Validates the temp root, reads the makefiles, builds the node graph and
    either lists files/executables or runs the pipeline.  Returns 0 on
    success and 1 on any configuration or runtime error.
    """
    paleomix.common.logging.initialize(
        log_level=config.log_level, log_file=config.log_file, name="bam_pipeline"
    )
    logger = logging.getLogger(__name__)
    if pipeline_variant not in ("bam", "trim"):
        logger.critical("Unexpected BAM pipeline variant %r", pipeline_variant)
        return 1
    if not os.path.exists(config.temp_root):
        try:
            os.makedirs(config.temp_root)
        except OSError as error:
            logger.error("Could not create temp root: %s", error)
            return 1
    if not os.access(config.temp_root, os.R_OK | os.W_OK | os.X_OK):
        logger.error("Insufficient permissions for temp root: %r", config.temp_root)
        return 1
    # Init worker-threads before reading in any more data
    pipeline = Pypeline(config)
    try:
        makefiles = read_makefiles(config.makefiles, pipeline_variant)
    except (MakefileError, paleomix.yaml.YAMLError, IOError) as error:
        logger.error("Error reading makefiles: %s", error)
        return 1
    pipeline_func = build_pipeline_trimming
    if pipeline_variant == "bam":
        # Build .fai files for reference .fasta files
        index_references(config, makefiles)
        pipeline_func = build_pipeline_full
    for makefile in makefiles:
        logger.info("Building BAM pipeline for %r", makefile["Filename"])
        try:
            nodes = pipeline_func(config, makefile)
        except paleomix.node.NodeError as error:
            logger.error(
                "Error while building pipeline for %r:\n%s", makefile["Filename"], error
            )
            return 1
        pipeline.add_nodes(*nodes)
    if config.list_input_files:
        # BUG FIX: this branch previously logged "Printing output files"
        # even though it prints the *input* files.
        logger.info("Printing input files")
        pipeline.print_input_files()
        return 0
    elif config.list_output_files:
        logger.info("Printing output files")
        pipeline.print_output_files()
        return 0
    elif config.list_executables:
        logger.info("Printing required executables")
        pipeline.print_required_executables()
        return 0
    logger.info("Running BAM pipeline")
    if not pipeline.run(dry_run=config.dry_run, max_threads=config.max_threads):
        return 1
    return 0
def main(argv, pipeline="bam"):
    """Command-line entry point for the BAM ("bam") or trimming ("trim") pipeline.

    Prints help when *argv* is empty, dispatches the `makefile`/`mkfile` and
    `example` sub-commands, treats commands starting with "dry" as dry runs,
    and otherwise hands off to `run`.  Returns a process exit code.
    """
    if pipeline not in ("bam", "trim"):
        raise ValueError(pipeline)
    parser = bam_config.build_parser(pipeline)
    if not argv:
        parser.print_help()
        return 0
    args = parser.parse_args(argv)
    if args.command in ("makefile", "mkfile"):
        return bam_mkfile.main(args, pipeline=pipeline)
    elif args.command in ("example",):
        return paleomix.resources.copy_example("bam_pipeline", args)
    # Commands prefixed with "dry" (e.g. "dryrun") force a dry run.
    if args.command.startswith("dry"):
        args.dry_run = True
    return run(args, pipeline_variant=pipeline)
|
# word reversal
def inplace(text):
    """Return *text* reversed.

    The original used a manual two-pointer swap over a list and named its
    parameter `str`, shadowing the builtin; slicing is the idiomatic and
    equivalent form.
    """
    return text[::-1]
def inversion(text):
    """Reverse the order of space-separated words in *text*.

    As in the original: reverse the whole string, then reverse each word
    back into reading order.  Intermediate results are printed, matching
    the original's trace output.  The parameter no longer shadows the
    builtin `str`.
    """
    flipped = text[::-1]
    print(flipped)
    restored = []
    for chunk in flipped.split(" "):
        word = chunk[::-1]
        print(word)
        restored.append(word)
    return " ".join(restored)
if __name__ == '__main__':
a = "I love you too "
b = inversion(a)
print(b)
# b = inplace(a)
# print(b) |
from django.conf.urls.defaults import *
from piston.resource import Resource
from models import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Wrap the Piston handlers in Resource objects so they can be routed.
project_handler = Resource(AuthProjectHandler)
note_handler = Resource(AuthNoteHandler)
event_handler = Resource(AuthTimeEventHandler)
# NOTE(review): the patterns()-style urlconf and django.conf.urls.defaults
# are legacy Django (<= 1.9) syntax — this module targets an old Django.
urlpatterns = patterns('resources.views',
    (r'^test', 'test'),
    (r'^project/$', project_handler),
    # Project detail addressed by slug.
    (r'^project/(?P<slug>[a-zA-Z0-9_.-]+)$', project_handler),
    # (r'^note/$', note_handler),
    (r'^note/(?P<project_slug>[a-zA-Z0-9_.-]+)$', note_handler),
    # (r'^event/$', event_handler),
    (r'^event/(?P<project_slug>[a-zA-Z0-9_.-]+)$', event_handler),
)
|
from typing import Any, Dict, List
import pytest
import panel as pn
from panel.widgets import TextToSpeech, Utterance, Voice
TEXT = """By Aesop
There was a time, so the story goes, when all the animals lived together in harmony. The lion
didn’t chase the oxen, the wolf didn’t hunt the sheep, and owls didn’t swoop on the mice in the
field.
Once a year they would get together and choose a king, who would then reign over the animal
kingdom for the next twelve months. Those animals who thought they would like a turn at being king
would put themselves forward and would make speeches and give demonstrations of their prowess or
their wisdom. Then all the animals gathered together would vote, and the animal with the most
votes was crowned king. That’s probably where us humans got the idea of elections!
Now, monkey knew very well that he was neither very strong nor very wise, and he was not exactly
a great orator, but, boy, could he dance! So he did what he does best, and he danced acrobatically
and energetically, performing enormous leaps, back somersaults and cartwheels that truly dazzled
his audience. Compared to monkey, the elephant was grave and cumbersome, the lion was powerful and
authoritarian, and the snake was sly and sinister.
Nobody who was there remembers exactly how it happened, but somehow monkey scraped through with a
clear majority of all the votes cast, and he was announced the king of the animal kingdom for the
coming year. Most of the animals seemed quite content with this outcome, because they knew that
monkey would not take his duties too seriously and make all kinds of onerous demands on them, or
demand too much of a formal show of obedience. But there were some who thought that the election
of monkey diminished the stature of the kingship, and one of these was fox; in fact fox was pretty
disgusted, and he didn’t mind who knew it. So he set about concocting a scheme to make monkey look
stupid.
He gathered together some fine fresh fruit from the forest, mangos, figs and dates, and laid them
out on a trap he’d found. He waited for the monkey to pass by, and called out to him: “Sire, look
at these delicious dainty morsels I discovered here by the wayside. I was tempted to gorge myself
on them, but then I remembered fruits are your favourite repast, and I thought I should keep them
for you, our beloved king!”
Monkey could not resist either the flattery or the fruit, and just managed to compose himself long
enough to whisper a hurried “Why, thank you, Mr Fox” and made a beeline for the fruit. “Swish” and
“Clunk” went the trap, and “AAAYYY AAAYYY” went our unfortunate monkey king, the trap firmly
clasped around his paw.
Monkey bitterly reproached fox for leading him into such a dangerous situation, but fox just
laughed and laughed. “You call yourself king of all the animals,” he cried, “and you allow
yourself to be taken in just like that!”
Aesop
"""
_VOICES_NONE: List[Dict[str, Any]] = []
_VOICES_FIREFOX_WIN10: List[Dict[str, Any]] = [
{
"default": False,
"lang": "en-US",
"local_service": True,
"name": "Microsoft David Desktop - English (United States)",
"voice_uri": "urn:moz-tts:sapi:Microsoft David Desktop - English (United States)?en-US",
},
{
"default": False,
"lang": "en-US",
"local_service": True,
"name": "Microsoft Zira Desktop - English (United States)",
"voice_uri": "urn:moz-tts:sapi:Microsoft Zira Desktop - English (United States)?en-US",
},
]
_VOICES_CHROME_WIN10: List[Dict[str, Any]] = [
{
"default": True,
"lang": "en-US",
"local_service": True,
"name": "Microsoft David Desktop - English (United States)",
"voice_uri": "Microsoft David Desktop - English (United States)",
},
{
"default": False,
"lang": "en-US",
"local_service": True,
"name": "Microsoft Zira Desktop - English (United States)",
"voice_uri": "Microsoft Zira Desktop - English (United States)",
},
{
"default": False,
"lang": "de-DE",
"local_service": False,
"name": "Google Deutsch",
"voice_uri": "Google Deutsch",
},
{
"default": False,
"lang": "en-US",
"local_service": False,
"name": "Google US English",
"voice_uri": "Google US English",
},
{
"default": False,
"lang": "en-GB",
"local_service": False,
"name": "Google UK English Female",
"voice_uri": "Google UK English Female",
},
{
"default": False,
"lang": "en-GB",
"local_service": False,
"name": "Google UK English Male",
"voice_uri": "Google UK English Male",
},
{
"default": False,
"lang": "es-ES",
"local_service": False,
"name": "Google español",
"voice_uri": "Google español",
},
{
"default": False,
"lang": "es-US",
"local_service": False,
"name": "Google español de Estados Unidos",
"voice_uri": "Google español de Estados Unidos",
},
{
"default": False,
"lang": "fr-FR",
"local_service": False,
"name": "Google français",
"voice_uri": "Google français",
},
{
"default": False,
"lang": "hi-IN",
"local_service": False,
"name": "Google हिन्दी",
"voice_uri": "Google हिन्दी",
},
{
"default": False,
"lang": "id-ID",
"local_service": False,
"name": "Google Bahasa Indonesia",
"voice_uri": "Google Bahasa Indonesia",
},
{
"default": False,
"lang": "it-IT",
"local_service": False,
"name": "Google italiano",
"voice_uri": "Google italiano",
},
{
"default": False,
"lang": "ja-JP",
"local_service": False,
"name": "Google 日本語",
"voice_uri": "Google 日本語",
},
{
"default": False,
"lang": "ko-KR",
"local_service": False,
"name": "Google 한국의",
"voice_uri": "Google한국의",
},
{
"default": False,
"lang": "nl-NL",
"local_service": False,
"name": "Google Nederlands",
"voice_uri": "Google Nederlands",
},
{
"default": False,
"lang": "pl-PL",
"local_service": False,
"name": "Google polski",
"voice_uri": "Google polski",
},
{
"default": False,
"lang": "pt-BR",
"local_service": False,
"name": "Google português do Brasil",
"voice_uri": "Google português do Brasil",
},
{
"default": False,
"lang": "ru-RU",
"local_service": False,
"name": "Googleрусский",
"voice_uri": "Google русский",
},
{
"default": False,
"lang": "zh-CN",
"local_service": False,
"name": "Google\xa0普通话(中国大陆)",
"voice_uri": "Google\xa0普通话(中国大陆)",
},
{
"default": False,
"lang": "zh-HK",
"local_service": False,
"name": "Google\xa0粤語(香港)",
"voice_uri": "Google\xa0粤語(香港)",
},
{
"default": False,
"lang": "zh-TW",
"local_service": False,
"name": "Google 國語(臺灣)",
"voice_uri": "Google 國語(臺灣)",
},
]
@pytest.fixture
def voices():
    """Voice objects built from the recorded Firefox/Windows 10 voice dicts."""
    return Voice.to_voices_list(_VOICES_FIREFOX_WIN10)
def test_to_voices_dict_firefox_win10():
    """group_by_lang should bucket both Firefox voices under "en-US"."""
    # Given
    voices = Voice.to_voices_list(_VOICES_FIREFOX_WIN10)
    # When
    actual = Voice.group_by_lang(voices)
    # Then
    assert "en-US" in actual
    assert len(actual["en-US"]) == 2
def test_can_speak(document, comm):
    """Setting `value` should surface the text in the Bokeh model's `speak`."""
    text = "Give me back my money!"
    # When
    speaker = TextToSpeech()
    speaker.value = text
    model = speaker.get_root(document, comm)
    assert model.speak["text"] == text
def test_can_set_voices():
    """set_voices should derive "en-US" defaults from the Chrome voice list."""
    # Given
    voices = Voice.to_voices_list(_VOICES_CHROME_WIN10)
    utterance = Utterance()
    # When
    utterance.set_voices(voices)
    # Then: default lang/voice come from the first default voice (en-US).
    assert utterance.param.lang.default == "en-US"
    assert utterance.lang == "en-US"
    assert utterance.param.voice.default.lang == "en-US"
    assert utterance.voice == utterance.param.voice.default
def manualtest_get_app():
    """Build the manual-test app for the TextToSpeech widget.

    Returns a MaterialTemplate containing the widget plus a settings panel
    exposing its parameters; intended to be served manually (see the
    `pn.state.served` guard at module bottom).
    """
    text_to_speech = TextToSpeech(name="Speaker", value=TEXT, auto_speak=False)
    speaker_settings = pn.Param(
        text_to_speech,
        # BUG FIX: "speak" and "value" were each listed twice, which
        # rendered duplicate widgets in the settings panel.
        parameters=[
            "value",
            "speak",
            "paused",
            "speaking",
            "pending",
            "pause",
            "resume",
            "cancel",
            "lang",
            "voice",
            "pitch",
            "rate",
            "volume",
        ],
        widgets={
            "speak": {"button_type": "success"},
            "value": {"widget_type": pn.widgets.TextAreaInput, "height": 300},
        },
        expand_button=False,
        show_name=False,
    )
    component = pn.Column(
        text_to_speech,
        speaker_settings,
        width=500,
        sizing_mode="fixed",
    )
    template = pn.template.MaterialTemplate(title="Panel - TextToSpeech Widget")
    template.main.append(component)
    return template
if pn.state.served:
pn.extension(sizing_mode="stretch_width")
manualtest_get_app().servable()
|
#!/usr/bin/env python3
# coding=utf-8
"""
线性判别分析(LDA):西瓜数据集
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
csv_file = 'watermelon.csv'
def model_training():
    """Train and plot a two-class LDA on the watermelon data set.

    Reads `watermelon.csv` (columns Density, Sugariness, Label), computes
    the LDA projection direction W from the class means and within-class
    scatter, then scatter-plots both classes with the projection line.
    """
    df = pd.read_csv(csv_file, encoding="utf-8")
    m, n = df.shape
    df0, df1 = df[df.Label == 0], df[df.Label == 1]
    m0, m1 = df0.shape[0], df1.shape[0]
    X0 = np.mat(df0[['Density', 'Sugariness']].values[:])
    X1 = np.mat(df1[['Density', 'Sugariness']].values[:])
    # compute the class mean vectors (as column vectors)
    mean0 = np.mat(np.average(X0, axis=0)).T
    mean1 = np.mat(np.average(X1, axis=0)).T
    # accumulate the within-class scatter (covariance) matrices
    covmatrix0, covmatrix1 = np.mat(np.zeros((n - 1, n - 1))), np.mat(np.zeros((n - 1, n - 1)))
    for i in range(m0):
        covmatrix0 += (X0[i].T - mean0) * (X0[i] - mean0.T)
    for i in range(m1):
        covmatrix1 += (X1[i].T - mean1) * (X1[i] - mean1.T)
    # singular value decomposition, used to invert Sw = covmatrix0 + covmatrix1
    U, S, VT = np.linalg.svd(covmatrix0 + covmatrix1)
    m, n = U.shape[0], VT.shape[0]
    sigma = np.zeros((m, n))
    for i in range(min(m, n)):
        sigma[i][i] = S[i]
    U, S, V = np.mat(U), np.mat(sigma), np.mat(VT.transpose())
    # W = Sw^-1 (mean0 - mean1), computed via the SVD pseudo-inverse
    W = V * S.I * U.T * (mean0 - mean1)
    # W = (covmatrix0 + covmatrix1).I * (mean0 - mean1)
    # plot
    xcord1 = (X1[:, 0].T.tolist())[0]
    ycord1 = (X1[:, 1].T.tolist())[0]
    xcord2 = (X0[:, 0].T.tolist())[0]
    ycord2 = (X0[:, 1].T.tolist())[0]
    plt.figure(1)
    ax = plt.subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = np.arange(-0.2, 1.2, 0.1)
    w0, w1 = float(W[0]), float(W[1])
    # projection line through the origin: w0*x + w1*y = 0
    y = -1 * ((w0 * x) / w1)
    plt.sca(ax)
    plt.plot(x, y)
    plt.xlabel('Density')
    plt.ylabel('Sugariness')
    plt.title('LDA')
    plt.show()
if __name__ == '__main__':
model_training()
|
from __future__ import unicode_literals, print_function
import pytest
from spacy.attrs import LOWER
from spacy.matcher import Matcher
@pytest.mark.models
def test_simple_types(EN):
    """NER spans and labels on a simple sentence (requires the EN model)."""
    tokens = EN(u'Mr. Best flew to New York on Saturday morning.')
    ents = list(tokens.ents)
    # "Best" -> PERSON
    assert ents[0].start == 1
    assert ents[0].end == 2
    assert ents[0].label_ == 'PERSON'
    # "New York" -> GPE
    assert ents[1].start == 4
    assert ents[1].end == 6
    assert ents[1].label_ == 'GPE'
@pytest.mark.models
def test_consistency_bug(EN):
    '''Test an arbitrary sequence-consistency bug encountered during speed test'''
    tokens = EN(u'Where rap essentially went mainstream, illustrated by seminal Public Enemy, Beastie Boys and L.L. Cool J. tracks.')
    # Second doc is parsed with entity=False, then entities are added from the
    # matcher and the entity recognizer is run manually over the doc.
    tokens = EN(u'''Charity and other short-term aid have buoyed them so far, and a tax-relief bill working its way through Congress would help. But the September 11 Victim Compensation Fund, enacted by Congress to discourage people from filing lawsuits, will determine the shape of their lives for years to come.\n\n''', entity=False)
    tokens.ents += tuple(EN.matcher(tokens))
    EN.entity(tokens)
@pytest.mark.models
def test_unit_end_gazetteer(EN):
    '''Test a bug in the interaction between the NER model and the gazetteer'''
    # Gazetteer entry: "cal" (optionally "cal henderson") tagged as PERSON.
    matcher = Matcher(EN.vocab,
                      {'MemberNames':
                       ('PERSON', {},
                        [
                            [{LOWER: 'cal'}],
                            [{LOWER: 'cal'}, {LOWER: 'henderson'}],
                        ]
                        )
                       }
                      )
    doc = EN(u'who is cal the manager of?')
    # Only inject matcher entities when the statistical NER found none.
    if len(list(doc.ents)) == 0:
        ents = matcher(doc)
        assert len(ents) == 1
        doc.ents += tuple(ents)
        EN.entity(doc)
        assert list(doc.ents)[0].text == 'cal'
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pop.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector as connector
import pandas as pd
mydb = connector.connect(host="localhost", user="root", passwd="Password@123", database="reform")
mycursor = mydb.cursor(buffered=True)
class Ui_pop(object):
    """Generated UI class for the 'pop.ui' popup window (PyQt5).

    NOTE(review): the setup method is named `setupU`, not the pyuic
    convention `setupUi` — callers must use this exact name.
    """
    def setupU(self, MainWindow):
        # Basic window geometry and central widget.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(497, 188)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Label `user` (initially empty text).
        self.user = QtWidgets.QLabel(self.centralwidget)
        self.user.setGeometry(QtCore.QRect(210, 30, 67, 17))
        self.user.setText("")
        self.user.setObjectName("user")
        # Label `pas` — used by show() to display a cell value.
        self.pas = QtWidgets.QLabel(self.centralwidget)
        self.pas.setGeometry(QtCore.QRect(220, 90, 67, 17))
        self.pas.setText("")
        self.pas.setObjectName("pass")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Install the (translatable) window title.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    def show(self,d,r,c):
        # Display cell d[c][r] in the `pas` label.
        # NOTE(review): assumes d supports d[column][row] indexing (e.g. a
        # pandas DataFrame) — confirm against the caller.
        #self.pas.setText(d[c][r])
        self.pas.setText(d[c][r])
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_pop()
    # BUG FIX: the original called ui.setuUi(MainWindow), which does not
    # exist on Ui_pop (the method is defined as setupU) and raised
    # AttributeError at startup.
    ui.setupU(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
# Copyright (c) 2015-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from django.template import defaultfilters as filters
from django.urls import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import tables
from starlingx_dashboard.dashboards.admin.inventory.storages.lvg_params \
import forms
from starlingx_dashboard import api as stx_api
class ParamEdit(tables.LinkAction):
    """Row action that opens the modal "Edit" form for an LVG parameter."""
    name = "edit"
    verbose_name = _("Edit")
    url = "horizon:admin:inventory:storages:lvg:edit"
    classes = ("btn-edit", "ajax-modal")
    def get_link_url(self, params):
        # URL args: the local volume group id from the table kwargs plus the
        # parameter key identifying this row.
        return reverse(self.url, args=[self.table.kwargs['lvg_id'],
                                       params.key])
def get_parameters_name(datum):
    """Return the human-readable display name for the parameter row."""
    return forms.get_param_key_name(datum.key)
def get_parameters_value(datum):
    """Return the displayable value for the parameter row.

    Only the Cinder LVM-type parameter exposes its raw value; every other
    key (and any missing datum/value) renders as None.
    """
    if datum is None or datum.value is None:
        return None
    if datum.key == stx_api.sysinv.LVG_CINDER_PARAM_LVM_TYPE:
        return datum.value
    return None
class ParamsTable(tables.DataTable):
    """Horizon table listing the parameters of a local volume group."""
    name = tables.Column(get_parameters_name,
                         verbose_name=_('Name'))
    key = tables.Column('key', verbose_name=_('Key'))
    value = tables.Column(get_parameters_value,
                          verbose_name=_('Value'),
                          filters=[filters.linebreaksbr])
    class Meta(object):
        name = "params"
        verbose_name = _("Parameters")
        row_actions = (ParamEdit,)
    def get_object_id(self, datum):
        # Parameters have no numeric id; the key is unique within an LVG.
        return datum.key
    def get_object_display(self, datum):
        return datum.key
|
import copy
import hyperopt
import numpy as np
import xgboost as xgb
class CVTS(object):
    """Cross-validator for time-series data.

    Folds are expressed as date ranges rather than row counts: each fold is
    a pair ((tr_start, tr_end), (te_start, te_end)) of inclusive bounds
    compared element-wise against *date_index* to select row positions.

    Example with monthly data and a one- to three-month test horizon:

    >>> import pandas as pd
    >>> date_index = pd.date_range("1/1/2014", "12/1/2017", freq="MS")
    >>> tr_start_date = "1/1/2014"
    >>> tr_end_dates = pd.date_range("1/1/2015", "9/1/2017", freq="MS")
    >>> te_start_dates = tr_end_dates + pd.tseries.offsets.MonthBegin(1)
    >>> te_end_dates = tr_end_dates + pd.tseries.offsets.MonthBegin(3)
    >>> fold_dates = [[(tr_start_date, tr_end), (te_start, te_end)]
    ...               for (tr_end, te_start, te_end)
    ...               in zip(tr_end_dates, te_start_dates, te_end_dates)]
    >>> cvts = CVTS(date_index, fold_dates)
    """
    def __init__(self, date_index, fold_dates):
        """
        Parameters
        ----------
        date_index: array-like of dates, one per observation row.
        fold_dates: [((tr_start, tr_end), (te_start, te_end))]
            Inclusive date bounds for each fold.
        """
        self.date_index = date_index
        self.fold_dates = fold_dates
        self.n_splits = len(fold_dates)
    def split(self, X=None, y=None, groups=None):
        """Yield (train_positions, test_positions) index arrays per fold."""
        for (tr_lo, tr_hi), (te_lo, te_hi) in self.fold_dates:
            train_mask = (tr_lo <= self.date_index) & (self.date_index <= tr_hi)
            test_mask = (te_lo <= self.date_index) & (self.date_index <= te_hi)
            yield np.where(train_mask)[0], np.where(test_mask)[0]
    def get_n_splits(self, X=None, y=None, groups=None):
        """Number of folds (scikit-learn compatible signature)."""
        return self.n_splits
class ParamOptimizeCV(object):
    """Hyperparameter search for XGBoost via hyperopt + xgb.cv.

    `fit` minimizes the cross-validated *cv_metric* over *param_space*,
    picks the number of boosting rounds from a final xgb.cv run, and then
    re-trains on the full dataset; `predict` uses the resulting booster.
    """
    def __init__(self, param_space, num_evals, cv_metric,
                 fit_params=None, cv_params=None):
        """
        Parameters
        ----------
        param_space: {param_name: hyperopt_space}
        num_evals: int
            Number of hyperopt evaluations.
        cv_metric: string
            Name of cv_metric to minimize.
        fit_params: dict()
            Parameters we'll pass to xgb.train
        cv_params: dict()
            Parameters we'll pass to xgb.cv
        """
        self.param_space = param_space
        self.num_evals = num_evals
        self.__cv_metric = cv_metric
        # Shallow-copy caller dicts so later in-place updates don't leak out.
        self.fit_params = copy.copy(fit_params) or dict(params=dict())
        self.cv_params = copy.copy(cv_params) or dict(params=dict())
        self.optimized_params_ = dict()
        self.best_estimator_ = None
        self.num_boost_round_ = None
    @property
    def _cv_metric(self):
        # xgb.cv reports the held-out metric under this column name.
        return "test-%s-mean" % self.__cv_metric
    @staticmethod
    def _update_params(hp_params, input_params):
        """Merge trainable *hp_params* into input_params["params"] in place.

        Parameters
        ----------
        hp_params: {param_name: param_value}
            Trainable parameters.
        input_params: {param_name: param_value}
            Non-trainable parameters.
        """
        # BUG FIX: dict.iteritems() is Python 2 only; items() works on both.
        for param_name, param_val in hp_params.items():
            input_params["params"][param_name] = param_val
        return input_params
    def _cv_objective(self, dtrain):
        """Return a function of trainable params computing cross-validated loss.

        Returns
        -------
        kwargs -> float
        """
        def hyperopt_objective(kwargs):
            # hyperopt samples floats; xgboost requires an integer max_depth.
            if "max_depth" in kwargs:
                kwargs["max_depth"] = int(kwargs["max_depth"])
            params = self._update_params(hp_params=kwargs, input_params=self.cv_params)
            bst = xgb.cv(dtrain=dtrain, **params)
            loss = bst[self._cv_metric].min()
            return loss
        return hyperopt_objective
    def fit(self, dtrain):
        """Choose optimal parameters and tree count by CV, then refit on all data.

        Parameters
        ----------
        dtrain: xgb.DMatrix
        """
        if "evals" not in self.fit_params:
            # BUG FIX: the original concatenated dict items() lists, which is
            # Python 2 only (dict_items does not support +); just insert the
            # default evals entry.
            self.fit_params["evals"] = [(dtrain, "train")]
        f = self._cv_objective(dtrain=dtrain)
        optimized_params_ = hyperopt.fmin(f, space=self.param_space, algo=hyperopt.tpe.suggest,
                                          max_evals=self.num_evals)
        # Robustness: only coerce max_depth when it was actually searched.
        if "max_depth" in optimized_params_:
            optimized_params_["max_depth"] = int(optimized_params_["max_depth"])
        self.optimized_params_ = optimized_params_
        # choose number of trees
        cv_params = self._update_params(self.optimized_params_, self.cv_params)
        bst = xgb.cv(dtrain=dtrain, **cv_params)
        self.num_boost_round_ = bst[self._cv_metric].argmin()
        self.fit_params["num_boost_round"] = self.num_boost_round_
        # re-fit using number of trees found from cross validation
        train_params = self._update_params(hp_params=self.optimized_params_, input_params=self.fit_params)
        self.best_estimator_ = xgb.train(dtrain=dtrain, **train_params)
    def predict(self, dpredict, **kwargs):
        """Predict with the refit booster.

        Parameters
        ----------
        dpredict: xgb.DMatrix

        Returns
        -------
        array like
        """
        return self.best_estimator_.predict(data=dpredict, **kwargs)
|
import os
from airflow.models import DagBag
#################################
# DAG BAG Configuration path
#################################
#################################
# DAG BAG Configuration path
#################################
dags_dirs = [
    "~/parallel",
    "~/sequential",
    "~/templates",
    "~/triggers",
    "~/sensors"
]
#################################
# Iteration loop
#################################
# Load every DAG from each directory and expose it at module level so the
# Airflow scheduler (which scans module globals) can discover it.
for dags_dir in dags_dirs:  # renamed from `dir`, which shadowed the builtin
    dag_bag = DagBag(os.path.expanduser(dags_dir))
    if dag_bag:
        for dag_id, dag in dag_bag.dags.items():
            globals()[dag_id] = dag
    else:
        print(f"dag bag not valid: {dags_dir}")
|
from flask.views import MethodView
from app.server.helpers import *
from app.server.helpers.auth import login_required
from app.server.api.models import Task, TaskSchema, TournamentsToObject, Tournament
from flask import request
class GetAllTasksForTournament(MethodView):
    """Return every task of a tournament grouped by category, plus the
    ids of the tasks the current contestant has already solved."""

    @login_required
    def get(self):
        tournament_id = int(request.args['tournament_id'])
        tournament = Tournament.get_info(tournament_id)
        # Team tournaments are scored per team, not per user.
        contestant_id = g.user.id
        if not tournament.for_team_allowed and g.user.teams:
            contestant_id = g.user.teams[0]
        tournament_to_object = TournamentsToObject.get_one_or_none(tournament_id, contestant_id)
        if not tournament_to_object:
            return return_bad_status("Ты должен войти в турнир чтобы сдать таск")
        tasks = Task.dump_all_tasks_for_tournament_by_categories(tournament_id)
        # BUG FIX: `exclude` expects an iterable of field names; ('flag') is
        # just the string 'flag', which marshmallow iterates as the fields
        # 'f', 'l', 'a', 'g'. A one-element tuple hides the flag field.
        tasks = [{'category': key, 'tasks': TaskSchema(exclude=('flag',)).dump(value, many=True).data}
                 for key, value in tasks.items()]
        solved = tournament_to_object.get_solved_tasks_for_tournament_for_contestant_indices()
        solved = [x[0] for x in solved]
        return return_ok_status({'tasks': tasks, 'solved': solved})
|
#!/usr/bin/env python
import sys
from subprocess import call
for l in sys.stdin:
l = l[:-1]
print './youtube-dl --proxy 127.0.0.1:8087 ' + 'https://www.youtube.com/watch?{0}&list=PL782D0D8FA14D3055 > log/{1} 2>&1 &'.format(l, l)
|
"""
Created at 7:56 PM on 02 Jul 2014
Project: match
Subprogram: galcomb
Author: Andrew Crooks
Affiliation: Graduate Student @ UC Riverside
"""
'''
Combines science images with galmodel*.fits images and outputs galcomb*.fits for processing with sextractor.
Also outputs an image with just the simulated galaxies that were added in each batch called simgalset*.fits .
Runtime ~ 1 hr 5 min
'''
import gc
import sys
import pyfits # Open fits files
import numpy as np
from astropy.table import Table # Open simulated galaxy data table
def writebigfits(output_name, data_in, xsize, ysize, out_size, hdr_in='none'):
data = np.zeros((ysize, xsize), dtype=np.float32)
hdu = pyfits.PrimaryHDU(data=data)
header = hdu.header
if hdr_in is 'none':
headlen = (36*4)-1
else:
headlen = len(hdr_in)
while len(header) < headlen:
header.append()
header['NAXIS1'] = xsize
header['NAXIS2'] = ysize
header.tofile(output_name)
with open(output_name, 'rb+') as fobj:
fobj.seek(out_size)
fobj.write('\0')
if hdr_in is 'none':
pyfits.update(output_name, data_in, endcard=ignore_missing_end )
else:
pyfits.update(output_name, data_in, header=hdr_in)
print str(output_name)+' successfully written to file!'
return
def add_pstamp(input_image, postage_stamp, xloc, yloc):
    """
    Adds 'postage_stamp' onto 'input_image' in place, centered so that the
    stamp's center pixel lands on pixel (xloc, yloc) of the image
    (1-indexed, FITS convention). Stamp regions that fall outside the image
    are clipped. Returns the (mutated) input_image.

    Requires: sys, numpy
    Note: FITS images have x and y inverted so y comes first (i.e. [y][x])
    """
    # Dimensions of 'input_image' and 'postage_stamp'
    ysize_in = input_image.shape[0]
    xsize_in = input_image.shape[1]
    ysize_ps = postage_stamp.shape[0]
    xsize_ps = postage_stamp.shape[1]
    # Image pixels start at 1 but array indices start at 0.
    ypos = yloc - 1
    xpos = xloc - 1
    # Stamp center; only even-sized stamps are supported. BUG FIX: the
    # original computed the centers as floats ((size/2 + 1.0) - 1), and
    # float slice indices are rejected by numpy; integer floor division
    # yields the same value as an int.
    if xsize_ps % 2 == 0:
        xcent = xsize_ps // 2
    else:
        sys.exit(' Error, code not yet ready for postage stamps with odd number of pixels in any dimension!')
    if ysize_ps % 2 == 0:
        ycent = ysize_ps // 2
    else:
        sys.exit(' Error, code not yet ready for postage stamps with odd number of pixels in any dimension!')
    # The stamp must be strictly smaller than the image.
    if xsize_ps >= xsize_in or ysize_ps >= ysize_in:
        sys.exit('Error: Postage stamp resizing can only enlarge, ' +
                 'but postage stamp is larger than resized dimension(s)!')
    # The target pixel must lie inside the image.
    if xpos < 0 or xpos > xsize_in-1:
        sys.exit('Error: X center position is outside bounds of input_image dimensions!')
    if ypos < 0 or ypos > ysize_in-1:
        sys.exit('Error: Y center position is outside bounds of input_image dimensions!')
    # Destination window in the image; the offsets record how much of the
    # stamp is clipped away on each side.
    xr_offset = 0
    xl_offset = 0
    yb_offset = 0
    yt_offset = 0
    ymin = ypos - ycent
    ymax = ypos + ycent - 1
    xmin = xpos - xcent
    xmax = xpos + xcent - 1
    # Clip each axis independently. (The original repeated the identical
    # y-axis checks inside every x-axis branch; because the stamp is smaller
    # than the image, at most one side per axis can overflow, so the two
    # axes are independent.)
    if xmin < 0:
        xl_offset = -xmin
        xmin = 0
    elif xmax > xsize_in - 1:
        xr_offset = xmax - (xsize_in - 1)
        xmax = xsize_in - 1
    if ymin < 0:
        yb_offset = -ymin
        ymin = 0
    elif ymax > ysize_in - 1:
        yt_offset = ymax - (ysize_in - 1)
        ymax = ysize_in - 1
    # Add the visible part of the stamp onto the image window.
    input_image[ymin:ymax+1, xmin:xmax+1] += postage_stamp[yb_offset:ysize_ps-yt_offset, xl_offset:xsize_ps-xr_offset]
    return input_image
def main():
#Dashboard
wdir = '/home/lokiz/Desktop/fits/'
# Prefixes for iterated input and output files
galcomb = 'galcomb' # image with n sim gal added
galmodel = 'galmodel' # sim galaxy file prefix
galmodelset = 'galmodelset'
# Field and Filter selector
y = 0 # Field index
z = 3 # Filter index
fields = ['uds', 'bootes']
filters = ['f160w', 'f125w', 'f814w', 'f606w', 'f350lp']
field = fields[y]
filt = filters[z]
# Local Subdirectories and special input files
science = wdir + 'science/' + field + '/' + filt + '/'
simcomb = science + 'simcomb/'
simgal = science + 'simgal/'
simgalset = science + 'simgalset/'
galtab = 'galmodel_'+field+'_'+filt+'.tab'
# Instrument selector
if filt is 'f160w' or filt is 'f125w':
inst = 'wfc3'
elif filt is 'f814w' or filt is 'f606w':
inst = 'acs'
elif filt is 'f350lp':
inst = 'uvis'
else:
sys.exit('Error: No Filter Selected!')
# Read and array data from simulated galaxy table (galmodel.tab)
gal = Table.read(science + galtab, format='ascii.tab')
gxpix = gal[gal.colnames[1]]
gypix = gal[gal.colnames[2]]
# Set iterations and name input image
iteration = [5, 2000] # [ number of combined images, number of sim galaxies per combined image]
#outsize = os.stat(science+'hlsp_candels_hst_'+inst+'_'+field+'-tot_'+filt+'_v1.0_drz.fits').st_size
# Loop through batches of simulated galaxies
for w in range(0, iteration[0]):
# Read in image data and header from science image
image = pyfits.open(science+'hlsp_candels_hst_'+inst+'_'+field+'-tot_'+filt+'_v1.0_drz.fits')
hdu = image[0]
data = hdu.data
hdr = hdu.header
xpix = hdr['NAXIS1']
ypix = hdr['NAXIS2']
galset = np.zeros((ypix, xpix), dtype=np.float32)
print 'Preparing numpy array'
# Loop through the individual simulated galaxies
for x in xrange(iteration[1]):
# Open simulated galaxy postage stamp, subtract sky, and zero out values less than zero.
# Then add sku subtracted simulated galaxy to array of zeros.
gimage = pyfits.open(simgal + galmodel + str(x+w*iteration[1]) + '.fits')[0].data
data = add_pstamp(data, gimage, gxpix[x+w*iteration[1]], gypix[x+w*iteration[1]])
data_out = pyfits.PrimaryHDU(data=data, header=hdr)
print 'Beginning to write '+simcomb + galcomb + str(w) + '.fits file to disk.'
data_out.writeto(simcomb + galcomb + str(w) + '.fits', clobber=True)
print simcomb + galcomb + str(w) + '.fits successfully written to file!'
# Loop through the individual simulated galaxies
for y in xrange(iteration[1]):
# Open simulated galaxy postage stamp, subtract sky, and zero out values less than zero.
# Then add sku subtracted simulated galaxy to array of zeros.
gimage = pyfits.open(simgal + galmodel + str(y+w*iteration[1]) + '.fits')[0].data
galset = add_pstamp(galset, gimage, gxpix[y+w*iteration[1]], gypix[y+w*iteration[1]])
# Write just simulated galaxy batch to galmodelset*.fits and the simulared + science to galcomb*.fits
galset_out = pyfits.PrimaryHDU(data=galset)
print 'Beginning to write '+simgalset + galmodelset + str(w) + '.fits file to disk.'
galset_out.writeto(simgalset + galmodelset + str(w) + '.fits', clobber=True)
print simgalset + galmodelset + str(w) + '.fits successfully written to file!'
#writebigfits(simgalset + galmodelset + str(w) + '.fits', galset, xpix, ypix, outsize)
#writebigfits(simcomb + galcomb + str(w) + '.fits', data, xpix, ypix, outsize, hdr_in =hdr)
# Run the batch-combination pipeline when executed as a script.
if __name__ == "__main__":
    main()
|
##first generate a list with all terms
##sort the list. then remove duplicates.
def combo():
    """Return a**b for every a, b in 2..100 (duplicates included).

    Fixes: the local `list` shadowed the builtin, `temp` was a pointless
    intermediate, and `for ... else: return` was a confusing way to return
    after the loop finishes.
    """
    terms = []
    for a in range(2, 101):
        for b in range(2, 101):
            terms.append(a ** b)
    return terms
def sortdupe(values):
    """Sort `values` in place, then print the count of distinct elements.

    Fixes: the parameter shadowed the builtin `list`, the O(n^2)
    `i not in newlist` scan is replaced by a set, and the dangling
    `for ... else` is gone. The in-place sort of the argument is kept,
    matching the original behavior.
    """
    values.sort()
    print(len(set(values)))
# Build the list of powers, then report how many distinct values it holds.
sortdupe(combo())
|
#!/usr/bin/python
import requests
import ConfigParser
from getpass import getpass
class Setup:
URLS = {
"1": "https://www.fireservicerota.co.uk",
"2": "https://www.brandweerrooster.nl"
}
CONFIG_FILE = '.local_settings.ini'
domain = None
api_key = None
def __init__(self):
pass
def get_settings(self):
self.read_configuration()
while(not self.is_configured()):
self.ask_user()
self.write_configuration()
return {'domain': self.domain, 'api_key': self.api_key}
def is_configured(self):
return self.domain != None and self.api_key != None
def read_configuration(self):
config = ConfigParser.ConfigParser()
config.read(self.CONFIG_FILE)
try:
self.domain = config.get('Main', 'Domain')
self.api_key = config.get('Main', 'APIKey')
finally:
return
def write_configuration(self):
config = ConfigParser.ConfigParser()
config.add_section('Main')
config.set('Main', 'Domain', self.domain)
config.set('Main', 'APIKey', self.api_key)
cfgfile = open('.local_settings.ini', 'w')
config.write(cfgfile)
cfgfile.close()
def get_api_key(self):
url_template = '{}/api/sessions'
url = url_template.format(self.domain)
result = requests.post(url, json = {'user_login': self.email, 'password': self.password})
response_json = result.json()
success = response_json['success']
if(success):
return response_json['auth_token']
else:
return None
def ask_user(self):
while True:
self.ask_system_choice()
self.ask_email()
self.ask_password()
self.api_key = self.get_api_key()
if self.api_key:
print "Logged in"
print
return
else:
print
print "Invalid email or password. Please try again"
print
def ask_email(self):
self.email = raw_input("Please enter your email address: ")
def ask_password(self):
self.password = getpass("Please enter your password: ")
def ask_system_choice(self):
print "Please select the system you use"
print "1. FireServiceRota (international)"
print "2. Brandweerrooster (Netherlands)"
while True:
self.system_choice = raw_input("Please enter 1 or 2: ")
if self.system_choice in ["1", "2"]:
break
self.domain = self.URLS[self.system_choice]
return
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from __future__ import division, with_statement
'''
Copyright 2010, 陈同 (chentong_biology@163.com).
Please see the license file for legal information.
===========================================================
'''
__author__ = 'chentong & ct586[9]'
__author_email__ = 'chentong_biology@163.com'
#=========================================================
# Python-2-only script; the unreachable `if False:` block merely records
# (via a py2 print statement) that it will not run under Python 3.
if False:
    print "This program does not work under python 3, \
run in python 2.x."
import sys
from time import localtime, strftime
# Timestamp format for the run log appended at exit.
timeformat = "%Y-%m-%d %H:%M:%S"
def main():
    """Filter a GTF stream and reprint its rows grouped and sorted.

    With one argument (the GTF file, '-' for stdin) every non-CDS row is
    kept; with a second (any) argument only exon/start_codon/stop_codon
    rows are kept. Rows are grouped by chromosome plus the first two
    attribute fields and printed sorted by (start, end, feature).
    """
    lensysargv = len(sys.argv)
    if lensysargv < 2:
        print >>sys.stderr, "Print the result to screen"
        print >>sys.stderr, 'Using python %s filename[- means \
sys.stdin] forParseGTF(given anything to turn on this option, optional)' % sys.argv[0]
        sys.exit(0)
    #-----------------------------------
    file = sys.argv[1]
    # Any third argument switches to a whitelist of feature types.
    if lensysargv == 3:
        keepRows = ['exon', 'start_codon', 'stop_codon']
    else:
        keepRows = []
    if file == '-':
        fh = sys.stdin
    else:
        fh = open(file)
    aDict = {}
    for line in fh:
        lineL = line.split("\t")
        # Keep the row if it matches the whitelist, or (no whitelist)
        # if it is anything but a CDS row.
        if (keepRows and lineL[2] in keepRows) or ((not keepRows) and lineL[2] != 'CDS'):
            # Group key: chromosome plus the first two attributes of
            # column 9 (typically gene_id and transcript_id).
            firstKey = lineL[0] + '.'.join(lineL[8].split("; ")[0:2])
            if firstKey not in aDict:
                aDict[firstKey] = {}
            # Sort key within the group; a duplicate (start, end, feature)
            # is treated as corrupt input.
            secondKey = (int(lineL[3]), int(lineL[4]), lineL[2])
            assert secondKey not in aDict[firstKey], \
                (firstKey, secondKey)
            aDict[firstKey][secondKey] = line
    #-------------END reading file----------
    #----close file handle for files-----
    if file != '-':
        fh.close()
    #-----------end close fh-----------
    # Print groups and rows in sorted order (Python 2: keys() is a list).
    firstKeyL = aDict.keys()
    firstKeyL.sort()
    for firstKey in firstKeyL:
        secondKeyL = aDict[firstKey].keys()
        secondKeyL.sort()
        for secondKey in secondKeyL:
            # Trailing comma: each stored line already ends with '\n'.
            print aDict[firstKey][secondKey],
# Script entry point: run main() and append the command line plus the
# start/end timestamps to python.log.
if __name__ == '__main__':
    startTime = strftime(timeformat, localtime())
    main()
    endTime = strftime(timeformat, localtime())
    fh = open('python.log', 'a')
    print >>fh, "%s\n\tRun time : %s - %s " % \
        (' '.join(sys.argv), startTime, endTime)
    fh.close()
|
#!/usr/bin/python3
import unittest
from python.common.baseunittest import BaseUnitTest
from python.eapi.methods.loans.loan import Loan
class TestLoans(BaseUnitTest):
    """Runs Loan test scenarios."""

    @BaseUnitTest.log_try_except
    def test_01_get_multi(self):
        """
        1. Get multiple.
        """
        loan = Loan()
        loan.get_multi()

    @BaseUnitTest.log_try_except
    def test_02_get_one(self):
        """
        1. Choose a random existing team id.
        2. Get only that team.
        """
        loan = Loan()
        chosen = loan.choose_random()
        loan.get_one(chosen)

    @BaseUnitTest.log_try_except
    def test_03_post_one(self):
        """
        1. Create new.
        2. Get the newly created.
        """
        loan = Loan()
        created = loan.post_one()
        loan.get_one(created)

    @BaseUnitTest.log_try_except
    def test_04_patch_one(self):
        """
        1. Create new.
        2. Get the newly created.
        3. Update the created.
        4. Get the updates.
        """
        loan = Loan()
        created = loan.post_one()
        fetched = loan.get_one(created)
        loan.patch_one(fetched)
        loan.get_one(fetched)

    @BaseUnitTest.log_try_except
    def test_05_delete_one(self):
        """
        1. Create new.
        2. Get the newly created.
        3. Delete the new.
        4. Get the freshly deleted, and expect a 404 response.
        """
        loan = Loan()
        created = loan.post_one()
        loan.get_one(created)
        loan.delete_one(created)
        loan.get_one(created, expected_code=404)
# Allow running this module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
|
'''
Situations where recursion is useful:
(1) exponentiation
(2) optimized exponentiation
(3) palindrome testing
(4) binary search
(5) counting subset choices
'''
import time
# 使用递归的几种情况:
# (1)指数函数
def power(n, k):
    """Compute n to the k-th power by linear recursion (k multiplications)."""
    # Base case: anything to the 0th power is 1.
    if k == 0:
        return 1
    return n * power(n, k - 1)
# (2)优化的指数函数
def power_optimize(n, k):
    """Compute n**k by divide-and-conquer (fast) exponentiation, O(log k)."""
    if k == 0:
        return 1
    half = power_optimize(n, k // 2)
    squared = half * half
    # An odd exponent needs one extra factor of n.
    return squared if k % 2 == 0 else n * squared
# (3)判断回文字符串
def loop_str(s):
    """Return True if s is a palindrome, recursing inward from both ends."""
    # Strings of length 0 or 1 are trivially palindromes.
    if len(s) > 1:
        return s[0] == s[-1] and loop_str(s[1:-1])
    return True
# (4)二分查找
def binary_search(s, start, end, value):
    """Return the index of value in sorted s[start..end] (inclusive), else -1."""
    lo, hi = start, end
    # Iterative narrowing: same midpoints as the recursive original.
    while lo <= hi:
        mid = (lo + hi) // 2
        if s[mid] == value:
            return mid
        if s[mid] < value:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# (5)统计子集个数
#一共要选4个人,假设有一个Bob,可以分为选中Bob和没选中Bob两种情况
def C(n,k):
    """Count the ways to choose k people from n, via Pascal's rule."""
    # Boundary columns of Pascal's triangle are 1.
    if k in (0, n):
        return 1
    # Split on whether one fixed person is excluded or included.
    return C(n - 1, k) + C(n - 1, k - 1)
# Demo driver: time both exponentiation variants, then exercise the
# palindrome test, binary search, and subset count.
if __name__ == '__main__':
    time_a = time.time()
    print(power(5, 11))
    time_b = time.time()
    print('%fms' % ((time_b - time_a) * 1000))
    time_a = time.time()
    print(power_optimize(5, 11))
    time_b = time.time()
    print('%fms' % ((time_b - time_a) * 1000))
    print(loop_str('abcb'))
    print(binary_search([1,3,5,7,9],0,4,5))
    print(C(8,4))
|
from django.urls import path
from . import views as uv
# URL routes for the site's static pages; each `name` is used for reverse
# lookups in templates ({% url %}) and redirects.
urlpatterns = [
    path('', uv.home, name='homepage'),
    path('about/', uv.about, name='about'),
    path('services/', uv.services, name='services'),
    path('contact/', uv.contact, name='contact'),
    path('profile/', uv.profile, name='profile'),
]
# Rough northern-hemisphere season lookup: whole months first, then the four
# months containing an equinox/solstice, split by day of month.
month = input("Enter the month:")
# BUG FIX: input() returns a string; the day must be an int so the numeric
# comparisons below (day < 20, etc.) don't raise TypeError on Python 3.
day = int(input("Enter the day of the month:"))
# A month matches at most one branch, so the separate `if` statements are
# folded into a single elif chain.
if month in ["January", "February"]:
    print("It's winter!")
elif month in ["April", "May"]:
    print("It's Spring!")
elif month in ["July", "August"]:
    print("It's Summer!")
elif month in ["October", "November"]:
    print("It's Fall!")
elif month == "March":
    if day < 20:
        print("It's winter!")
    else:
        print("It's Spring!")
elif month == "June":
    if day < 21:
        print("It's Spring!")
    else:
        print("It's Summer!")
elif month == "September":
    if day < 22:
        print("It's Summer!")
    else:
        print("It's Fall!")
elif month == "December":
    if day < 21:
        print("It's Fall!")
    else:
        print("It's winter!")
# Price list in yen, keyed by meat name (key spelling kept as-is: it is data).
meat = {'beef': 199, 'pork': 99, 'chiken': 49}
for item, cost in meat.items():
    print(item, 'is', cost, 'yen')
from graphql import (
GraphQLObjectType
)
from .listen_to_messages import ListenToMessagesSubscription
# GraphQL root object exposing every subscription field of the schema.
RootSubscriptionType = GraphQLObjectType(
    "Subscriptions",
    {
        'listenToMessages': ListenToMessagesSubscription
    },
)
# Public API of this module.
__all__ = ['RootSubscriptionType']
|
"""Główny moduł programu."""
import sys
import pygame
from pygame.sprite import Group
from settings import Settings
import functions as fct
from UI_buttons import (ColorIndicator, EraserButton,
ReferenceGridButton, ClearButton, ButtonsGroup,
SaveButton, LoadButton, RectangleButton,
DrawingButton)
from tools import (VerticalReferenceGrid, HorizontalReferenceGrid,
Pointer)
from drawing import DrawingElement
def run_program():
    """Initialize the painter and run its main event loop.

    (Comments translated from Polish; code unchanged.)
    """
    pygame.init()
    settings = Settings()
    screen = pygame.display.set_mode((settings.screen_width,
                                      settings.screen_height))
    pygame.display.set_caption("Sprite Painter")
    # Create the invisible background grid of cells.
    background_grid = Group()
    fct.create_background_grid(settings, screen, background_grid)
    # Group in which the drawing is stored.
    drawing = Group()
    # Create the frame.
    frame = Group()
    fct.create_frame(screen, settings, frame)
    # Build the toggleable reference grid that makes drawing easier.
    reference_grid = Group()
    for collumn_number in range(int(settings.width_grid_number
                                    / settings.reference_grid_size)):
        vertical_reference_grid = VerticalReferenceGrid(screen,
                                                        settings)
        vertical_reference_grid.rect.centerx = (
            (collumn_number+1)
            * settings.width_grid
            * settings.reference_grid_size)
        reference_grid.add(vertical_reference_grid)
    for row_number in range(int(settings.height_grid_number
                                / settings.reference_grid_size)):
        horizontal_reference_grid = HorizontalReferenceGrid(screen,
                                                            settings)
        horizontal_reference_grid.rect.centery = (
            (row_number+1)
            * settings.height_grid
            * settings.reference_grid_size)
        reference_grid.add(horizontal_reference_grid)
    # Tracks whether a mouse button is currently held down.
    mouse_down = False
    # Group of color buttons.
    buttons_group = ButtonsGroup(screen, settings)
    # Group of auxiliary UI buttons.
    UI_buttons_group = Group()
    # Indicator showing the currently selected color.
    color_indicator = ColorIndicator(screen, settings)
    UI_buttons_group.add(color_indicator)
    # Button that erases drawing elements.
    eraser_button = EraserButton(screen, settings)
    UI_buttons_group.add(eraser_button)
    # Button that toggles the helper reference grid.
    reference_grid_button = ReferenceGridButton(screen, settings)
    UI_buttons_group.add(reference_grid_button)
    # Button that clears the entire drawing.
    clear_button = ClearButton(screen, settings)
    UI_buttons_group.add(clear_button)
    # Button that saves the image.
    save_button = SaveButton(screen, settings)
    UI_buttons_group.add(save_button)
    # Button that loads an image.
    load_button = LoadButton(screen, settings)
    UI_buttons_group.add(load_button)
    # Button that enables rectangle drawing.
    rectangle_button = RectangleButton(screen, settings)
    UI_buttons_group.add(rectangle_button)
    # Button that enables freehand drawing.
    drawing_button = DrawingButton(screen, settings)
    UI_buttons_group.add(drawing_button)
    # Invisible square following the cursor; paints at the click position.
    pointer = Pointer(screen, settings)
    # Group holding temporary in-progress shapes.
    rectangles = Group()
    while True:
        # Set or refresh the cursor position.
        mouse_x, mouse_y = pygame.mouse.get_pos()
        pointer.rect.centerx = mouse_x
        pointer.rect.centery = mouse_y
        # React to keys and buttons.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                # Enter "mouse held" mode.
                mouse_down = True
            elif event.type == pygame.MOUSEBUTTONUP:
                # Leave "mouse held" mode.
                mouse_down = False
                # Re-arm the reference-grid toggle so it can only trigger
                # again on the next click after the button is released.
                settings.enable_changing_grid = True
                # If a shape is being created, commit the currently
                # laid-out shape to the drawing.
                # NOTE(review): len(...) >= 0 is always true; this was
                # probably meant to be > 0 -- confirm before changing.
                if len(rectangles) >= 0:
                    fct.drawing_rectangle(background_grid, rectangles,
                                          drawing, screen, settings)
        if mouse_down == True:
            # React to the mouse being clicked and held.
            fct.mouse_click(screen, settings, background_grid, drawing,
                            buttons_group, frame, color_indicator,
                            eraser_button, reference_grid_button,
                            pointer, clear_button, save_button,
                            load_button, rectangle_button, rectangles,
                            drawing_button)
        fct.update_screen(screen, drawing, frame, buttons_group,
                          UI_buttons_group, reference_grid,
                          reference_grid_button, rectangles)
# Start the painter as soon as the module runs.
run_program()
|
from .asm import ASM
from .attachment import Attachment
from .bcc_settings import BCCSettings
from .bypass_list_management import BypassListManagement
from .category import Category
from .click_tracking import ClickTracking
from .content import Content
from .custom_arg import CustomArg
from .email import Email
from .exceptions import SendGridException, APIKeyIncludedException
from .footer_settings import FooterSettings
from .from_email import From
from .ganalytics import Ganalytics
from .header import Header
from .html_content import HtmlContent
from .mail_settings import MailSettings
from .mail import Mail
from .open_tracking import OpenTracking
from .personalization import Personalization
from .plain_text_content import PlainTextContent
from .sandbox_mode import SandBoxMode
from .section import Section
from .spam_check import SpamCheck
from .subject import Subject
from .subscription_tracking import SubscriptionTracking
from .substitution import Substitution
from .tracking_settings import TrackingSettings
from .to_email import To
from .cc_email import Cc
from .bcc_email import Bcc
from .validators import ValidateAPIKey
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from datascience import *
import matplotlib
# Location of the course's data files, relative to this notebook.
path_data = '../../assets/data/'
# Non-interactive backend so figures render without a display.
matplotlib.use('Agg')
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
import numpy as np
# Abbreviate long array printouts.
np.set_printoptions(threshold=50)
# # Visualization
# Tables are a powerful way of organizing and visualizing data. However, large tables of numbers can be difficult to interpret, no matter how organized they are. Sometimes it is much easier to interpret graphs than numbers.
#
# In this chapter we will develop some of the fundamental graphical methods of data analysis. Our source of data is the [Internet Movie Database](http://www.imdb.com), an online database that contains information about movies, television shows, video games, and so on. The site [Box Office Mojo](http://www.boxofficemojo.com) provides many summaries of IMDB data, some of which we have adapted. We have also used data summaries from [The Numbers](http://www.the-numbers.com), a site with a tagline that says it is "where data and the movie business meet."
# <h2>Scatter Plots and Line Graphs</h2>
# The table `actors` contains data on Hollywood actors, both male and female. The columns are:
#
# | **Column** | Contents |
# |---------------------|----------|
# |`Actor` | Name of actor |
# |`Total Gross` | Total gross domestic box office receipt, in millions of dollars, of all of the actor's movies |
# | `Number of Movies` | The number of movies the actor has been in |
# | `Average per Movie` | Total gross divided by number of movies |
# | `#1 Movie` | The highest grossing movie the actor has been in |
# | `Gross` | Gross domestic box office receipt, in millions of dollars, of the actor's `#1 Movie` |
#
# In the calculation of the gross receipt, the data tabulators did not include movies where an actor had a cameo role or a speaking role that did not involve much screen time.
#
# The table has 50 rows, corresponding to the 50 top grossing actors. The table is already sorted by `Total Gross`, so it is easy to see that Harrison Ford is the highest grossing actor. In total, his movies have brought in more money at domestic box office than the movies of any other actor.
# In[2]:
actors = Table.read_table(path_data + 'actors.csv')
actors
# **Terminology.**
# A *variable* is a formal name for what we have been calling a "feature", such as 'number of movies.' The term *variable* emphasizes that the feature can have different values for different individuals – the numbers of movies that actors have been in varies across all the actors.
#
# Variables that have numerical values, such as 'number of movies' or 'average gross receipts per movie' are called *quantitative* or *numerical* variables.
# <h2>Scatter Plots</h2>
#
# A *scatter plot* displays the relation between two numerical variables. You saw an example of a scatter plot in an early section where we looked at the number of periods and number of characters in two classic novels.
#
# The Table method `scatter` draws a scatter plot consisting of one point for each row of the table. Its first argument is the label of the column to be plotted on the horizontal axis, and its second argument is the label of the column on the vertical.
# In[3]:
actors.scatter('Number of Movies', 'Total Gross')
# The plot contains 50 points, one point for each actor in the table. You can see that it slopes upwards, in general. The more movies an actor has been in, the more the total gross of all of those movies – in general.
#
# Formally, we say that the plot shows an *association* between the variables, and that the association is *positive*: high values of one variable tend to be associated with high values of the other, and low values of one with low values of the other, in general.
#
# Of course there is some variability. Some actors have high numbers of movies but middling total gross receipts. Others have middling numbers of movies but high receipts. That the association is positive is simply a statement about the broad general trend.
#
# Later in the course we will study how to quantify association. For the moment, we will just think about it qualitatively.
# Now that we have explored how the number of movies is related to the *total* gross receipt, let's turn our attention to how it is related to the *average* gross receipt per movie.
# In[4]:
actors.scatter('Number of Movies', 'Average per Movie')
# This is a markedly different picture and shows a *negative* association. In general, the more movies an actor has been in, the *less* the average receipt per movie.
#
# Also, one of the points is quite high and off to the left of the plot. It corresponds to one actor who has a low number of movies and high average per movie. This point is an *outlier*. It lies outside the general range of the data. Indeed, it is quite far from all the other points in the plot.
# We will examine the negative association further by looking at points at the right and left ends of the plot.
#
# For the right end, let's zoom in on the main body of the plot by just looking at the portion that doesn't have the outlier.
# In[5]:
no_outlier = actors.where('Number of Movies', are.above(10))
no_outlier.scatter('Number of Movies', 'Average per Movie')
# The negative association is still clearly visible. Let's identify the actors corresponding to the points that lie on the right hand side of the plot where the number of movies is large:
# In[6]:
actors.where('Number of Movies', are.above(60))
# The great actor Robert DeNiro has the highest number of movies and the lowest average receipt per movie. Other fine actors are at points that are not very far away, but DeNiro's is at the extreme end.
#
# To understand the negative association, note that the more movies an actor is in, the more variable those movies might be, in terms of style, genre, and box office draw. For example, an actor might be in some high-grossing action movies or comedies (such as Meet the Fockers), and also in a large number of smaller films that may be excellent but don't draw large crowds. Thus the actor's value of average receipts per movie might be relatively low.
#
# To approach this argument from a different direction, let us now take a look at the outlier.
# In[7]:
actors.where('Number of Movies', are.below(10))
# As an actor, Anthony Daniels might not have the stature of Robert DeNiro. But his 7 movies had an astonishingly high average receipt of nearly $452$ million dollars per movie.
#
# What were these movies? You might know about the droid C-3PO in Star Wars:
#
# 
#
# That's [Anthony Daniels](https://en.wikipedia.org/wiki/Anthony_Daniels) inside the metallic suit. He plays C-3PO.
#
# Mr. Daniels' entire filmography (apart from cameos) consists of movies in the high-grossing Star Wars franchise. That explains both his high average receipt and his low number of movies.
#
# Variables such as genre and production budget have an effect on the association between the number of movies and the average receipt per movie. This example is a reminder that studying the association between two variables often involves understanding other related variables as well.
# <h2>Line Graphs</h2>
#
# Line graphs are among the most common visualizations and are often used to study chronological trends and patterns.
#
# The table `movies_by_year` contains data on movies produced by U.S. studios in each of the years 1980 through 2015. The columns are:
#
# | **Column** | Content |
# |------------|---------|
# | `Year` | Year |
# | `Total Gross` | Total domestic box office gross, in millions of dollars, of all movies released |
# | `Number of Movies` | Number of movies released |
# | `#1 Movie` | Highest grossing movie |
# In[8]:
movies_by_year = Table.read_table(path_data + 'movies_by_year.csv')
movies_by_year
# The Table method `plot` produces a line graph. Its two arguments are the same as those for `scatter`: first the column on the horizontal axis, then the column on the vertical. Here is a line graph of the number of movies released each year over the years 1980 through 2015.
# In[9]:
movies_by_year.plot('Year', 'Number of Movies')
# The graph rises sharply and then has a gentle upwards trend though the numbers vary noticeably from year to year. The sharp rise in the early 1980's is due in part to studios returning to the forefront of movie production after some years of filmmaker driven movies in the 1970's.
#
# Our focus will be on more recent years. In keeping with the theme of movies, the table of rows corresponding to the years 2000 through 2015 have been assigned to the name `century_21`.
# In[10]:
century_21 = movies_by_year.where('Year', are.above(1999))
# In[11]:
century_21.plot('Year', 'Number of Movies')
# The global financial crisis of 2008 has a visible effect – in 2009 there is a sharp drop in the number of movies released.
#
# The dollar figures, however, didn't suffer much.
# In[12]:
century_21.plot('Year', 'Total Gross')
# The total domestic gross receipt was higher in 2009 than in 2008, even though there was a financial crisis and a much smaller number of movies were released.
#
# One reason for this apparent contradiction is that people tend to go to the movies when there is a recession. ["In Downturn, Americans Flock to the Movies,"](http://www.nytimes.com/2009/03/01/movies/01films.html?_r=0) said the New York Times in February 2009. The article quotes Martin Kaplan of the University of Southern California saying, "People want to forget their troubles, and they want to be with other people." When holidays and expensive treats are unaffordable, movies provide welcome entertainment and relief.
#
# In 2009, another reason for high box office receipts was the movie Avatar and its 3D release. Not only was Avatar the \#1 movie of 2009, it is also by some calculations the second highest grossing movie of all time, as we will see later.
# In[13]:
century_21.where('Year', are.equal_to(2009))
#
# ```{toctree}
# :hidden:
# :titlesonly:
#
#
# Categorical Distributions <1/Visualizing_Categorical_Distributions>
# Numerical Distributions <2/Visualizing_Numerical_Distributions>
# Overlaid Graphs <3/Overlaid_Graphs>
# ```
#
|
'''
Bryan Coleman
BJC18BV
membership question for L = {w E {0,1}* : w is a binary palendrome}
'''
import sys
def solution(tape):
    """Simulate a three-state Turing machine that decides whether the binary
    string on ``tape`` is a palindrome.

    Parameters
    ----------
    tape : list of str
        '0'/'1' characters followed by a single ' ' blank marking the end of
        the input. The tape is mutated in place: matched symbols are
        overwritten with 'x' (left mark) and 'y' (right mark).

    Returns
    -------
    bool
        True (accept) if the input is a palindrome, False (reject) otherwise.
    """
    accept = True
    reject = False
    state = 'q0'
    current = 0  # head position on the tape
    head = tape[current]  # symbol currently under the head
    while True:
        if state == 'q0':
            if head == ' ':
                # Empty string: trivially a palindrome, accept.
                return accept
            else:
                # Remember the leftmost unmatched symbol, overwrite it with
                # 'x', and move right into state q1 to find its partner.
                memory = head
                tape[current] = 'x'
                state = 'q1'
                current += 1
                head = tape[current]
                if head == ' ':
                    # Single-symbol input: trivially a palindrome, accept.
                    return accept
        elif state == 'q1':
            if head == '1' or head == '0':
                # Scan right across the unmatched region, one cell per pass.
                current += 1
                head = tape[current]
            if head == ' ' or head == 'y':
                # Reached the right edge of the unmatched region: step back
                # one cell and compare that symbol with the remembered one.
                current -= 1
                head = tape[current]
                if head == 'x':
                    # The head landed back on our own 'x' mark: a single
                    # middle symbol is left over, i.e. an odd-length
                    # palindrome (e.g. 10101 and 10001 both end up here).
                    return accept
                if head == memory:
                    # Matched: mark it 'y' and sweep left in state q2.
                    tape[current] = 'y'
                    state = 'q2'
                    current -= 1
                    head = tape[current]
                else:
                    # Mismatch with the remembered symbol: not a palindrome.
                    return reject
        elif state == 'q2':
            if head == '1' or head == '0':
                # Scan left across the unmatched region, one cell per pass.
                current -= 1
                head = tape[current]
            if head == 'x':
                current += 1
                head = tape[current]
                if head == 'y':
                    # Immediately right of 'x' is a 'y': everything has been
                    # matched, so this is an even-length palindrome.
                    return accept
                else:
                    # Otherwise mark the next unmatched symbol with 'x' and
                    # return to q1 to find its partner on the right.
                    memory = head
                    tape[current] = 'x'
                    current += 1
                    head = tape[current]
                    state = 'q1'
if __name__ == '__main__':
    # Prompt the user for a candidate string over the alphabet {0, 1}.
    user_input = str(input('enter a string containing 1\'s and 0\'s to see if they are in the language L = L = {w E {0,1}* : w is a binary palendrome} -> '))
    # Validate: any character outside the alphabet rejects the whole input.
    if any(ch not in ('0', '1') for ch in user_input):
        print('you\'re string contains invalid char\'s')
        sys.exit()
    # Build the machine's tape: the input followed by a blank end marker.
    tape = list(user_input)
    tape.append(' ')
    if solution(tape):
        print('String is accepted')
    else:
        print('String is NOT accepted')
|
from rest_framework import generics
from rest_framework.exceptions import ParseError
from inventoryProject.permissions import IsSuperUserOrStaffReadOnly, IsSuperUser, IsStaffUser
from items.models.asset_custom_fields import AssetField, IntAssetField, FloatAssetField, ShortTextAssetField, \
LongTextAssetField
from items.serializers.asset_field_serializer import AssetFieldSerializer, IntAssetFieldSerializer, \
FloatAssetFieldSerializer, ShortTextAssetFieldSerializer, LongTextAssetFieldSerializer
class AssetFieldList(generics.ListCreateAPIView):
    """List all custom asset fields; superusers may also create new ones
    (staff get read-only access via IsSuperUserOrStaffReadOnly)."""
    queryset = AssetField.objects.all()
    permission_classes = [IsSuperUserOrStaffReadOnly]
    serializer_class = AssetFieldSerializer
class AssetFieldDetailed(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update, or delete a single asset field (superuser-only)."""
    queryset = AssetField.objects.all()
    permission_classes = [IsSuperUser]
    serializer_class = AssetFieldSerializer
    def perform_update(self, serializer):
        # A field's type is immutable once created: reject any payload that
        # tries to set it rather than silently ignoring the attempt.
        if serializer.validated_data.get('type') is not None:
            raise ParseError(detail='You cannot change the type of a field')
        serializer.save()
# Per-type update endpoints: one UpdateAPIView per concrete field type, each
# pairing the type's model with its matching serializer. Staff-only.
class IntAssetFieldUpdate(generics.UpdateAPIView):
    """Update the value of an integer asset field."""
    queryset = IntAssetField.objects.all()
    permission_classes = [IsStaffUser]
    serializer_class = IntAssetFieldSerializer
class FloatAssetFieldUpdate(generics.UpdateAPIView):
    """Update the value of a float asset field."""
    queryset = FloatAssetField.objects.all()
    permission_classes = [IsStaffUser]
    serializer_class = FloatAssetFieldSerializer
class ShortTextAssetFieldUpdate(generics.UpdateAPIView):
    """Update the value of a short-text asset field."""
    queryset = ShortTextAssetField.objects.all()
    permission_classes = [IsStaffUser]
    serializer_class = ShortTextAssetFieldSerializer
class LongTextAssetFieldUpdate(generics.UpdateAPIView):
    """Update the value of a long-text asset field."""
    queryset = LongTextAssetField.objects.all()
    permission_classes = [IsStaffUser]
    serializer_class = LongTextAssetFieldSerializer
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# Table 1. Review
class Review(db.Model):
    """A user's review of a location: date, conditions, animal sightings,
    and an optional free-text comment. Belongs to one User."""
    __tablename__ = "reviews"
    id = db.Column( db.Integer, primary_key=True )
    date = db.Column( db.DateTime, nullable=False )  # when the review was made
    condition = db.Column( db.String, nullable=False )
    animals = db.Column( db.Boolean, nullable=False )  # whether animals were seen
    comment = db.Column( db.String, nullable=True )  # optional free text
    user_id = db.Column( db.Integer, db.ForeignKey("users.id"), nullable=False )
# Table 2. User
class User(db.Model):
    """An account that can post reviews.

    NOTE(review): ``password`` looks like it stores the raw value — confirm
    it is hashed before being written here.
    """
    __tablename__ = "users"
    id = db.Column( db.Integer, primary_key=True )
    username = db.Column( db.String, nullable=False )
    password = db.Column( db.String, nullable=False )
    postal_code = db.Column( db.String, nullable=False )
# Table 3. ESA
# for ESA_NAME, ESA_NUM, Shape_Area, Shape_Length, geometry
class ESA(db.Model):
    """An Environmentally Significant Area polygon: name, number, shape
    metrics, and a serialized geometry blob."""
    __tablename__ = "esas"
    id = db.Column( db.Integer, primary_key=True )
    ESA_NAME = db.Column( db.String, nullable=False )
    ESA_NUM = db.Column( db.Integer, nullable=False )
    Shape_Area = db.Column( db.Float, nullable=False )
    Shape_Length = db.Column( db.Float, nullable=False )
    geometry = db.Column( db.Text, nullable=False )  # presumably GeoJSON/WKT text — verify against the loader
    # def add_passenger(self, name):
    #     p = Passenger(name=name, flight_id=self.id)
    #     db.session.add(p)
    #     db.session.commit()
|
from django.core.exceptions import ValidationError
from django.db import models
class GlobalSettings(models.Model):
    """Singleton model holding game-wide scoring configuration."""
    current_session = models.PositiveIntegerField()  # the currently active session number
    penalty_points = models.PositiveIntegerField() # Indicates how much points will be the penalty for editing each time.
    bonus_points = models.PositiveIntegerField() # Maximum bonus points for exact guess. Reduced by a factor as the difference increases.

    def clean(self):
        """Enforce the singleton: only one GlobalSettings row may exist.

        Raises
        ------
        ValidationError
            If another GlobalSettings instance already exists.
        """
        # BUG FIX: the original raised with two undefined names
        # (``ValidationError`` was never imported and ``model`` never defined),
        # so the singleton guard itself crashed with a NameError.
        if (GlobalSettings.objects.count() > 0 and self.id != GlobalSettings.objects.get().id):
            raise ValidationError("Can only create 1 %s instance" % type(self).__name__)
class GlobalValues(models.Model):
    """Mutable game state and page content shared across the whole game."""
    dashboard = models.TextField()  # dashboard page content
    loginScroll = models.TextField()  # login-page scroll content
    # Free-form content blobs, one per ship.
    ship1 = models.TextField()
    ship2 = models.TextField()
    ship3 = models.TextField()
    ship4 = models.TextField()
    ship5 = models.TextField()
    running = models.BooleanField(help_text = "Game running flag")
    allowReg = models.BooleanField(help_text = "Allow Registrations?")
    session_time = models.PositiveIntegerField(help_text = "Total time for session")
    break_time = models.PositiveIntegerField(help_text = "Total break time")
    # NOTE(review): stored as a decimal number rather than a DateTimeField —
    # presumably a timestamp; confirm the units against the writers.
    endtime = models.DecimalField(max_digits = 18, decimal_places = 5, help_text = "Time to end the session")
    no_of_sessions = models.PositiveIntegerField(help_text = "Total number of sessions")
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# Load the MNIST handwritten-digit dataset (28x28 grayscale images).
mnist = keras.datasets.mnist
(x_train_orig, y_train), (x_test_orig, y_test) = mnist.load_data()
print("mnist dataset: train=%s test=%s" % (x_train_orig.shape, x_test_orig.shape))
# print("x_test_orig[0] =", x_test_orig[0])
class myCallback(keras.callbacks.Callback):
    """Keras callback that stops training once accuracy exceeds 99.3%."""

    def on_epoch_end(self, epoch, logs=None):
        """Check the epoch's metrics and set the stop flag when the target
        accuracy is reached.

        BUG FIX: the original compared ``logs.get('accuracy') > 0.993``
        directly, which raises TypeError when the 'accuracy' key is missing
        (e.g. the metric is reported under a different name). Default to 0
        and only ever set the flag to True.
        """
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.993:
            self.model.stop_training = True
def rotate_images(arr, degree):
    """Return a copy of the 2-D image array rotated by ``degree`` degrees."""
    rotated = Image.fromarray(arr).rotate(degree)
    return np.array(rotated)
# Augment the dataset with two rotated copies of every image; labels are
# duplicated to stay aligned with the tripled image arrays.
# NOTE(review): 160 degrees turns digits nearly upside-down — confirm that
# this "rotated_right" augmentation is intentional.
x_train_rotated_left = [rotate_images(x, 20) for x in x_train_orig]
x_train_rotated_right = [rotate_images(x, 160) for x in x_train_orig]
x_test_rotated_left = [rotate_images(x, 20) for x in x_test_orig]
x_test_rotated_right = [rotate_images(x, 160) for x in x_test_orig]
x_train_orig = np.concatenate((x_train_orig, x_train_rotated_left, x_train_rotated_right), axis=0)
x_test_orig = np.concatenate((x_test_orig, x_test_rotated_left, x_test_rotated_right), axis=0)
y_train = np.concatenate((y_train, y_train, y_train), axis=0)
y_test = np.concatenate((y_test, y_test, y_test), axis=0)
print("dataset + roated images: train=%s test=%s y_train=%s y_test=%s" % (
    x_train_orig.shape, x_test_orig.shape, y_train.shape, y_test.shape))
x_train, x_test = x_train_orig / 255.0, x_test_orig / 255.0  # scale pixels to [0, 1]
# Fully-connected classifier: wide ReLU layers with dropout, softmax head.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(1024, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(1024, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train up to 50 epochs; myCallback stops early once accuracy > 0.993.
model.fit(x_train,
          y_train,
          epochs=50,
          callbacks=[myCallback()])
model.evaluate(x_test, y_test)
# Export: SavedModel first, then a TFLite flatbuffer for mobile deployment.
tf.saved_model.save(model, 'models/mnist')
converter = tf.lite.TFLiteConverter.from_saved_model('models/mnist')
lite_model = converter.convert()
# BUG FIX: the original used open(...).write(...) without ever closing the
# handle; a with-block guarantees the .tflite file is flushed and closed.
with open('models/mnist/mnist.tflite', 'wb') as tflite_file:
    tflite_file.write(lite_model)
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pydot
import tensorflow as tf
import tqdm
from keras.utils import vis_utils
from tensorflow.keras import datasets, layers, models
vis_utils.pydot = pydot
def unpickle(file):
    """Load one pickled CIFAR-10 batch file.

    Parameters
    ----------
    file : str or path-like
        Path to the pickled batch file.

    Returns
    -------
    dict
        The unpickled batch. Keys are bytes (e.g. b'data', b'labels')
        because the file is read with ``encoding='bytes'``.
    """
    # FIX: the original bound the result to a local named ``dict``, shadowing
    # the builtin; use a descriptive name instead.
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    return batch
# Load the held-out test batch, and reuse batch 5 as a cross-validation set.
test, cross = unpickle('./test_batch'), unpickle(f'./data_batch_5')
data, labels = [], []
for i in range(5):
    cifar = unpickle(f'./data_batch_{i + 1}')
    if i == 0:
        # First batch initialises the arrays; pixel values scaled to [0, 1].
        data = cifar[b'data'] / 255
        labels = np.array(cifar[b'labels'])
    else:
        # Subsequent batches are appended row-wise.
        data = np.append(data, cifar[b'data'] / 255, axis=0)
        labels = np.append(labels, np.array(cifar[b'labels']), axis=0)
def network(data, labels, test, cross):
    """Train and evaluate a fully-connected classifier on flattened CIFAR rows.

    Mutates ``data``, ``cross[b'data']`` and ``test[b'data']`` IN PLACE
    (``ndarray.resize`` to shape (N, 1, 3072)), so later callers see the
    reshaped arrays. Saves an accuracy plot, the model file, and an
    architecture diagram; prints cross-validation and test metrics.

    NOTE(review): ``data`` arrives already scaled to [0, 1] (divided by 255 at
    load time) while ``cross``/``test`` are divided by 255 here — verify the
    caller keeps that convention.
    """
    data.resize((data.shape[0], 1, data.shape[-1]))
    cross[b'data'].resize((cross[b'data'].shape[0], 1, cross[b'data'].shape[-1]))
    test[b'data'].resize((test[b'data'].shape[0], 1, test[b'data'].shape[-1]))
    model = models.Sequential([
        layers.Dense(512, activation='relu'),
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(data, labels, epochs=10, validation_data=(cross[b'data'] / 255, np.array(cross[b'labels'])))
    # Plot training vs. validation accuracy per epoch and save to disk.
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.legend(loc='lower right')
    plt.savefig('nn_evaluation.png', dpi=600)
    cross_loss, cross_acc = model.evaluate(cross[b'data'] / 255, np.array(cross[b'labels']), verbose=2)
    # Model filename embeds the cross-validation accuracy (2 significant digits).
    model.save(f'nn{cross_acc:.2}.h5')
    vis_utils.plot_model(model, to_file='nn.png', show_shapes=True, show_layer_names=True, expand_nested=True, dpi=600)
    print(f'Cross Validation Accuracy: {cross_acc}, Cross Validation lost: {cross_loss}')
    test_loss, test_acc = model.evaluate(test[b'data'] / 255, np.array(test[b'labels']), verbose=2)
    print(f'Test accuracy: {test_acc}, Test lost: {test_loss}')
    print(model.summary())
def CNN(data, labels, test, cross):
    """Train and evaluate a small convolutional network on CIFAR images.

    Rebinds ``data`` locally and REPLACES ``cross[b'data']``/``test[b'data']``
    in the caller-visible dicts: each flat row is reshaped from channel-major
    (3, 1024) layout into a (32, 32, 3) image. Saves an accuracy plot, the
    model file, and an architecture diagram; prints cross-validation and test
    metrics.
    """
    data = np.array([i.reshape((3, 1024)).T.reshape(32, 32, 3) for i in data])
    cross[b'data'] = np.array([i.reshape((3, 1024)).T.reshape(32, 32, 3) for i in cross[b'data']])
    test[b'data'] = np.array([i.reshape((3, 1024)).T.reshape(32, 32, 3) for i in test[b'data']])
    model = models.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.MaxPooling2D((2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        layers.Flatten(),
        layers.Dense(64, activation='relu'),
        layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    history = model.fit(data, labels, epochs=10, validation_data=(cross[b'data'] / 255, np.array(cross[b'labels'])))
    # Plot training vs. validation accuracy per epoch and save to disk.
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0.5, 1])
    plt.legend(loc='lower right')
    plt.savefig('cnn_evaluation.png', dpi=600)
    cross_loss, cross_acc = model.evaluate(cross[b'data'] / 255, np.array(cross[b'labels']), verbose=2)
    # Model filename embeds the cross-validation accuracy (2 significant digits).
    model.save(f'cnn{cross_acc:.2}.h5')
    from tensorflow.keras.utils import plot_model
    plot_model(model, to_file='cnn.png', show_shapes=True, show_layer_names=True, expand_nested=True, dpi=600)
    print(f'Cross Validation Accuracy: {cross_acc}, Cross Validation lost: {cross_loss}')
    test_loss, test_acc = model.evaluate(test[b'data'] / 255, np.array(test[b'labels']), verbose=2)
    print(f'Test accuracy: {test_acc}, Test lost: {test_loss}')
    print(model.summary())
class NearestNeighbor:
    """k-nearest-neighbour classifier over raw pixel rows using L1 distance.

    Construction immediately evaluates the classifier on the supplied test
    batch and prints the resulting accuracy.
    """

    def __init__(self, data, labels, test, cross):
        self.test = test
        self.data = data
        self.labels = labels
        self.test[b'data'] = test[b'data']
        self.test[b'labels'] = test[b'labels']
        self.cross = cross
        self.train()

    def train(self):
        """Run prediction over the test set and print the accuracy."""
        predictions = self.predict()
        accuracy = np.mean(predictions == self.test[b'labels'])
        print(f'Accuracy:\t{accuracy}')

    def predict(self, k=7):
        """Label each test row by majority vote among its k nearest
        training rows (L1 / Manhattan distance)."""
        n_test = self.test[b'data'].shape[0]
        predictions = np.zeros(n_test, dtype=self.labels.dtype)
        for row in tqdm.tqdm(range(n_test)):
            # L1 distance from this test row to every training row.
            distances = np.sum(np.abs(self.data - self.test[b'data'][row, :]), axis=1)
            neighbours = self.labels[np.argsort(distances)[:k]]
            # Majority vote among the k nearest labels.
            unique, inverse = np.unique(neighbours, return_inverse=True)
            predictions[row] = unique[np.argmax(np.bincount(inverse))]
        return predictions
def rnn(data, labels, test, cross, first_exec=True):
    """Train and evaluate an LSTM classifier on grayscale CIFAR rows using
    the TF1 compatibility API (disables v2 behavior globally).

    Converts each flat RGB row to a 32x32 grayscale matrix, one-hot encodes
    the labels, builds a single-layer LSTM, trains it with Adam, and prints
    the accuracy on the first 128 test images.

    NOTE(review): ``first_exec`` is accepted but never used — confirm whether
    it was meant to guard the v2-behavior toggle.
    """
    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
    size = 32  # 32 * 32
    timesteps = 32
    hidden_layer = 256
    classes = 10
    params = {"learning_rate": 0.001, "training_iters": 10000, "batch_size": 64}
    test_data, test_labels = test[b'data'] / 255, test[b'labels']
    # Convert the RGB values to grayscale.
    print('Converting data......')
    data_array = np.array([[[item[index], item[index + 1024], item[index + 1024 * 2]] for index in range(1024)]
                           for item in tqdm.tqdm(data)])
    test_array = np.array([[[item[index], item[index + 1024], item[index + 1024 * 2]] for index in range(1024)]
                           for item in tqdm.tqdm(test_data)])
    # ITU-R BT.601 luma weights for the RGB -> gray conversion.
    data = np.array([[data_array[i, j].dot([0.299, 0.587, 0.114]) for j in range(data_array.shape[1])]
                     for i in tqdm.tqdm(range(data_array.shape[0]))])
    test = np.array([[test_array[i, j].dot([0.299, 0.587, 0.114]) for j in range(test_array.shape[1])]
                     for i in tqdm.tqdm(range(test_array.shape[0]))])
    # One-hot encode the labels.
    labels = np.array([[1 if i == row else 0 for i in range(10)] for row in tqdm.tqdm(labels)])
    test_labels = np.array([[1 if i == row else 0 for i in range(10)] for row in tqdm.tqdm(test_labels)])
    # Define the RNN model following the tutorial.
    x = tf.placeholder("float", [None, timesteps, size])
    y = tf.placeholder("float", [None, classes])
    weights = tf.Variable(tf.random_normal([hidden_layer, classes]), name='weights')
    biases = tf.Variable(tf.random_normal([classes]), name='biases')
    def rnn_model(x, weights, biases):
        # Reshape (batch, timesteps, size) into a list of `timesteps` tensors
        # of shape (batch, size), as static_rnn expects.
        x = tf.transpose(x, [1, 0, 2])
        x = tf.reshape(x, [-1, size])
        x = tf.split(x, timesteps, axis=0)
        lstm_cell = tf.nn.rnn_cell.LSTMCell(hidden_layer, forget_bias=1.0)
        outputs, states = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)
        # Project the last timestep's output onto the class logits.
        return tf.matmul(outputs[-1], weights) + biases
    pred = rnn_model(x, weights, biases)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=params['learning_rate']).minimize(cost)
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # Train the model.
    print('Training......')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Train the model one minibatch at a time.
        for step in tqdm.tqdm(range(1, int(params['training_iters'] / params['batch_size']) + 1)):
            batch_x = data[(step - 1) * params['batch_size']:step * params['batch_size']].reshape(
                (params['batch_size'], timesteps, size))
            batch_y = labels[(step - 1) * params['batch_size']:step * params['batch_size']]
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        # Evaluate the model on (the first 128 images of) the test set.
        print("Accuracy:",
              sess.run(accuracy, feed_dict={
                  x: test[:128].reshape((-1, timesteps, size)),
                  y: test_labels[:128]
              }))
# Run every model in sequence on the same data.
# NOTE(review): network() resizes ``data`` and the test/cross arrays in place,
# so the later models receive the reshaped arrays — confirm that is intended.
network(data, labels, test, cross)
CNN(data, labels, test, cross)
NearestNeighbor(data, labels, test, cross)
rnn(data, labels, test, cross, 0)
|
# Minimal application manifest: app label, extra imports, and the dotted path
# of the startup view.
markup = {
    "application": "App",
    "imports": [],
    "startup": "test_app.views.main"
}
|
# Generated by Django 3.0.8 on 2020-07-06 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes house.image2 and house.image3 optional
    # (blank=True) with 'default.jpg' as the fallback image. Do not edit the
    # operations by hand.
    dependencies = [
        ('zemax', '0016_house_image3'),
    ]
    operations = [
        migrations.AlterField(
            model_name='house',
            name='image2',
            field=models.ImageField(blank=True, default='default.jpg', upload_to='house_pics'),
        ),
        migrations.AlterField(
            model_name='house',
            name='image3',
            field=models.ImageField(blank=True, default='default.jpg', upload_to='house_pics'),
        ),
    ]
|
from rest_framework import permissions
from rest_framework.viewsets import ModelViewSet
from src.profiles.models import MyUser
from src.profiles.serializers import GetUserSerializer, GetUserPublicSerializer
class PublicUserView(ModelViewSet):
    """ Public user information

    NOTE(review): ModelViewSet with AllowAny exposes create/update/delete on
    all users to unauthenticated clients — confirm this is intended, or
    restrict to a ReadOnlyModelViewSet.
    """
    queryset = MyUser.objects.all()
    serializer_class = GetUserPublicSerializer
    permission_classes = [permissions.AllowAny]
class UserView(ModelViewSet):
    """ User information (authenticated; scoped to the requesting user) """
    serializer_class = GetUserSerializer
    permission_classes = [permissions.IsAuthenticated]
    def get_queryset(self):
        # Limit the queryset to the caller's own record so users can only
        # see and modify themselves.
        return MyUser.objects.filter(id=self.request.user.id)
import numpy as np
import pickle
from RealSenseCamera import RealSenseCamera
# Record frames from a RealSense camera until interrupted, then pickle them.
k = RealSenseCamera(serial_no='834412071881')
k.start()
frames = []
try:
    # Capture until the user presses Ctrl-C.
    while True:
        # get_feed()[1] is a dict holding 'color_image' and 'depth_image'.
        image_dict = k.get_feed()[1]
        frames.append(image_dict)
except KeyboardInterrupt:
    frames = np.asarray(frames)
    # BUG FIX: the original opened the file without ever closing it, so the
    # pickle could be left unflushed at interpreter exit. The with-block
    # guarantees the data is written and the handle closed.
    with open('real_record.pkl', 'wb') as record_file:
        pickle.dump(frames, record_file)
    # NOTE(review): the camera is never stopped on exit — confirm whether
    # RealSenseCamera exposes a stop()/close() that should be called here.
import airflow_home.credential_vars
import psycopg2
# Names of the databases db_conn() knows how to reach.
connection_options = ('redshift', 'zeus')

class NotAValidDatabase(Exception):
    """Raised when db_conn() is asked for an unknown database source."""
    pass
def db_conn(source):
    """Open a connection and cursor to the named database.

    Parameters
    ----------
    source : str
        One of the names in ``connection_options`` ('redshift' or 'zeus').

    Returns
    -------
    tuple
        ``(connection, cursor)``. Both are also published as the module-level
        globals ``conn`` and ``cursor`` for legacy callers.

    Raises
    ------
    NotAValidDatabase
        If ``source`` is not a known database name.
    """
    global cursor
    global conn
    if source not in connection_options:
        raise NotAValidDatabase(("Invalid Database Source. Please choose one"
                                 "of the following: %s")
                                % ', '.join(connection_options))
    # Connection strings live in credential_vars as '<source>_conn_string'.
    conn_string = getattr(airflow_home.credential_vars, source + '_conn_string')
    conn = psycopg2.connect(conn_string)
    cursor = conn.cursor()
    return conn, cursor
|
import sys
_module = sys.modules[__name__]
del sys
conf = _module
adult_census = _module
adult_census_attention_mlp = _module
adult_census_bayesian_tabmlp = _module
adult_census_cont_den_full_example = _module
adult_census_cont_den_run_all_models = _module
adult_census_enc_dec_full_example = _module
adult_census_enc_dec_run_all_models = _module
adult_census_tabnet = _module
adult_census_transformers = _module
airbnb_all_modes_multiclass = _module
airbnb_all_modes_regr = _module
airbnb_data_preprocessing = _module
bio_imbalanced_loader = _module
california_housing_fds_lds = _module
download_images = _module
pytorch_widedeep = _module
bayesian_models = _module
_base_bayesian_model = _module
_weight_sampler = _module
bayesian_nn = _module
modules = _module
bayesian_embedding = _module
bayesian_linear = _module
tabular = _module
bayesian_embeddings_layers = _module
bayesian_wide = _module
bayesian_mlp = _module
_layers = _module
bayesian_tab_mlp = _module
callbacks = _module
dataloaders = _module
datasets = _module
_base = _module
data = _module
initializers = _module
losses = _module
metrics = _module
models = _module
_get_activation_fn = _module
fds_layer = _module
image = _module
_layers = _module
vision = _module
_base_tabular_model = _module
embeddings_layers = _module
linear = _module
wide = _module
mlp = _module
_attention_layers = _module
_encoders = _module
_layers = _module
context_attention_mlp = _module
self_attention_mlp = _module
tab_mlp = _module
resnet = _module
_layers = _module
tab_resnet = _module
self_supervised = _module
_augmentations = _module
_denoise_mlps = _module
_random_obfuscator = _module
contrastive_denoising_model = _module
encoder_decoder_model = _module
tabnet = _module
_layers = _module
_utils = _module
sparsemax = _module
tab_net = _module
transformers = _module
_attention_layers = _module
_encoders = _module
ft_transformer = _module
saint = _module
tab_fastformer = _module
tab_perceiver = _module
tab_transformer = _module
text = _module
_encoders = _module
attentive_rnn = _module
basic_rnn = _module
stacked_attentive_rnn = _module
wide_deep = _module
preprocessing = _module
base_preprocessor = _module
image_preprocessor = _module
tab_preprocessor = _module
text_preprocessor = _module
wide_preprocessor = _module
self_supervised_training = _module
_base_contrastive_denoising_trainer = _module
_base_encoder_decoder_trainer = _module
contrastive_denoising_trainer = _module
encoder_decoder_trainer = _module
tab2vec = _module
training = _module
_base_bayesian_trainer = _module
_base_trainer = _module
_finetune = _module
_loss_and_obj_aliases = _module
_multiple_lr_scheduler = _module
_multiple_optimizer = _module
_multiple_transforms = _module
_trainer_utils = _module
_wd_dataset = _module
bayesian_trainer = _module
trainer = _module
utils = _module
deeptabular_utils = _module
fastai_transforms = _module
general_utils = _module
image_utils = _module
text_utils = _module
version = _module
wdtypes = _module
setup = _module
tests = _module
test_b_losses = _module
test_mc_bayes_tabmlp = _module
test_mc_bayes_wide = _module
test_b_callbacks = _module
test_b_data_inputs = _module
test_b_fit_methods = _module
test_b_miscellaneous = _module
test_b_t2v = _module
test_data_utils = _module
test_du_base_preprocessor = _module
test_du_image = _module
test_du_tabular = _module
test_du_text = _module
test_du_wide = _module
test_fastai_transforms = _module
test_datasets = _module
test_finetune = _module
test_finetuning_routines = _module
test_losses = _module
test_metrics = _module
test_torchmetrics = _module
test_model_components = _module
test_mc_attn_tab_mlp = _module
test_mc_image = _module
test_mc_tab_mlp = _module
test_mc_tab_resnet = _module
test_mc_tab_tabnet = _module
test_mc_text = _module
test_mc_transformers = _module
test_mc_wide = _module
test_wide_deep = _module
test_model_functioning = _module
test_callbacks = _module
test_data_inputs = _module
test_fit_methods = _module
test_initializers = _module
test_miscellaneous = _module
test_self_supervised = _module
test_ss_callbacks = _module
test_ss_miscellaneous = _module
test_ss_model_components = _module
test_ss_model_pretrain_method = _module
test_t2v = _module
from _paritybench_helpers import _mock_config, patch_functional
from unittest.mock import mock_open, MagicMock
from torch.autograd import Function
from torch.nn import Module
import abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings
import numpy as np
from torch import Tensor
patch_functional()
open = mock_open()
yaml = logging = sys = argparse = MagicMock()
ArgumentParser = argparse.ArgumentParser
_global_config = args = argv = cfg = config = params = _mock_config()
argparse.ArgumentParser.return_value.parse_args.return_value = _global_config
yaml.load.return_value = _global_config
sys.argv = _global_config
__version__ = '1.0.0'
xrange = range
wraps = functools.wraps
import numpy as np
import torch
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from itertools import product
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
import time
import warnings
from torch.optim import SGD
from torch.optim import lr_scheduler
from sklearn.metrics import classification_report
from torch import nn
import math
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from typing import Tuple
from torch.utils.data import DataLoader
from torch.utils.data import WeightedRandomSampler
import re
from typing import Dict
import torch.nn as nn
import torchvision
from torch import einsum
from collections import OrderedDict
import inspect
from scipy.sparse import csc_matrix
from torch.autograd import Function
from abc import ABC
from abc import abstractmethod
from torch.utils.data import TensorDataset
from copy import deepcopy
from scipy.ndimage import convolve1d
from sklearn.utils import Bunch
from torch.utils.data import Dataset
from inspect import signature
from scipy.ndimage import gaussian_filter1d
from sklearn.exceptions import NotFittedError
from scipy.signal.windows import triang
from types import SimpleNamespace
from typing import Any
from typing import List
from typing import Match
from typing import Union
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Generator
from typing import Collection
from torch import Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from torchvision.transforms import Pad
from torchvision.transforms import Lambda
from torchvision.transforms import Resize
from torchvision.transforms import Compose
from torchvision.transforms import TenCrop
from torchvision.transforms import FiveCrop
from torchvision.transforms import Grayscale
from torchvision.transforms import CenterCrop
from torchvision.transforms import RandomCrop
from torchvision.transforms import ToPILImage
from torchvision.transforms import ColorJitter
from torchvision.transforms import PILToTensor
from torchvision.transforms import RandomApply
from torchvision.transforms import RandomOrder
from torchvision.transforms import GaussianBlur
from torchvision.transforms import RandomAffine
from torchvision.transforms import RandomChoice
from torchvision.transforms import RandomInvert
from torchvision.transforms import RandomErasing
from torchvision.transforms import RandomEqualize
from torchvision.transforms import RandomRotation
from torchvision.transforms import RandomSolarize
from torchvision.transforms import RandomGrayscale
from torchvision.transforms import RandomPosterize
from torchvision.transforms import ConvertImageDtype
from torchvision.transforms import InterpolationMode
from torchvision.transforms import RandomPerspective
from torchvision.transforms import RandomResizedCrop
from torchvision.transforms import RandomAutocontrast
from torchvision.transforms import RandomVerticalFlip
from torchvision.transforms import LinearTransformation
from torchvision.transforms import RandomHorizontalFlip
from torchvision.transforms import RandomAdjustSharpness
from torchvision.models._api import WeightsEnum
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data.dataloader import DataLoader
from sklearn.metrics import mean_squared_error
import string
from torch.optim.lr_scheduler import StepLR
from torch.optim.lr_scheduler import CyclicLR
from scipy import stats
from numpy.testing import assert_almost_equal
from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import f1_score
from sklearn.metrics import r2_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from torchvision.models import MNASNet1_0_Weights
from torchvision.models import MobileNet_V2_Weights
from torchvision.models import SqueezeNet1_0_Weights
from torchvision.models import ResNeXt50_32X4D_Weights
from torchvision.models import Wide_ResNet50_2_Weights
from torchvision.models import ShuffleNet_V2_X0_5_Weights
from copy import copy
from itertools import chain
from copy import deepcopy as c
class BayesianModule(nn.Module):
    """Marker base class for Bayesian layers.

    Trainer code finds Bayesian layers via ``isinstance(m, BayesianModule)``
    when accumulating the KL divergence, so every Bayesian layer must
    inherit from this class.
    """

    def __init__(self):
        super().__init__()

    # BUG FIX: the original defined ``init`` (a typo for ``__init__``) which
    # Python never calls on construction. Keep it as an alias so any code
    # explicitly calling ``.init()`` still works.
    init = __init__
class BaseBayesianModel(nn.Module):
    """Base model containing the two methods common to all Bayesian models."""

    def __init__(self):
        super().__init__()

    # BUG FIX: the original defined ``init`` (a typo for ``__init__``) which
    # Python never calls on construction. Keep it as an alias so any code
    # explicitly calling ``.init()`` still works.
    init = __init__

    def _kl_divergence(self):
        """Sum log q(w) - log p(w) over every BayesianModule child, i.e. the
        single-sample KL estimate accumulated by the layers' forward passes."""
        kld = 0
        for module in self.modules():
            if isinstance(module, BayesianModule):
                kld += module.log_variational_posterior - module.log_prior
        return kld

    def sample_elbo(self, input: Tensor, target: Tensor, loss_fn: nn.Module, n_samples: int, n_batches: int) -> Tuple[Tensor, Tensor]:
        """Monte-Carlo estimate of the (negative) ELBO.

        Runs ``n_samples`` stochastic forward passes, averaging the outputs
        for the likelihood term and accumulating the KL term.

        Parameters
        ----------
        input: Tensor
            Model input, forwarded unchanged to ``self``.
        target: Tensor
            Ground truth passed to ``loss_fn``.
        loss_fn: nn.Module
            Likelihood loss applied to the mean of the sampled outputs.
        n_samples: int
            Number of stochastic forward passes.
        n_batches: int
            Number of minibatches per epoch; the KL (complexity) cost is
            divided by this so it is spread across the epoch.

        Returns
        -------
        Tuple[Tensor, Tensor]
            The stacked per-sample outputs and the total loss
            (complexity cost + likelihood cost).
        """
        outputs_l = []
        kld = 0.0
        for _ in range(n_samples):
            outputs_l.append(self(input))
            kld += self._kl_divergence()
        outputs = torch.stack(outputs_l)
        complexity_cost = kld / n_batches
        likelihood_cost = loss_fn(outputs.mean(0), target)
        return outputs, complexity_cost + likelihood_cost
class GaussianPosterior(object):
    """Gaussian variational posterior q(w | mu, rho) as proposed in 'Weight
    Uncertainty in Neural Networks', with sigma = log(1 + exp(rho))."""

    def __init__(self, param_mu: Tensor, param_rho: Tensor):
        super().__init__()
        self.param_mu = param_mu
        self.param_rho = param_rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        # softplus-style reparameterisation keeps sigma strictly positive
        return torch.log1p(torch.exp(self.param_rho))

    def sample(self) -> Tensor:
        # Reparameterisation trick: w = mu + sigma * eps with eps ~ N(0, 1).
        eps = self.normal.sample(self.param_rho.size())
        return self.param_mu + self.sigma * eps

    def log_posterior(self, input: Tensor) -> Tensor:
        # log N(input | mu, sigma), summed over all elements.
        sigma = self.sigma
        log_norm_const = -math.log(math.sqrt(2 * math.pi))
        log_scale = -torch.log(sigma)
        log_kernel = -((input - self.param_mu) ** 2) / (2 * sigma ** 2)
        return (log_norm_const + log_scale + log_kernel).sum()
class ScaleMixtureGaussianPrior(object):
    """Scale-mixture-of-two-Gaussians prior as proposed in 'Weight
    Uncertainty in Neural Networks' (Eq. 7): pi * N(0, sigma1) +
    (1 - pi) * N(0, sigma2)."""

    def __init__(self, pi: float, sigma1: float, sigma2: float):
        super().__init__()
        self.pi = pi
        self.sigma1 = sigma1
        self.sigma2 = sigma2
        self.gaussian1 = torch.distributions.Normal(0, sigma1)
        self.gaussian2 = torch.distributions.Normal(0, sigma2)

    def log_prior(self, input: Tensor) -> Tensor:
        # Mix the component DENSITIES (not log-densities), then take the log.
        density1 = self.gaussian1.log_prob(input).exp()
        density2 = self.gaussian2.log_prob(input).exp()
        mixture = self.pi * density1 + (1 - self.pi) * density2
        return mixture.log().sum()
class BayesianEmbedding(BayesianModule):
"""A simple lookup table that looks up embeddings in a fixed dictionary and
size.
Parameters
----------
n_embed: int
number of embeddings. Typically referred as size of the vocabulary
embed_dim: int
Dimension of the embeddings
padding_idx: int, optional, default = None
If specified, the entries at ``padding_idx`` do not contribute to the
gradient; therefore, the embedding vector at ``padding_idx`` is not
updated during training, i.e. it remains as a fixed “pad”. For a
newly constructed Embedding, the embedding vector at ``padding_idx``
will default to all zeros, but can be updated to another value to be
used as the padding vector
max_norm: float, optional, default = None
If given, each embedding vector with norm larger than ``max_norm`` is
renormalized to have norm max_norm
norm_type: float, optional, default = 2.
The p of the p-norm to compute for the ``max_norm`` option.
scale_grad_by_freq: bool, optional, default = False
If given, this will scale gradients by the inverse of frequency of the
words in the mini-batch.
sparse: bool, optional, default = False
If True, gradient w.r.t. weight matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients.
prior_sigma_1: float, default = 1.0
Prior of the sigma parameter for the first of the two Gaussian
distributions that will be mixed to produce the prior weight
distribution
prior_sigma_2: float, default = 0.002
Prior of the sigma parameter for the second of the two Gaussian
distributions that will be mixed to produce the prior weight
distribution
prior_pi: float, default = 0.8
Scaling factor that will be used to mix the Gaussians to produce the
prior weight distribution
posterior_mu_init: float = 0.0
The posterior sample of the weights is defined as:
.. math::
\\begin{aligned}
\\mathbf{w} &= \\mu + log(1 + exp(\\rho))
\\end{aligned}
where:
.. math::
\\begin{aligned}
\\mathcal{N}(x\\vert \\mu, \\sigma) &= \\frac{1}{\\sqrt{2\\pi}\\sigma}e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}\\\\
\\log{\\mathcal{N}(x\\vert \\mu, \\sigma)} &= -\\log{\\sqrt{2\\pi}} -\\log{\\sigma} -\\frac{(x-\\mu)^2}{2\\sigma^2}\\\\
\\end{aligned}
:math:`\\mu` is initialised using a normal distributtion with mean
``posterior_rho_init`` and std equal to 0.1.
posterior_rho_init: float = -7.0
As in the case of :math:`\\mu`, :math:`\\rho` is initialised using a
normal distributtion with mean ``posterior_rho_init`` and std equal to
0.1.
Examples
--------
>>> import torch
>>> from pytorch_widedeep.bayesian_models import bayesian_nn as bnn
>>> embedding = bnn.BayesianEmbedding(10, 3)
>>> input = torch.LongTensor([[1,2,4,5],[4,3,2,9]])
>>> out = embedding(input)
"""
def __init__(self, n_embed: int, embed_dim: int, padding_idx: Optional[int]=None, max_norm: Optional[float]=None, norm_type: Optional[float]=2.0, scale_grad_by_freq: Optional[bool]=False, sparse: Optional[bool]=False, prior_sigma_1: float=1.0, prior_sigma_2: float=0.002, prior_pi: float=0.8, posterior_mu_init: float=0.0, posterior_rho_init: float=-7.0):
super(BayesianEmbedding, self).__init__()
self.n_embed = n_embed
self.embed_dim = embed_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.prior_sigma_1 = prior_sigma_1
self.prior_sigma_2 = prior_sigma_2
self.prior_pi = prior_pi
self.posterior_mu_init = posterior_mu_init
self.posterior_rho_init = posterior_rho_init
self.weight_mu = nn.Parameter(torch.Tensor(n_embed, embed_dim).normal_(posterior_mu_init, 0.1))
self.weight_rho = nn.Parameter(torch.Tensor(n_embed, embed_dim).normal_(posterior_rho_init, 0.1))
self.weight_sampler = GaussianPosterior(self.weight_mu, self.weight_rho)
self.weight_prior_dist = ScaleMixtureGaussianPrior(self.prior_pi, self.prior_sigma_1, self.prior_sigma_2)
self.log_prior: Union[Tensor, float] = 0.0
self.log_variational_posterior: Union[Tensor, float] = 0.0
def forward(self, X: Tensor) ->Tensor:
if not self.training:
return F.embedding(X, self.weight_mu, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
weight = self.weight_sampler.sample()
self.log_variational_posterior = self.weight_sampler.log_posterior(weight)
self.log_prior = self.weight_prior_dist.log_prior(weight)
return F.embedding(X, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse)
def extra_repr(self) ->str:
s = '{n_embed}, {embed_dim}'
if self.padding_idx is not None:
s += ', padding_idx={padding_idx}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
if self.sparse is not False:
s += ', sparse=True'
if self.prior_sigma_1 != 1.0:
s += ', prior_sigma_1={prior_sigma_1}'
if self.prior_sigma_2 != 0.002:
s += ', prior_sigma_2={prior_sigma_2}'
if self.prior_pi != 0.8:
s += ', prior_pi={prior_pi}'
if self.posterior_mu_init != 0.0:
s += ', posterior_mu_init={posterior_mu_init}'
if self.posterior_rho_init != -7.0:
s += ', posterior_rho_init={posterior_rho_init}'
return s.format(**self.__dict__)
class BayesianLinear(BayesianModule):
    r"""Applies a linear transformation to the incoming data as proposed in
    Weight Uncertainty on Neural Networks.

    Weights (and optionally biases) are distributions rather than point
    estimates: during training each is sampled from a Gaussian variational
    posterior parameterised by :math:`\mu` and :math:`\rho`, under a
    scale-mixture Gaussian prior.

    Parameters
    ----------
    in_features: int
        size of each input sample
    out_features: int
        size of each output sample
    use_bias: bool, default = True
        Boolean indicating if an additive bias will be learnt
    prior_sigma_1: float, default = 1.0
        sigma of the first of the two Gaussian distributions that are mixed
        to produce the prior weight distribution
    prior_sigma_2: float, default = 0.002
        sigma of the second of the two Gaussian distributions that are mixed
        to produce the prior weight distribution
    prior_pi: float, default = 0.8
        Scaling factor that mixes the two Gaussians of the prior
    posterior_mu_init: float, default = 0.0
        :math:`\mu` is initialised from a normal distribution with mean
        ``posterior_mu_init`` and std 0.1
    posterior_rho_init: float, default = -7.0
        :math:`\rho` is initialised from a normal distribution with mean
        ``posterior_rho_init`` and std 0.1; the posterior sample of the
        weights is :math:`\mathbf{w} = \mu + log(1 + exp(\rho))`

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import bayesian_nn as bnn
    >>> linear = bnn.BayesianLinear(10, 6)
    >>> input = torch.rand(6, 10)
    >>> out = linear(input)
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        use_bias: bool = True,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianLinear, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.use_bias = use_bias
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi

        # variational posterior over the weight matrix
        self.weight_mu = nn.Parameter(
            torch.Tensor(out_features, in_features).normal_(posterior_mu_init, 0.1)
        )
        self.weight_rho = nn.Parameter(
            torch.Tensor(out_features, in_features).normal_(posterior_rho_init, 0.1)
        )
        self.weight_sampler = GaussianPosterior(self.weight_mu, self.weight_rho)

        # optional variational posterior over the bias vector
        if self.use_bias:
            self.bias_mu = nn.Parameter(
                torch.Tensor(out_features).normal_(posterior_mu_init, 0.1)
            )
            self.bias_rho = nn.Parameter(
                torch.Tensor(out_features).normal_(posterior_rho_init, 0.1)
            )
            self.bias_sampler = GaussianPosterior(self.bias_mu, self.bias_rho)
        else:
            self.bias_mu, self.bias_rho = None, None

        self.weight_prior_dist = ScaleMixtureGaussianPrior(
            self.prior_pi, self.prior_sigma_1, self.prior_sigma_2
        )
        if self.use_bias:
            self.bias_prior_dist = ScaleMixtureGaussianPrior(
                self.prior_pi, self.prior_sigma_1, self.prior_sigma_2
            )

        # running KL terms, refreshed on every training forward pass
        self.log_prior: Union[Tensor, float] = 0.0
        self.log_variational_posterior: Union[Tensor, float] = 0.0

    def forward(self, X: Tensor) -> Tensor:
        """Apply the (sampled while training, mean in eval mode) linear
        transformation to ``X``.
        """
        if not self.training:
            return F.linear(X, self.weight_mu, self.bias_mu)

        weight = self.weight_sampler.sample()
        bias = None
        bias_log_posterior: Union[Tensor, float] = 0.0
        bias_log_prior: Union[Tensor, float] = 0.0
        if self.use_bias:
            bias = self.bias_sampler.sample()
            bias_log_posterior = self.bias_sampler.log_posterior(bias)
            bias_log_prior = self.bias_prior_dist.log_prior(bias)

        self.log_variational_posterior = (
            self.weight_sampler.log_posterior(weight) + bias_log_posterior
        )
        self.log_prior = self.weight_prior_dist.log_prior(weight) + bias_log_prior

        return F.linear(X, weight, bias)

    def extra_repr(self) -> str:
        """List the non-default constructor arguments for ``repr``."""
        parts = ["{in_features}, {out_features}"]
        if self.use_bias is not False:
            parts.append("use_bias=True")
        if self.prior_sigma_1 != 1.0:
            parts.append("prior_sigma_1={prior_sigma_1}")
        if self.prior_sigma_2 != 0.002:
            parts.append("prior_sigma_2={prior_sigma_2}")
        if self.prior_pi != 0.8:
            parts.append("prior_pi={prior_pi}")
        if self.posterior_mu_init != 0.0:
            parts.append("posterior_mu_init={posterior_mu_init}")
        if self.posterior_rho_init != -7.0:
            parts.append("posterior_rho_init={posterior_rho_init}")
        return ", ".join(parts).format(**self.__dict__)
class BayesianContEmbeddings(BayesianModule):
    """Bayesian per-column embeddings for continuous features.

    Each of the ``n_cont_cols`` continuous columns gets its own
    ``embed_dim``-dimensional Bayesian weight (and optional bias) vector.
    The embedding of column ``j`` for a given sample is
    ``weight[j] * x[j] (+ bias[j])``, giving a ``(batch, n_cont_cols,
    embed_dim)`` output.

    Parameters
    ----------
    n_cont_cols: int
        number of continuous columns to embed
    embed_dim: int
        size of each column embedding
    prior_sigma_1: float
        sigma of the first Gaussian of the scale-mixture prior
    prior_sigma_2: float
        sigma of the second Gaussian of the scale-mixture prior
    prior_pi: float
        Scaling factor that mixes the two prior Gaussians
    posterior_mu_init: float
        mean of the Normal(., 0.1) used to initialise the posterior mu
    posterior_rho_init: float
        mean of the Normal(., 0.1) used to initialise the posterior rho
    use_bias: bool, default = False
        Boolean indicating if an additive bias will be learnt
    """

    def __init__(self, n_cont_cols: int, embed_dim: int, prior_sigma_1: float, prior_sigma_2: float, prior_pi: float, posterior_mu_init: float, posterior_rho_init: float, use_bias: bool=False):
        super(BayesianContEmbeddings, self).__init__()
        self.n_cont_cols = n_cont_cols
        self.embed_dim = embed_dim
        self.use_bias = use_bias

        # variational posterior over the per-column weight vectors
        self.weight_mu = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim).normal_(posterior_mu_init, 0.1))
        self.weight_rho = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim).normal_(posterior_rho_init, 0.1))
        self.weight_sampler = GaussianPosterior(self.weight_mu, self.weight_rho)

        # optional variational posterior over the per-column bias vectors
        if use_bias:
            self.bias_mu = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim).normal_(posterior_mu_init, 0.1))
            self.bias_rho = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim).normal_(posterior_rho_init, 0.1))
            self.bias_sampler = GaussianPosterior(self.bias_mu, self.bias_rho)
        else:
            self.bias_mu, self.bias_rho = None, None

        self.weight_prior_dist = ScaleMixtureGaussianPrior(prior_pi, prior_sigma_1, prior_sigma_2)
        if self.use_bias:
            self.bias_prior_dist = ScaleMixtureGaussianPrior(prior_pi, prior_sigma_1, prior_sigma_2)

        # running KL terms, refreshed on every training forward pass
        self.log_prior: Union[Tensor, float] = 0.0
        self.log_variational_posterior: Union[Tensor, float] = 0.0

    def forward(self, X: Tensor) -> Tensor:
        """Embed ``X`` of shape ``(batch, n_cont_cols)`` into
        ``(batch, n_cont_cols, embed_dim)``.
        """
        if not self.training:
            # eval mode: use posterior means, no sampling
            x = self.weight_mu.unsqueeze(0) * X.unsqueeze(2)
            if self.bias_mu is not None:
                # BUG FIX: this previously read ``x + self.bias_mu.unsqueeze(0)``
                # and discarded the result, so the bias was never applied in
                # eval mode
                x = x + self.bias_mu.unsqueeze(0)
            return x

        weight = self.weight_sampler.sample()
        if self.use_bias:
            bias = self.bias_sampler.sample()
            bias_log_posterior: Union[Tensor, float] = self.bias_sampler.log_posterior(bias)
            bias_log_prior: Union[Tensor, float] = self.bias_prior_dist.log_prior(bias)
        else:
            bias = 0.0
            bias_log_posterior = 0.0
            bias_log_prior = 0.0

        self.log_variational_posterior = self.weight_sampler.log_posterior(weight) + bias_log_posterior
        self.log_prior = self.weight_prior_dist.log_prior(weight) + bias_log_prior

        x = weight.unsqueeze(0) * X.unsqueeze(2) + bias
        return x

    def extra_repr(self) -> str:
        """Summarise constructor arguments for ``repr``."""
        s = '{n_cont_cols}, {embed_dim}, use_bias={use_bias}'
        return s.format(**self.__dict__)
class BayesianDiffSizeCatEmbeddings(nn.Module):
    """Container of one ``bnn.BayesianEmbedding`` per categorical column,
    where each column may use a different embedding dimension.

    ``embed_input`` is a list of ``(column_name, n_unique_values,
    embed_dim)`` tuples and ``column_idx`` maps column names to their
    position in the input tensor.
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        embed_input: List[Tuple[str, int, int]],
        prior_sigma_1: float,
        prior_sigma_2: float,
        prior_pi: float,
        posterior_mu_init: float,
        posterior_rho_init: float,
    ):
        super(BayesianDiffSizeCatEmbeddings, self).__init__()
        self.column_idx = column_idx
        self.embed_input = embed_input

        # one Bayesian embedding per column; ``val + 1`` rows with
        # ``padding_idx=0`` keeps index 0 as a padding slot
        layers = {}
        for col, val, dim in self.embed_input:
            layers["emb_layer_" + col] = bnn.BayesianEmbedding(
                val + 1,
                dim,
                padding_idx=0,
                prior_sigma_1=prior_sigma_1,
                prior_sigma_2=prior_sigma_2,
                prior_pi=prior_pi,
                posterior_mu_init=posterior_mu_init,
                posterior_rho_init=posterior_rho_init,
            )
        self.embed_layers = nn.ModuleDict(layers)

        # total embedding dimension across all columns
        self.emb_out_dim: int = int(np.sum([embed[2] for embed in self.embed_input]))

    def forward(self, X: Tensor) -> Tensor:
        """Embed every categorical column of ``X`` and concatenate the
        results along the feature dimension.
        """
        embeddings = []
        for col, _, _ in self.embed_input:
            layer = self.embed_layers["emb_layer_" + col]
            embeddings.append(layer(X[:, self.column_idx[col]].long()))
        return torch.cat(embeddings, 1)
NormLayers = Union[nn.Identity, nn.LayerNorm, nn.BatchNorm1d]
class BayesianDiffSizeCatAndContEmbeddings(nn.Module):
    """Combines Bayesian embeddings for the categorical columns (each with
    its own embedding dim) with the continuous columns, which are
    normalised and optionally embedded.

    ``output_dim`` is the sum of all categorical embedding dims plus the
    continuous contribution: ``len(continuous_cols)`` when the continuous
    columns are not embedded, ``len(continuous_cols) * cont_embed_dim``
    when they are.
    """
    def __init__(self, column_idx: Dict[str, int], cat_embed_input: List[Tuple[str, int, int]], continuous_cols: Optional[List[str]], embed_continuous: bool, cont_embed_dim: int, use_cont_bias: bool, cont_norm_layer: Optional[str], prior_sigma_1: float, prior_sigma_2: float, prior_pi: float, posterior_mu_init: float, posterior_rho_init: float):
        super(BayesianDiffSizeCatAndContEmbeddings, self).__init__()
        self.cat_embed_input = cat_embed_input
        self.continuous_cols = continuous_cols
        self.embed_continuous = embed_continuous
        self.cont_embed_dim = cont_embed_dim
        # categorical embeddings, if any; their total output dim is the sum
        # of the per-column embedding dims
        if self.cat_embed_input is not None:
            self.cat_embed = BayesianDiffSizeCatEmbeddings(column_idx, cat_embed_input, prior_sigma_1, prior_sigma_2, prior_pi, posterior_mu_init, posterior_rho_init)
            self.cat_out_dim = int(np.sum([embed[2] for embed in self.cat_embed_input]))
        else:
            self.cat_out_dim = 0
        # continuous columns: normalisation layer plus optional Bayesian
        # per-column embeddings
        if continuous_cols is not None:
            self.cont_idx = [column_idx[col] for col in continuous_cols]
            if cont_norm_layer == 'layernorm':
                self.cont_norm: NormLayers = nn.LayerNorm(len(continuous_cols))
            elif cont_norm_layer == 'batchnorm':
                self.cont_norm = nn.BatchNorm1d(len(continuous_cols))
            else:
                # any other value (including None) means no normalisation
                self.cont_norm = nn.Identity()
            if self.embed_continuous:
                self.cont_embed = BayesianContEmbeddings(len(continuous_cols), cont_embed_dim, prior_sigma_1, prior_sigma_2, prior_pi, posterior_mu_init, posterior_rho_init, use_cont_bias)
                self.cont_out_dim = len(continuous_cols) * cont_embed_dim
            else:
                self.cont_out_dim = len(continuous_cols)
        else:
            self.cont_out_dim = 0
        # total feature dimension produced by ``forward``
        self.output_dim = self.cat_out_dim + self.cont_out_dim
    def forward(self, X: Tensor) -> Tuple[Tensor, Any]:
        """Return ``(x_cat, x_cont)``; either element is None when the
        corresponding columns were not configured.
        """
        if self.cat_embed_input is not None:
            x_cat = self.cat_embed(X)
        else:
            x_cat = None
        if self.continuous_cols is not None:
            x_cont = self.cont_norm(X[:, self.cont_idx].float())
            if self.embed_continuous:
                x_cont = self.cont_embed(x_cont)
                # flatten (batch, n_cont, embed_dim) -> (batch, n_cont * embed_dim)
                x_cont = einops.rearrange(x_cont, 'b s d -> b (s d)')
        else:
            x_cont = None
        return x_cat, x_cont
class BayesianWide(BaseBayesianModel):
    r"""Defines a `Wide` model. This is a linear model where the
    non-linearlities are captured via crossed-columns.

    Parameters
    ----------
    input_dim: int
        size of the Embedding layer. `input_dim` is the summation of all the
        individual values for all the features that go through the wide
        component. For example, if the wide component receives 2 features with
        5 individual values each, `input_dim = 10`
    pred_dim: int
        size of the output tensor containing the predictions
    prior_sigma_1: float, default = 1.0
        sigma of the first of the two Gaussian densities that are mixed
        (scaled by `prior_pi`) to produce the prior weight distribution
    prior_sigma_2: float, default = 0.002
        sigma of the second of the two Gaussian densities that are mixed to
        produce the prior weight distribution
    prior_pi: float, default = 0.8
        Scaling factor that mixes the two Gaussians of the prior
    posterior_mu_init: float, default = 0.0
        $\mu$ is initialised from a normal distribution with mean
        `posterior_mu_init` and std 0.1
    posterior_rho_init: float, default = -7.0
        $\rho$ is initialised from a normal distribution with mean
        `posterior_rho_init` and std 0.1; the posterior weight sample is
        $\mathbf{w} = \mu + log(1 + exp(\rho))$

    Attributes
    ----------
    bayesian_wide_linear: nn.Module
        the linear layer that comprises the wide branch of the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import BayesianWide
    >>> X = torch.empty(4, 4).random_(6)
    >>> wide = BayesianWide(input_dim=X.unique().size(0), pred_dim=1)
    >>> out = wide(X)
    """

    def __init__(
        self,
        input_dim: int,
        pred_dim: int = 1,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianWide, self).__init__()
        # A Bayesian embedding of size (input_dim + 1, pred_dim) acts as the
        # linear layer: summing the embeddings of the active feature indices
        # is the matrix product with their one-hot encoding. Index 0 is the
        # padding slot.
        self.bayesian_wide_linear = bnn.BayesianEmbedding(
            n_embed=input_dim + 1,
            embed_dim=pred_dim,
            padding_idx=0,
            prior_sigma_1=prior_sigma_1,
            prior_sigma_2=prior_sigma_2,
            prior_pi=prior_pi,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.bias = nn.Parameter(torch.zeros(pred_dim))

    def forward(self, X: Tensor) -> Tensor:
        """Sum the embeddings of the active features and add the bias."""
        embedded = self.bayesian_wide_linear(X.long())
        return embedded.sum(dim=1) + self.bias
class GEGLU(nn.Module):
    """GELU-gated linear unit: splits the last dimension into two halves
    and gates the first half with ``gelu`` of the second.
    """

    def forward(self, x):
        half, gate = torch.chunk(x, 2, dim=-1)
        return half * F.gelu(gate)
class REGLU(nn.Module):
    """ReLU-gated linear unit (ReGLU): splits the last dimension into two
    halves and gates the first half with ``relu`` of the second.

    BUG FIX: this previously applied ``F.gelu`` to the gates, which made it
    byte-identical to ``GEGLU``; ReGLU gates with ReLU by definition
    (Shazeer, "GLU Variants Improve Transformer", 2020).
    """

    def forward(self, x):
        x, gates = x.chunk(2, dim=-1)
        return x * F.relu(gates)
allowed_activations = ['relu', 'leaky_relu', 'tanh', 'gelu', 'geglu', 'reglu']
def get_activation_fn(activation):
    """Map an activation name to a freshly constructed activation module.

    Parameters
    ----------
    activation: str
        one of 'relu', 'leaky_relu', 'tanh', 'gelu', 'geglu', 'reglu' or
        'softplus'

    Raises
    ------
    ValueError
        if ``activation`` is not one of the supported names
    """
    # factories (not instances) so every call returns a new module, and so
    # GEGLU/REGLU are only resolved when actually requested
    factories = {
        'relu': lambda: nn.ReLU(inplace=True),
        'leaky_relu': lambda: nn.LeakyReLU(inplace=True),
        'tanh': lambda: nn.Tanh(),
        'gelu': lambda: nn.GELU(),
        'geglu': lambda: GEGLU(),
        'reglu': lambda: REGLU(),
        'softplus': lambda: nn.Softplus(),
    }
    factory = factories.get(activation)
    if factory is None:
        raise ValueError("Only the following activation functions are currently supported: {}. Note that 'geglu' and 'reglu' should only be used as transformer's activations".format(', '.join(allowed_activations)))
    return factory()
class BayesianMLP(nn.Module):
    """Multilayer perceptron built from ``bnn.BayesianLinear`` layers.

    ``d_hidden`` holds the layer sizes including the input size at position
    0; ``activation`` is applied after every dense layer except the last,
    which is followed by an ``nn.Identity``.
    """

    def __init__(
        self,
        d_hidden: List[int],
        activation: str,
        use_bias: bool = True,
        prior_sigma_1: float = 1.0,
        prior_sigma_2: float = 0.002,
        prior_pi: float = 0.8,
        posterior_mu_init: float = 0.0,
        posterior_rho_init: float = -7.0,
    ):
        super(BayesianMLP, self).__init__()
        self.d_hidden = d_hidden
        self.activation = activation

        act_fn = get_activation_fn(activation)
        self.bayesian_mlp = nn.Sequential()
        # layer i maps d_hidden[i-1] -> d_hidden[i]
        for i in range(1, len(d_hidden)):
            is_last = i == len(d_hidden) - 1
            dense = bnn.BayesianLinear(
                d_hidden[i - 1],
                d_hidden[i],
                use_bias,
                prior_sigma_1,
                prior_sigma_2,
                prior_pi,
                posterior_mu_init,
                posterior_rho_init,
            )
            block = nn.Sequential(dense, nn.Identity() if is_last else act_fn)
            self.bayesian_mlp.add_module('bayesian_dense_layer_{}'.format(i - 1), block)

    def forward(self, X: Tensor) -> Tensor:
        """Pass ``X`` through the stack of Bayesian dense layers."""
        return self.bayesian_mlp(X)
class BayesianTabMlp(BaseBayesianModel):
    """Defines a `BayesianTabMlp` model.

    This class combines embedding representations of the categorical features
    with numerical (aka continuous) features, embedded or not. These are then
    passed through a series of probabilistic dense layers (i.e. a MLP).

    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed through
        the `TabMlp` model. Required to slice the tensors. e.g. _{'education':
        0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name, number of unique values and
        embedding dimension. e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    cat_embed_activation: Optional, str, default = None,
        Activation function for the categorical embeddings, if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Type of normalization layer applied to the continuous features. Options
        are: 'layernorm', 'batchnorm' or None.
    embed_continuous: bool, default = False,
        Boolean indicating if the continuous columns will be embedded
        (i.e. passed each through a linear layer with or without activation)
    cont_embed_dim: int, default = 32,
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1,
        Dropout for the continuous embeddings
    use_cont_bias: bool, default = True,
        Boolean indicating if bias will be used for the continuous embeddings
    cont_embed_activation: Optional, str, default = None,
        Activation function for the continuous embeddings if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_hidden_dims: List, default = [200, 100]
        List with the number of neurons per dense layer in the mlp.
    mlp_activation: str, default = "leaky_relu"
        Activation function for the dense layers of the MLP. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    prior_sigma_1: float, default = 1.0
        The prior weight distribution is a scaled mixture of two Gaussian
        densities:

        $$
        \\begin{aligned}
        P(\\mathbf{w}) = \\prod_{i=j} \\pi N (\\mathbf{w}_j | 0, \\sigma_{1}^{2}) + (1 - \\pi) N (\\mathbf{w}_j | 0, \\sigma_{2}^{2})
        \\end{aligned}
        $$

        `prior_sigma_1` is the prior of the sigma parameter for the first of the two
        Gaussians that will be mixed to produce the prior weight
        distribution.
    prior_sigma_2: float, default = 0.002
        Prior of the sigma parameter for the second of the two Gaussian
        distributions that will be mixed to produce the prior weight
        distribution for each Bayesian linear and embedding layer
    prior_pi: float, default = 0.8
        Scaling factor that will be used to mix the Gaussians to produce the
        prior weight distribution for each Bayesian linear and embedding
        layer
    posterior_mu_init: float = 0.0
        The posterior sample of the weights is defined as:

        $$
        \\begin{aligned}
        \\mathbf{w} &= \\mu + log(1 + exp(\\rho))
        \\end{aligned}
        $$

        where:

        $$
        \\begin{aligned}
        \\mathcal{N}(x\\vert \\mu, \\sigma) &= \\frac{1}{\\sqrt{2\\pi}\\sigma}e^{-\\frac{(x-\\mu)^2}{2\\sigma^2}}\\\\
        \\log{\\mathcal{N}(x\\vert \\mu, \\sigma)} &= -\\log{\\sqrt{2\\pi}} -\\log{\\sigma} -\\frac{(x-\\mu)^2}{2\\sigma^2}\\\\
        \\end{aligned}
        $$

        $\\mu$ is initialised using a normal distribution with mean
        `posterior_mu_init` and std equal to 0.1.
    posterior_rho_init: float = -7.0
        As in the case of $\\mu$, $\\rho$ is initialised using a
        normal distribution with mean `posterior_rho_init` and std equal to
        0.1.

    Attributes
    ----------
    bayesian_cat_and_cont_embed: nn.Module
        This is the module that processes the categorical and continuous columns
    bayesian_tab_mlp: nn.Sequential
        mlp model that will receive the concatenation of the embeddings and
        the continuous columns

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.bayesian_models import BayesianTabMlp
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = BayesianTabMlp(mlp_hidden_dims=[8,4], column_idx=column_idx, cat_embed_input=cat_embed_input,
    ... continuous_cols = ['e'])
    >>> out = model(X_tab)
    """
    def __init__(self, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int, int]]]=None, cat_embed_dropout: float=0.1, cat_embed_activation: Optional[str]=None, continuous_cols: Optional[List[str]]=None, embed_continuous: bool=False, cont_embed_dim: int=32, cont_embed_dropout: float=0.1, cont_embed_activation: Optional[str]=None, use_cont_bias: bool=True, cont_norm_layer: str='batchnorm', mlp_hidden_dims: List[int]=[200, 100], mlp_activation: str='leaky_relu', prior_sigma_1: float=1, prior_sigma_2: float=0.002, prior_pi: float=0.8, posterior_mu_init: float=0.0, posterior_rho_init: float=-7.0, pred_dim=1):
        super(BayesianTabMlp, self).__init__()
        # store constructor arguments on the instance
        # NOTE(review): cat_embed_dropout and cont_embed_dropout are stored
        # but never used to build dropout layers anywhere in this class —
        # confirm whether dropout was meant to be applied.
        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.cat_embed_dropout = cat_embed_dropout
        self.cat_embed_activation = cat_embed_activation
        self.continuous_cols = continuous_cols
        self.cont_norm_layer = cont_norm_layer
        self.embed_continuous = embed_continuous
        self.cont_embed_dim = cont_embed_dim
        self.cont_embed_dropout = cont_embed_dropout
        self.use_cont_bias = use_cont_bias
        self.cont_embed_activation = cont_embed_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.prior_sigma_1 = prior_sigma_1
        self.prior_sigma_2 = prior_sigma_2
        self.prior_pi = prior_pi
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.pred_dim = pred_dim
        # local list deliberately shadows the module-level one: geglu/reglu
        # are not valid for the MLP's dense layers
        allowed_activations = ['relu', 'leaky_relu', 'tanh', 'gelu']
        if self.mlp_activation not in allowed_activations:
            raise ValueError("Currently, only the following activation functions are supported for the Bayesian MLP's dense layers: {}. Got '{}' instead".format(', '.join(allowed_activations), self.mlp_activation))
        self.cat_and_cont_embed = BayesianDiffSizeCatAndContEmbeddings(column_idx, cat_embed_input, continuous_cols, embed_continuous, cont_embed_dim, use_cont_bias, cont_norm_layer, prior_sigma_1, prior_sigma_2, prior_pi, posterior_mu_init, posterior_rho_init)
        self.cat_embed_act_fn = get_activation_fn(cat_embed_activation) if cat_embed_activation is not None else None
        self.cont_embed_act_fn = get_activation_fn(cont_embed_activation) if cont_embed_activation is not None else None
        # prepend the embedding output dim and append the prediction dim;
        # this builds a new list, so the mutable default is never mutated
        mlp_input_dim = self.cat_and_cont_embed.output_dim
        mlp_hidden_dims = [mlp_input_dim] + mlp_hidden_dims + [pred_dim]
        self.bayesian_tab_mlp = BayesianMLP(mlp_hidden_dims, mlp_activation, True, prior_sigma_1, prior_sigma_2, prior_pi, posterior_mu_init, posterior_rho_init)
    def forward(self, X: Tensor) -> Tensor:
        """Embed categorical/continuous columns, concatenate and run the MLP.

        NOTE(review): if both ``cat_embed_input`` and ``continuous_cols``
        are None, ``x`` is never bound and this raises UnboundLocalError —
        presumably such a configuration is considered invalid upstream.
        """
        x_cat, x_cont = self.cat_and_cont_embed(X)
        if x_cat is not None:
            x = self.cat_embed_act_fn(x_cat) if self.cat_embed_act_fn is not None else x_cat
        if x_cont is not None:
            if self.cont_embed_act_fn is not None:
                x_cont = self.cont_embed_act_fn(x_cont)
            x = torch.cat([x, x_cont], 1) if x_cat is not None else x_cont
        return self.bayesian_tab_mlp(x)
class MSELoss(nn.Module):
    """Mean square error loss with the option of using Label Smooth
    Distribution (LDS)

    LDS is based on
    [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None) -> Tensor:
        """Compute the (optionally LDS-weighted) mean squared error.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions
        target: Tensor
            Target tensor with the actual values
        lds_weight: Tensor, Optional
            Tensor of weights that will multiply the loss value.

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import MSELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> lds_weight = torch.tensor([0.1, 0.2, 0.3, 0.4]).view(-1, 1)
        >>> loss = MSELoss()(input, target, lds_weight)
        """
        squared_errors = (input - target) ** 2
        if lds_weight is not None:
            squared_errors = squared_errors * lds_weight
        return squared_errors.mean()
class MSLELoss(nn.Module):
    """Mean square log error loss with the option of using Label Smooth
    Distribution (LDS)

    LDS is based on
    [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None) -> Tensor:
        """Compute the (optionally LDS-weighted) mean squared log error.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions (not probabilities)
        target: Tensor
            Target tensor with the actual classes
        lds_weight: Tensor, Optional
            Tensor of weights that will multiply the loss value.

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import MSLELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> lds_weight = torch.tensor([0.1, 0.2, 0.3, 0.4]).view(-1, 1)
        >>> loss = MSLELoss()(input, target, lds_weight)
        """
        # log(x + 1) is undefined/complex for x < -1, hence both sides must
        # be non-negative
        assert input.min() >= 0, """All input values must be >=0, if your model is predicting
            values <0 try to enforce positive values by activation function
            on last layer with `trainer.enforce_positive_output=True`"""
        assert target.min() >= 0, 'All target values must be >=0'
        log_errors = (torch.log(input + 1) - torch.log(target + 1)) ** 2
        if lds_weight is not None:
            log_errors = log_errors * lds_weight
        return log_errors.mean()
class RMSELoss(nn.Module):
    """Root mean square error loss adjusted for the possibility of using Label
    Smooth Distribution (LDS)

    LDS is based on
    [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None) -> Tensor:
        """Compute the (optionally LDS-weighted) root mean squared error.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions (not probabilities)
        target: Tensor
            Target tensor with the actual classes
        lds_weight: Tensor, Optional
            Tensor of weights that will multiply the loss value.

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import RMSELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> lds_weight = torch.tensor([0.1, 0.2, 0.3, 0.4]).view(-1, 1)
        >>> loss = RMSELoss()(input, target, lds_weight)
        """
        squared_errors = (input - target) ** 2
        if lds_weight is not None:
            squared_errors = squared_errors * lds_weight
        return squared_errors.mean().sqrt()
class RMSLELoss(nn.Module):
    """Root mean square log error loss adjusted for the possibility of using Label
    Smooth Distribution (LDS)

    LDS is based on
    [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None) -> Tensor:
        """Compute the (optionally LDS-weighted) root mean squared log error.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions (not probabilities)
        target: Tensor
            Target tensor with the actual classes
        lds_weight: Tensor, Optional
            Tensor of weights that will multiply the loss value.

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import RMSLELoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> lds_weight = torch.tensor([0.1, 0.2, 0.3, 0.4]).view(-1, 1)
        >>> loss = RMSLELoss()(input, target, lds_weight)
        """
        # log(x + 1) is only well-defined for non-negative inputs
        assert input.min() >= 0, """All input values must be >=0, if your model is predicting
            values <0 try to enforce positive values by activation function
            on last layer with `trainer.enforce_positive_output=True`"""
        assert target.min() >= 0, 'All target values must be >=0'
        log_errors = (torch.log(input + 1) - torch.log(target + 1)) ** 2
        if lds_weight is not None:
            log_errors = log_errors * lds_weight
        return log_errors.mean().sqrt()
class QuantileLoss(nn.Module):
    """Quantile loss defined as:

    $$
    Loss = max(q \\times (y-y_{pred}), (1-q) \\times (y_{pred}-y))
    $$

    All credits go to the implementation at
    [pytorch-forecasting](https://pytorch-forecasting.readthedocs.io/en/latest/_modules/pytorch_forecasting/metrics.html#QuantileLoss).

    Parameters
    ----------
    quantiles: List, default = [0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]
        List of quantiles
    """
    # NOTE(review): mutable default argument; safe only as long as
    # ``self.quantiles`` is never mutated in place.
    def __init__(self, quantiles: List[float]=[0.02, 0.1, 0.25, 0.5, 0.75, 0.9, 0.98]):
        super().__init__()
        self.quantiles = quantiles
    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """
        Parameters
        ----------
        input: Tensor
            Input tensor with predictions; one column per quantile
        target: Tensor
            Target tensor with the actual values

        Examples
        --------
        >>> import torch
        >>>
        >>> from pytorch_widedeep.losses import QuantileLoss
        >>>
        >>> # REGRESSION
        >>> target = torch.tensor([[0.6, 1.5]]).view(-1, 1)
        >>> input = torch.tensor([[.1, .2,], [.4, .5]])
        >>> qloss = QuantileLoss([0.25, 0.75])
        >>> loss = qloss(input, target)
        """
        assert input.shape == torch.Size([target.shape[0], len(self.quantiles)]), f'The input and target have inconsistent shape. The dimension of the prediction of the model that is using QuantileLoss must be equal to number of quantiles, i.e. {len(self.quantiles)}.'
        target = target.view(-1, 1).float()
        losses = []
        for i, q in enumerate(self.quantiles):
            # NOTE(review): target is (N, 1) while input[..., i] is (N,), so
            # this subtraction broadcasts to an (N, N) matrix of pairwise
            # errors rather than per-sample errors. This mirrors the
            # referenced implementation, but confirm it is intended.
            errors = target - input[..., i]
            # pinball loss: q * error when under-predicting, (1 - q) * |error|
            # when over-predicting
            losses.append(torch.max((q - 1) * errors, q * errors).unsqueeze(-1))
        loss = torch.cat(losses, dim=2)
        return torch.mean(loss)
use_cuda = torch.cuda.is_available()
class FocalLoss(nn.Module):
    """Implementation of the [Focal loss](https://arxiv.org/pdf/1708.02002.pdf)
    for both binary and multiclass classification:

    $$
    FL(p_t) = \\alpha (1 - p_t)^{\\gamma} log(p_t)
    $$

    where, for a case of a binary classification problem

    $$
    \\begin{equation} p_t= \\begin{cases}p, & \\text{if $y=1$}.\\\\1-p, & \\text{otherwise}. \\end{cases} \\end{equation}
    $$

    Parameters
    ----------
    alpha: float
        Focal Loss `alpha` parameter
    gamma: float
        Focal Loss `gamma` parameter
    """
    def __init__(self, alpha: float=0.25, gamma: float=1.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def _get_weight(self, p: Tensor, t: Tensor) -> Tensor:
        """Per-element focal weight ``alpha_t * (1 - p_t)^gamma``, detached
        so no gradient flows through the weighting term.
        """
        pt = p * t + (1 - p) * (1 - t)
        w = self.alpha * t + (1 - self.alpha) * (1 - t)
        return (w * (1 - pt).pow(self.gamma)).detach()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Compute the focal loss as a weighted binary cross-entropy.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions (not probabilities)
        target: Tensor
            Target tensor with the actual classes

        Examples
        --------
        >>> import torch
        >>>
        >>> from pytorch_widedeep.losses import FocalLoss
        >>>
        >>> # BINARY
        >>> target = torch.tensor([0, 1, 0, 1]).view(-1, 1)
        >>> input = torch.tensor([[0.6, 0.7, 0.3, 0.8]]).t()
        >>> loss = FocalLoss()(input, target)
        >>>
        >>> # MULTICLASS
        >>> target = torch.tensor([1, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([[0.2, 0.5, 0.3], [0.8, 0.1, 0.1], [0.7, 0.2, 0.1]])
        >>> loss = FocalLoss()(input, target)
        """
        input_prob = torch.sigmoid(input)
        if input.size(1) == 1:
            # binary case with a single logit column: build the explicit
            # two-class probability matrix
            input_prob = torch.cat([1 - input_prob, input_prob], axis=1)
            num_class = 2
        else:
            num_class = input_prob.size(1)
        # One-hot encode the targets directly on input's device.
        # BUG FIX: the previous code read ``if use_cuda: binary_target =
        # binary_target`` — a no-op that left the one-hot tensor on the CPU
        # and caused a device mismatch when the model ran on GPU.
        binary_target = torch.eye(num_class, device=input.device)[target.squeeze().long()]
        binary_target = binary_target.contiguous()
        weight = self._get_weight(input_prob, binary_target)
        return F.binary_cross_entropy(input_prob, binary_target, weight, reduction='mean')
class BayesianRegressionLoss(nn.Module):
    """Negative log-likelihood of the targets under a Gaussian centred at
    the predictions with fixed scale ``noise_tolerance`` (the log Gaussian
    loss specified in the original publication 'Weight Uncertainty in
    Neural Networks').

    Currently we do not use this loss as is proportional to the
    `BayesianSELoss` and the latter does not need a scale/noise_tolerance
    param.
    """

    def __init__(self, noise_tolerance: float):
        super().__init__()
        self.noise_tolerance = noise_tolerance

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Return the summed negative Gaussian log-likelihood."""
        dist = torch.distributions.Normal(input, self.noise_tolerance)
        return -dist.log_prob(target).sum()
class BayesianSELoss(nn.Module):
    """Squared Loss (log Gaussian) for the case of a regression as specified in
    the original publication
    [Weight Uncertainty in Neural Networks](https://arxiv.org/abs/1505.05424).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Return half the summed squared residuals.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions (not probabilities)
        target: Tensor
            Target tensor with the actual classes

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import BayesianSELoss
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> loss = BayesianSELoss()(input, target)
        """
        residual = input - target
        return 0.5 * residual.pow(2).sum()
class TweedieLoss(nn.Module):
    """Tweedie loss for extremely unbalanced zero-inflated data.

    All credits go to Wenbo Shi. See
    [this post](https://towardsdatascience.com/tweedie-loss-function-for-right-skewed-data-2c5ca470678f)
    and the [original publication](https://arxiv.org/abs/1811.10192) for details.
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None, p: float = 1.5) -> Tensor:
        """Compute the Tweedie deviance between `input` and `target`.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions. Must be strictly positive, since
            it is raised to the (negative) power `1 - p`.
        target: Tensor
            Target tensor with the actual values. Must be non-negative.
        lds_weight: Tensor, Optional
            If we choose to use LDS this is the tensor of weights that will
            multiply the loss value.
        p: float, default = 1.5
            the power to be used to compute the loss. See the original
            publication for details

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import TweedieLoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> lds_weight = torch.tensor([0.1, 0.2, 0.3, 0.4]).view(-1, 1)
        >>> loss = TweedieLoss()(input, target, lds_weight)
        """
        # NOTE: the check is (and must be) strictly `> 0` because `input` is
        # raised to a negative power; the previous message wrongly said '>=0'
        assert input.min() > 0, (
            "All input values must be > 0, if your model is predicting "
            "values <= 0 try to enforce positive values by activation function "
            "on last layer with `trainer.enforce_positive_output=True`"
        )
        assert target.min() >= 0, 'All target values must be >=0'
        loss = -target * torch.pow(input, 1 - p) / (1 - p) + torch.pow(input, 2 - p) / (2 - p)
        if lds_weight is not None:
            loss *= lds_weight
        return torch.mean(loss)
class ZILNLoss(nn.Module):
    """Adjusted implementation of the Zero Inflated LogNormal Loss

    See [A Deep Probabilistic Model for Customer Lifetime Value Prediction](https://arxiv.org/pdf/1912.07753.pdf)
    and the corresponding
    [code](https://github.com/google/lifetime_value/blob/master/lifetime_value/zero_inflated_lognormal.py).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Combine a BCE 'is the target positive?' term with a LogNormal
        regression term over the positive targets.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions with shape (N, 3): the three
            columns are the positivity logit, the LogNormal `loc` and the
            (pre-softplus) LogNormal `scale`
        target: Tensor
            Target tensor with the actual target values

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import ZILNLoss
        >>>
        >>> target = torch.tensor([[0., 1.5]]).view(-1, 1)
        >>> input = torch.tensor([[.1, .2, .3], [.4, .5, .6]])
        >>> loss = ZILNLoss()(input, target)
        """
        is_positive = (target > 0).float()
        assert input.shape == torch.Size([target.shape[0], 3]), "Wrong shape of the 'input' tensor. The pred_dim of the model that is using ZILNLoss must be equal to 3."
        # column 0: logit for P(target > 0)
        classification_loss = F.binary_cross_entropy_with_logits(input[..., :1], is_positive, reduction='none').flatten()
        loc = input[..., 1:2]
        # floor the scale at sqrt(double eps) to keep LogNormal well defined
        raw_scale = F.softplus(input[..., 2:])
        eps_floor = torch.sqrt(torch.Tensor([torch.finfo(torch.double).eps])).type(raw_scale.type())
        scale = torch.max(raw_scale, eps_floor)
        # replace non-positive targets by 1 so log_prob stays finite; those
        # entries are masked out by `is_positive` anyway
        safe_labels = is_positive * target + (1 - is_positive) * torch.ones_like(target)
        lognormal = torch.distributions.log_normal.LogNormal(loc=loc, scale=scale)
        regression_loss = -torch.mean(is_positive * lognormal.log_prob(safe_labels), dim=-1)
        return torch.mean(classification_loss + regression_loss)
class L1Loss(nn.Module):
    """L1 loss with optional Label Distribution Smoothing (LDS) weights.

    LDS is based on
    [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554).
    """

    def __init__(self):
        super().__init__()

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None) -> Tensor:
        """Mean absolute error, optionally re-weighted elementwise by
        `lds_weight` before averaging.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions
        target: Tensor
            Target tensor with the actual values
        lds_weight: Tensor, Optional
            If we choose to use LDS this is the tensor of weights that will
            multiply the loss value.

        Examples
        --------
        >>> import torch
        >>>
        >>> from pytorch_widedeep.losses import L1Loss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> loss = L1Loss()(input, target)
        """
        elementwise = F.l1_loss(input, target, reduction='none')
        if lds_weight is not None:
            elementwise = elementwise * lds_weight
        return elementwise.mean()
class HuberLoss(nn.Module):
    """Huber loss with optional Label Distribution Smoothing (LDS) weights.

    Based on [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554).

    Parameters
    ----------
    beta: float, default = 0.2
        Threshold at which the loss switches from quadratic to linear.
    """

    def __init__(self, beta: float = 0.2):
        super().__init__()
        self.beta = beta

    def forward(self, input: Tensor, target: Tensor, lds_weight: Optional[Tensor] = None) -> Tensor:
        """Quadratic below `beta`, linear above, optionally re-weighted by
        `lds_weight` before averaging.

        Parameters
        ----------
        input: Tensor
            Input tensor with predictions (not probabilities)
        target: Tensor
            Target tensor with the actual classes
        lds_weight: Tensor, Optional
            If we choose to use LDS this is the tensor of weights that will
            multiply the loss value.

        Examples
        --------
        >>> import torch
        >>>
        >>> from pytorch_widedeep.losses import HuberLoss
        >>>
        >>> target = torch.tensor([1, 1.2, 0, 2]).view(-1, 1)
        >>> input = torch.tensor([0.6, 0.7, 0.3, 0.8]).view(-1, 1)
        >>> loss = HuberLoss()(input, target)
        """
        abs_err = torch.abs(input - target)
        quadratic = 0.5 * abs_err ** 2 / self.beta
        linear = abs_err - 0.5 * self.beta
        loss = torch.where(abs_err < self.beta, quadratic, linear)
        if lds_weight is not None:
            loss = loss * lds_weight
        return loss.mean()
class InfoNCELoss(nn.Module):
    """InfoNCE Loss. Loss applied during the Contrastive Denoising Self
    Supervised Pre-training routine available in this library

    :information_source: **NOTE**: This loss is in principle not exposed to
    the user, as it is used internally in the library, but it is included
    here for completion.

    See [SAINT: Improved Neural Networks for Tabular Data via Row Attention
    and Contrastive Pre-Training](https://arxiv.org/abs/2106.01342) and
    references therein

    Partially inspired by the code in this [repo](https://github.com/RElbers/info-nce-pytorch)

    Parameters:
    -----------
    temperature: float, default = 0.1
        The logits are divided by the temperature before computing the loss value
    reduction: str, default = "mean"
        Loss reduction method
    """

    def __init__(self, temperature: float = 0.1, reduction: str = 'mean'):
        super(InfoNCELoss, self).__init__()
        self.temperature = temperature
        self.reduction = reduction

    def forward(self, g_projs: Tuple[Tensor, Tensor]) -> Tensor:
        """Symmetric cross-entropy over the cosine-similarity matrix of the
        two projections; matching rows are the positive pairs.

        Parameters
        ----------
        g_projs: Tuple
            Tuple with the two tensors corresponding to the output of the two
            projection heads, as described 'SAINT: Improved Neural Networks
            for Tabular Data via Row Attention and Contrastive Pre-Training'.

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import InfoNCELoss
        >>> g_projs = (torch.rand(5, 5), torch.rand(5, 5))
        >>> loss = InfoNCELoss()
        >>> res = loss(g_projs)
        """
        proj_a, proj_b = g_projs
        flat_a = F.normalize(proj_a, dim=-1).flatten(1)
        flat_b = F.normalize(proj_b, dim=-1).flatten(1)
        sim_ab = flat_a @ flat_b.t() / self.temperature
        sim_ba = flat_b @ flat_a.t() / self.temperature
        # the i-th row of one view should match the i-th row of the other
        labels = torch.arange(len(flat_a), device=flat_a.device)
        loss_ab = F.cross_entropy(sim_ab, labels, reduction=self.reduction)
        loss_ba = F.cross_entropy(sim_ba, labels, reduction=self.reduction)
        return (loss_ab + loss_ba) / 2.0
class DenoisingLoss(nn.Module):
    """Denoising Loss. Loss applied during the Contrastive Denoising Self
    Supervised Pre-training routine available in this library

    :information_source: **NOTE**: This loss is in principle not exposed to
    the user, as it is used internally in the library, but it is included
    here for completion.

    See [SAINT: Improved Neural Networks for Tabular Data via Row Attention
    and Contrastive Pre-Training](https://arxiv.org/abs/2106.01342) and
    references therein

    Parameters:
    -----------
    lambda_cat: float, default = 1.
        Multiplicative factor that will be applied to loss associated to the
        categorical features
    lambda_cont: float, default = 1.
        Multiplicative factor that will be applied to loss associated to the
        continuous features
    reduction: str, default = "mean"
        Loss reduction method
    """

    def __init__(self, lambda_cat: float = 1.0, lambda_cont: float = 1.0, reduction: str = 'mean'):
        super(DenoisingLoss, self).__init__()
        self.lambda_cat = lambda_cat
        self.lambda_cont = lambda_cont
        self.reduction = reduction

    def forward(self, x_cat_and_cat_: Optional[Union[List[Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]], x_cont_and_cont_: Optional[Union[List[Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]]) -> Tensor:
        """Weighted sum of a cross-entropy term over the categorical
        reconstructions and an MSE term over the continuous ones.

        Parameters
        ----------
        x_cat_and_cat_: tuple of Tensors or lists of tuples
            Tuple of tensors containing the raw input features and their
            encodings, referred in the SAINT paper as $x$ and $x''$
            respectively. If one denoising MLP is used per categorical
            feature `x_cat_and_cat_` will be a list of tuples, one per
            categorical feature
        x_cont_and_cont_: tuple of Tensors or lists of tuples
            same as `x_cat_and_cat_` but for continuous columns

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import DenoisingLoss
        >>> x_cat_and_cat_ = (torch.empty(3).random_(3).long(), torch.randn(3, 3))
        >>> x_cont_and_cont_ = (torch.randn(3, 1), torch.randn(3, 1))
        >>> loss = DenoisingLoss()
        >>> res = loss(x_cat_and_cat_, x_cont_and_cont_)
        """
        if x_cat_and_cat_ is not None:
            cat_term = self._compute_cat_loss(x_cat_and_cat_)
        else:
            cat_term = torch.tensor(0.0)
        if x_cont_and_cont_ is not None:
            cont_term = self._compute_cont_loss(x_cont_and_cont_)
        else:
            cont_term = torch.tensor(0.0)
        return self.lambda_cat * cat_term + self.lambda_cont * cont_term

    def _compute_cat_loss(self, x_cat_and_cat_: Union[List[Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]) -> Tensor:
        # accumulate one cross-entropy term per (raw, reconstructed) pair
        total = torch.tensor(0.0, device=self._get_device(x_cat_and_cat_))
        if isinstance(x_cat_and_cat_, list):
            for raw, recon in x_cat_and_cat_:
                total = total + F.cross_entropy(recon, raw, reduction=self.reduction)
        elif isinstance(x_cat_and_cat_, tuple):
            raw, recon = x_cat_and_cat_
            total = total + F.cross_entropy(recon, raw, reduction=self.reduction)
        return total

    def _compute_cont_loss(self, x_cont_and_cont_) -> Tensor:
        # accumulate one MSE term per (raw, reconstructed) pair
        total = torch.tensor(0.0, device=self._get_device(x_cont_and_cont_))
        if isinstance(x_cont_and_cont_, list):
            for raw, recon in x_cont_and_cont_:
                total = total + F.mse_loss(recon, raw, reduction=self.reduction)
        elif isinstance(x_cont_and_cont_, tuple):
            raw, recon = x_cont_and_cont_
            total = total + F.mse_loss(recon, raw, reduction=self.reduction)
        return total

    @staticmethod
    def _get_device(x_and_x_: Union[List[Tuple[Tensor, Tensor]], Tuple[Tensor, Tensor]]):
        # a plain tuple holds (x, x_); a list holds one such tuple per feature
        if isinstance(x_and_x_, tuple):
            device = x_and_x_[0].device
        elif isinstance(x_and_x_, list):
            device = x_and_x_[0][0].device
        return device
class EncoderDecoderLoss(nn.Module):
    """'_Standard_' Encoder Decoder Loss. Loss applied during the Endoder-Decoder
    Self-Supervised Pre-Training routine available in this library

    :information_source: **NOTE**: This loss is in principle not exposed to
    the user, as it is used internally in the library, but it is included
    here for completion.

    The implementation of this lost is based on that at the
    [tabnet repo](https://github.com/dreamquark-ai/tabnet), which is in itself an
    adaptation of that in the original paper [TabNet: Attentive
    Interpretable Tabular Learning](https://arxiv.org/abs/1908.07442).

    Parameters:
    -----------
    eps: float
        Simply a small number to avoid dividing by zero
    """

    def __init__(self, eps: float = 1e-09):
        super(EncoderDecoderLoss, self).__init__()
        self.eps = eps

    def forward(self, x_true: Tensor, x_pred: Tensor, mask: Tensor) -> Tensor:
        """Masked, per-feature-variance-normalised reconstruction error.

        Parameters
        ----------
        x_true: Tensor
            Embeddings of the input data
        x_pred: Tensor
            Reconstructed embeddings
        mask: Tensor
            Mask with 1s indicated that the reconstruction, and therefore the
            loss, is based on those features.

        Examples
        --------
        >>> import torch
        >>> from pytorch_widedeep.losses import EncoderDecoderLoss
        >>> x_true = torch.rand(3, 3)
        >>> x_pred = torch.rand(3, 3)
        >>> mask = torch.empty(3, 3).random_(2)
        >>> loss = EncoderDecoderLoss()
        >>> res = loss(x_true, x_pred, mask)
        """
        masked_sq_err = ((x_pred - x_true) * mask) ** 2
        # per-feature statistics used to normalise the errors
        col_means = torch.mean(x_true, dim=0)
        col_means[col_means == 0] = 1
        col_vars = torch.std(x_true, dim=0) ** 2
        # constant columns would divide by zero: fall back to the column mean
        col_vars[col_vars == 0] = col_means[col_vars == 0]
        per_sample = torch.matmul(masked_sq_err, 1 / col_vars)
        n_masked = torch.sum(mask, dim=1)
        return torch.mean(per_sample / (n_masked + self.eps))
def find_bin(bin_edges: Union[np.ndarray, Tensor], values: Union[np.ndarray, Tensor], ret_value: bool = True) -> Union[np.ndarray, Tensor]:
    """Return, for each value, the left bin edge (or its index) in a
    monotonically increasing array of bin edges.

    Parameters
    ----------
    bin_edges: Union[np.ndarray, Tensor]
        monotonically increasing array of bin edges
    values: Union[np.ndarray, Tensor]
        values for which we want corresponding bins
    ret_value: bool
        if True, return bin values else indices

    Returns
    -------
    left_bin_edges: Union[np.ndarray, Tensor]
        left bin edges (or their indices if `ret_value` is False)

    Raises
    ------
    TypeError
        if `bin_edges` and `values` are not both np.ndarray or both Tensor
    """
    # Fixes over the previous version: removed the no-op `bin_edges = bin_edges`
    # line, switched `type(x) ==` checks to `isinstance`, and corrected the
    # typos in the TypeError message ('teh', 'of Tensor').
    if isinstance(bin_edges, np.ndarray) and isinstance(values, np.ndarray):
        indices: Union[np.ndarray, Tensor] = np.searchsorted(bin_edges, values, side='left')
        # shift interior insertion points one step left so the index refers
        # to the bin's LEFT edge; 0 and len(bin_edges) are handled below
        indices = np.where((indices == 0) | (indices == len(bin_edges)), indices, indices - 1)
        # values beyond the last edge are clamped into the last bin
        indices = np.where(indices != len(bin_edges), indices, indices - 2)
    elif isinstance(bin_edges, Tensor) and isinstance(values, Tensor):
        indices = torch.searchsorted(bin_edges, values, right=False)
        indices = torch.where((indices == 0) | (indices == len(bin_edges)), indices, indices - 1)
        indices = torch.where(indices != len(bin_edges), indices, indices - 2)
    else:
        raise TypeError('Both input arrays must be of the same type, either np.ndarray or Tensor')
    return indices if not ret_value else bin_edges[indices]
def _laplace(x, sigma: Union[int, float] = 2):
    """Density of a zero-mean Laplace distribution with scale `sigma`."""
    normaliser = 2.0 * sigma
    return np.exp(-abs(x) / sigma) / normaliser
def set_default_attr(obj: Any, name: str, value: Any):
    """Set the `name` attribute of `obj` to `value` if the attribute does not
    already exist

    Parameters
    ----------
    obj: Object
        Object whose `name` attribute will be returned (after setting it to
        `value`, if necessary)
    name: String
        Name of the attribute to set to `value`, or to return
    value: Object
        Default value to give to `obj.name` if the attribute does not already
        exist

    Returns
    -------
    Object
        `obj.name` if it exists. Else, `value`

    Examples
    --------
    >>> foo = type("Foo", tuple(), {"my_attr": 32})
    >>> set_default_attr(foo, "my_attr", 99)
    32
    >>> set_default_attr(foo, "other_attr", 9000)
    9000
    >>> assert foo.my_attr == 32
    >>> assert foo.other_attr == 9000
    """
    # sentinel distinguishes "attribute missing" from "attribute is None"
    _missing = object()
    current = getattr(obj, name, _missing)
    if current is _missing:
        setattr(obj, name, value)
        return value
    return current
def dense_layer(inp: int, out: int, activation: str, p: float, bn: bool, linear_first: bool):
act_fn = get_activation_fn(activation)
layers = [nn.BatchNorm1d(out if linear_first else inp)] if bn else []
if p != 0:
layers.append(nn.Dropout(p))
lin = [nn.Linear(inp, out, bias=not bn), act_fn]
layers = lin + layers if linear_first else layers + lin
return nn.Sequential(*layers)
class MLP(nn.Module):
def __init__(self, d_hidden: List[int], activation: str, dropout: Optional[Union[float, List[float]]], batchnorm: bool, batchnorm_last: bool, linear_first: bool):
super(MLP, self).__init__()
if not dropout:
dropout = [0.0] * len(d_hidden)
elif isinstance(dropout, float):
dropout = [dropout] * len(d_hidden)
self.mlp = nn.Sequential()
for i in range(1, len(d_hidden)):
self.mlp.add_module('dense_layer_{}'.format(i - 1), dense_layer(d_hidden[i - 1], d_hidden[i], activation, dropout[i - 1], batchnorm and (i != len(d_hidden) - 1 or batchnorm_last), linear_first))
def forward(self, X: Tensor) ->Tensor:
return self.mlp(X)
# Model-family name prefixes accepted for the pretrained image component.
# NOTE(review): presumably matched against torchvision model names — confirm at the call site.
allowed_pretrained_models = ['resnet', 'shufflenet', 'resnext', 'wide_resnet', 'regnet', 'densenet', 'mobilenet', 'mnasnet', 'efficientnet', 'squeezenet']
def conv_layer(ni: int, nf: int, kernel_size: int = 3, stride: int = 1, maxpool: bool = True, adaptiveavgpool: bool = False):
    """Conv2d -> BatchNorm2d -> LeakyReLU block, optionally followed by a 2x2
    max-pool and/or a global (1, 1) adaptive average-pool.

    Parameters
    ----------
    ni: int
        number of input channels
    nf: int
        number of output channels
    kernel_size: int, default = 3
        convolution kernel size (padding is kernel_size // 2, i.e. 'same' for
        stride 1)
    stride: int, default = 1
        convolution stride
    maxpool: bool, default = True
        if True append a MaxPool2d(2, 2)
    adaptiveavgpool: bool, default = False
        if True append an AdaptiveAvgPool2d((1, 1))
    """
    block = nn.Sequential(
        nn.Conv2d(ni, nf, kernel_size=kernel_size, stride=stride, bias=False, padding=kernel_size // 2),
        nn.BatchNorm2d(nf, momentum=0.01),
        nn.LeakyReLU(negative_slope=0.1, inplace=True),
    )
    if maxpool:
        block.add_module('maxpool', nn.MaxPool2d(2, 2))
    if adaptiveavgpool:
        block.add_module('adaptiveavgpool', nn.AdaptiveAvgPool2d(output_size=(1, 1)))
    return block
class ContEmbeddings(nn.Module):
    """Per-column linear embeddings for continuous features.

    Each of the `n_cont_cols` columns owns an `embed_dim`-dimensional weight
    (and optional bias): the embedding of a value is the value times the
    column weight, plus the column bias.
    """

    def __init__(self, n_cont_cols: int, embed_dim: int, embed_dropout: float, use_bias: bool):
        super(ContEmbeddings, self).__init__()
        self.n_cont_cols = n_cont_cols
        self.embed_dim = embed_dim
        self.embed_dropout = embed_dropout
        self.use_bias = use_bias
        self.weight = nn.init.kaiming_uniform_(nn.Parameter(torch.Tensor(n_cont_cols, embed_dim)), a=math.sqrt(5))
        if use_bias:
            self.bias = nn.init.kaiming_uniform_(nn.Parameter(torch.Tensor(n_cont_cols, embed_dim)), a=math.sqrt(5))
        else:
            self.bias = None

    def forward(self, X: Tensor) -> Tensor:
        # (batch, cols) -> (batch, cols, embed_dim)
        embedded = X.unsqueeze(2) * self.weight.unsqueeze(0)
        if self.bias is not None:
            embedded = embedded + self.bias.unsqueeze(0)
        return F.dropout(embedded, self.embed_dropout, self.training)

    def extra_repr(self) -> str:
        template = '{n_cont_cols}, {embed_dim}, embed_dropout={embed_dropout}, use_bias={use_bias}'
        return template.format(**self.__dict__)
class DiffSizeCatEmbeddings(nn.Module):
    """Categorical embeddings where every column can have its own embedding
    dimension; the per-column embeddings are concatenated along dim 1.

    Parameters
    ----------
    column_idx: Dict[str, int]
        column name to position in the input tensor
    embed_input: List[Tuple[str, int, int]]
        one (col_name, n_unique_values, embed_dim) tuple per column
    embed_dropout: float
        dropout applied to the concatenated embeddings
    use_bias: bool
        if True, a learnable per-column bias is added to the lookups
    """

    def __init__(self, column_idx: Dict[str, int], embed_input: List[Tuple[str, int, int]], embed_dropout: float, use_bias: bool):
        super(DiffSizeCatEmbeddings, self).__init__()
        self.column_idx = column_idx
        self.embed_input = embed_input
        self.use_bias = use_bias
        self.embed_layers_names = None
        if self.embed_input is not None:
            # '.' is not a valid character in a ModuleDict key
            self.embed_layers_names = {name: name.replace('.', '_') for name, _, _ in self.embed_input}
        # index 0 is reserved as padding for unseen categories
        self.embed_layers = nn.ModuleDict(
            {'emb_layer_' + self.embed_layers_names[name]: nn.Embedding(n_unique + 1, dim, padding_idx=0)
             for name, n_unique, dim in self.embed_input}
        )
        self.embedding_dropout = nn.Dropout(embed_dropout)
        if use_bias:
            self.biases = nn.ParameterDict()
            for name, _, dim in self.embed_input:
                # same bound nn.Linear uses for its bias
                bound = 1 / math.sqrt(dim)
                self.biases['bias_' + name] = nn.Parameter(nn.init.uniform_(torch.Tensor(dim), -bound, bound))
        self.emb_out_dim: int = int(np.sum([dim for _, _, dim in self.embed_input]))

    def forward(self, X: Tensor) -> Tensor:
        per_col = []
        for name, _, dim in self.embed_input:
            lookup = self.embed_layers['emb_layer_' + self.embed_layers_names[name]](X[:, self.column_idx[name]].long())
            if self.use_bias:
                lookup = lookup + self.biases['bias_' + name].unsqueeze(0)
            else:
                lookup = lookup + torch.zeros(1, dim, device=X.device)
            per_col.append(lookup)
        return self.embedding_dropout(torch.cat(per_col, 1))
class DiffSizeCatAndContEmbeddings(nn.Module):
    """Bundles `DiffSizeCatEmbeddings` for the categorical columns with a
    normalisation (and optional `ContEmbeddings`) of the continuous columns."""

    def __init__(self, column_idx: Dict[str, int], cat_embed_input: List[Tuple[str, int, int]], cat_embed_dropout: float, use_cat_bias: bool, continuous_cols: Optional[List[str]], cont_norm_layer: str, embed_continuous: bool, cont_embed_dim: int, cont_embed_dropout: float, use_cont_bias: bool):
        super(DiffSizeCatAndContEmbeddings, self).__init__()
        self.cat_embed_input = cat_embed_input
        self.continuous_cols = continuous_cols
        self.embed_continuous = embed_continuous
        self.cont_embed_dim = cont_embed_dim
        # categorical branch
        if self.cat_embed_input is not None:
            self.cat_embed = DiffSizeCatEmbeddings(column_idx, cat_embed_input, cat_embed_dropout, use_cat_bias)
            self.cat_out_dim = int(np.sum([e[2] for e in self.cat_embed_input]))
        else:
            self.cat_out_dim = 0
        # continuous branch
        if continuous_cols is not None:
            self.cont_idx = [column_idx[col] for col in continuous_cols]
            if cont_norm_layer == 'layernorm':
                self.cont_norm: NormLayers = nn.LayerNorm(len(continuous_cols))
            elif cont_norm_layer == 'batchnorm':
                self.cont_norm = nn.BatchNorm1d(len(continuous_cols))
            else:
                self.cont_norm = nn.Identity()
            if self.embed_continuous:
                self.cont_embed = ContEmbeddings(len(continuous_cols), cont_embed_dim, cont_embed_dropout, use_cont_bias)
                self.cont_out_dim = len(continuous_cols) * cont_embed_dim
            else:
                self.cont_out_dim = len(continuous_cols)
        else:
            self.cont_out_dim = 0
        self.output_dim = self.cat_out_dim + self.cont_out_dim

    def forward(self, X: Tensor) -> Tuple[Tensor, Any]:
        x_cat = self.cat_embed(X) if self.cat_embed_input is not None else None
        if self.continuous_cols is None:
            return x_cat, None
        x_cont = self.cont_norm(X[:, self.cont_idx].float())
        if self.embed_continuous:
            # flatten (batch, cols, dim) -> (batch, cols * dim)
            x_cont = einops.rearrange(self.cont_embed(x_cont), 'b s d -> b (s d)')
        return x_cat, x_cont
class BaseTabularModelWithoutAttention(nn.Module):
    """Base class for tabular models that do NOT use attention: stores the
    embedding configuration and exposes `_get_embeddings`."""

    def __init__(self, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int, int]]], cat_embed_dropout: float, use_cat_bias: bool, cat_embed_activation: Optional[str], continuous_cols: Optional[List[str]], cont_norm_layer: str, embed_continuous: bool, cont_embed_dim: int, cont_embed_dropout: float, use_cont_bias: bool, cont_embed_activation: Optional[str]):
        super().__init__()
        # keep the full configuration around for introspection / repr
        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.cat_embed_dropout = cat_embed_dropout
        self.use_cat_bias = use_cat_bias
        self.cat_embed_activation = cat_embed_activation
        self.continuous_cols = continuous_cols
        self.cont_norm_layer = cont_norm_layer
        self.embed_continuous = embed_continuous
        self.cont_embed_dim = cont_embed_dim
        self.cont_embed_dropout = cont_embed_dropout
        self.use_cont_bias = use_cont_bias
        self.cont_embed_activation = cont_embed_activation
        self.cat_and_cont_embed = DiffSizeCatAndContEmbeddings(column_idx, cat_embed_input, cat_embed_dropout, use_cat_bias, continuous_cols, cont_norm_layer, embed_continuous, cont_embed_dim, cont_embed_dropout, use_cont_bias)
        self.cat_embed_act_fn = get_activation_fn(cat_embed_activation) if cat_embed_activation is not None else None
        self.cont_embed_act_fn = get_activation_fn(cont_embed_activation) if cont_embed_activation is not None else None

    def _get_embeddings(self, X: Tensor) -> Tensor:
        # embed, optionally activate, and concatenate the two branches
        x_cat, x_cont = self.cat_and_cont_embed(X)
        if x_cat is not None:
            x = x_cat if self.cat_embed_act_fn is None else self.cat_embed_act_fn(x_cat)
        if x_cont is not None:
            if self.cont_embed_act_fn is not None:
                x_cont = self.cont_embed_act_fn(x_cont)
            x = x_cont if x_cat is None else torch.cat([x, x_cont], 1)
        return x

    @property
    def output_dim(self) -> int:
        # subclasses must report the dimension of _get_embeddings' output
        raise NotImplementedError
class FullEmbeddingDropout(nn.Module):
    """Dropout that zeroes entire positions along dim 1 (e.g. whole column
    embeddings) rather than individual activations."""

    def __init__(self, p: float):
        super(FullEmbeddingDropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError(f'p probability has to be between 0 and 1, but got {p}')
        self.p = p

    def forward(self, X: Tensor) -> Tensor:
        if not self.training:
            return X
        keep_prob = 1 - self.p
        # one Bernoulli draw per position along dim 1, rescaled and broadcast
        mask = X.new().resize_((X.size(1), 1)).bernoulli_(keep_prob).expand_as(X) / keep_prob
        return X * mask

    def extra_repr(self) -> str:
        return f'p={self.p}'
# Type alias: the dropout flavours accepted by the embedding modules below.
DropoutLayers = Union[nn.Dropout, FullEmbeddingDropout]
class SharedEmbeddings(nn.Module):
    """Embedding layer where part of (or all of) each embedding is shared by
    every category of a column, as in the SAINT/TabTransformer setups."""

    def __init__(self, n_embed: int, embed_dim: int, embed_dropout: float, full_embed_dropout: bool = False, add_shared_embed: bool = False, frac_shared_embed=0.25):
        super(SharedEmbeddings, self).__init__()
        assert frac_shared_embed < 1, "'frac_shared_embed' must be less than 1"
        self.add_shared_embed = add_shared_embed
        self.embed = nn.Embedding(n_embed, embed_dim, padding_idx=0)
        self.embed.weight.data.clamp_(-2, 2)
        # if the shared part is ADDED it spans the full embedding; otherwise
        # it REPLACES the first `frac_shared_embed` fraction of the columns
        shared_dim = embed_dim if add_shared_embed else int(embed_dim * frac_shared_embed)
        self.shared_embed = nn.Parameter(torch.empty(1, shared_dim).uniform_(-1, 1))
        if full_embed_dropout:
            self.dropout: DropoutLayers = FullEmbeddingDropout(embed_dropout)
        else:
            self.dropout = nn.Dropout(embed_dropout)

    def forward(self, X: Tensor) -> Tensor:
        embedded = self.dropout(self.embed(X))
        shared = self.shared_embed.expand(embedded.shape[0], -1)
        if self.add_shared_embed:
            embedded = embedded + shared
        else:
            embedded[:, :shared.shape[1]] = shared
        return embedded
class SameSizeCatEmbeddings(nn.Module):
    """Categorical embeddings where every column shares the same embedding
    dim (`embed_dim`), optionally built from per-column `SharedEmbeddings`."""

    def __init__(self, embed_dim: int, column_idx: Dict[str, int], embed_input: Optional[List[Tuple[str, int]]], embed_dropout: float, use_bias: bool, full_embed_dropout: bool, shared_embed: bool, add_shared_embed: bool, frac_shared_embed: float):
        super(SameSizeCatEmbeddings, self).__init__()
        self.n_tokens = sum(n for _, n in embed_input)
        self.column_idx = column_idx
        self.embed_input = embed_input
        self.shared_embed = shared_embed
        self.with_cls_token = 'cls_token' in column_idx
        self.embed_layers_names = None
        if self.embed_input is not None:
            # '.' is not a valid character in a ModuleDict key
            self.embed_layers_names = {name: name.replace('.', '_') for name, _ in self.embed_input}
        cat_cols = [name for name, _ in embed_input]
        self.cat_idx = [self.column_idx[c] for c in cat_cols]
        if use_bias:
            if shared_embed:
                warnings.warn("The current implementation of 'SharedEmbeddings' does not use bias", UserWarning)
            # the cls token (if present) carries no bias
            n_biased = len(cat_cols) - 1 if self.with_cls_token else len(cat_cols)
            self.bias = nn.init.kaiming_uniform_(nn.Parameter(torch.Tensor(n_biased, embed_dim)), a=math.sqrt(5))
        else:
            self.bias = None
        if self.shared_embed:
            self.embed: Union[nn.ModuleDict, nn.Embedding] = nn.ModuleDict(
                {'emb_layer_' + self.embed_layers_names[name]: SharedEmbeddings(n if name == 'cls_token' else n + 1, embed_dim, embed_dropout, full_embed_dropout, add_shared_embed, frac_shared_embed)
                 for name, n in self.embed_input}
            )
        else:
            # one flat embedding matrix over all tokens (+1 padding at idx 0)
            self.embed = nn.Embedding(self.n_tokens + 1, embed_dim, padding_idx=0)
        if full_embed_dropout:
            self.dropout: DropoutLayers = FullEmbeddingDropout(embed_dropout)
        else:
            self.dropout = nn.Dropout(embed_dropout)

    def forward(self, X: Tensor) -> Tensor:
        if self.shared_embed:
            pieces = [
                self.embed['emb_layer_' + self.embed_layers_names[name]](X[:, self.column_idx[name]].long()).unsqueeze(1)
                for name, _ in self.embed_input
            ]
            x = torch.cat(pieces, 1)
        else:
            x = self.embed(X[:, self.cat_idx].long())
        if self.bias is not None:
            if self.with_cls_token:
                # prepend a zero bias row for the cls token
                bias = torch.cat([torch.zeros(1, self.bias.shape[1], device=x.device), self.bias])
            else:
                bias = self.bias
            x = x + bias.unsqueeze(0)
        return self.dropout(x)
class SameSizeCatAndContEmbeddings(nn.Module):
    """Bundles `SameSizeCatEmbeddings` for the categorical columns with a
    normalisation (and optional `ContEmbeddings`) of the continuous columns."""

    def __init__(self, embed_dim: int, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int]]], cat_embed_dropout: float, use_cat_bias: bool, full_embed_dropout: bool, shared_embed: bool, add_shared_embed: bool, frac_shared_embed: float, continuous_cols: Optional[List[str]], cont_norm_layer: str, embed_continuous: bool, cont_embed_dropout: float, use_cont_bias: bool):
        super(SameSizeCatAndContEmbeddings, self).__init__()
        self.embed_dim = embed_dim
        self.cat_embed_input = cat_embed_input
        self.continuous_cols = continuous_cols
        self.embed_continuous = embed_continuous
        # categorical branch
        if cat_embed_input is not None:
            self.cat_embed = SameSizeCatEmbeddings(embed_dim, column_idx, cat_embed_input, cat_embed_dropout, use_cat_bias, full_embed_dropout, shared_embed, add_shared_embed, frac_shared_embed)
        # continuous branch
        if continuous_cols is not None:
            self.cont_idx = [column_idx[c] for c in continuous_cols]
            if cont_norm_layer == 'layernorm':
                self.cont_norm: NormLayers = nn.LayerNorm(len(continuous_cols))
            elif cont_norm_layer == 'batchnorm':
                self.cont_norm = nn.BatchNorm1d(len(continuous_cols))
            else:
                self.cont_norm = nn.Identity()
            if self.embed_continuous:
                self.cont_embed = ContEmbeddings(len(continuous_cols), embed_dim, cont_embed_dropout, use_cont_bias)

    def forward(self, X: Tensor) -> Tuple[Tensor, Any]:
        x_cat = self.cat_embed(X) if self.cat_embed_input is not None else None
        if self.continuous_cols is None:
            return x_cat, None
        x_cont = self.cont_norm(X[:, self.cont_idx].float())
        if self.embed_continuous:
            x_cont = self.cont_embed(x_cont)
        return x_cat, x_cont
class BaseTabularModelWithAttention(nn.Module):
    """Base class for attention-based tabular models: stores the embedding
    configuration and exposes `_get_embeddings`."""

    def __init__(self, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int]]], cat_embed_dropout: float, use_cat_bias: bool, cat_embed_activation: Optional[str], full_embed_dropout: bool, shared_embed: bool, add_shared_embed: bool, frac_shared_embed: float, continuous_cols: Optional[List[str]], cont_norm_layer: str, embed_continuous: bool, cont_embed_dropout: float, use_cont_bias: bool, cont_embed_activation: Optional[str], input_dim: int):
        super().__init__()
        # keep the full configuration around for introspection / repr
        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.cat_embed_dropout = cat_embed_dropout
        self.use_cat_bias = use_cat_bias
        self.cat_embed_activation = cat_embed_activation
        self.full_embed_dropout = full_embed_dropout
        self.shared_embed = shared_embed
        self.add_shared_embed = add_shared_embed
        self.frac_shared_embed = frac_shared_embed
        self.continuous_cols = continuous_cols
        self.cont_norm_layer = cont_norm_layer
        self.embed_continuous = embed_continuous
        self.cont_embed_dropout = cont_embed_dropout
        self.use_cont_bias = use_cont_bias
        self.cont_embed_activation = cont_embed_activation
        self.input_dim = input_dim
        self.cat_and_cont_embed = SameSizeCatAndContEmbeddings(input_dim, column_idx, cat_embed_input, cat_embed_dropout, use_cat_bias, full_embed_dropout, shared_embed, add_shared_embed, frac_shared_embed, continuous_cols, cont_norm_layer, embed_continuous, cont_embed_dropout, use_cont_bias)
        self.cat_embed_act_fn = get_activation_fn(cat_embed_activation) if cat_embed_activation is not None else None
        self.cont_embed_act_fn = get_activation_fn(cont_embed_activation) if cont_embed_activation is not None else None

    def _get_embeddings(self, X: Tensor) -> Tensor:
        # embed, optionally activate, and concatenate the two branches
        x_cat, x_cont = self.cat_and_cont_embed(X)
        if x_cat is not None:
            x = x_cat if self.cat_embed_act_fn is None else self.cat_embed_act_fn(x_cat)
        if x_cont is not None:
            if self.cont_embed_act_fn is not None:
                x_cont = self.cont_embed_act_fn(x_cont)
            x = x_cont if x_cat is None else torch.cat([x, x_cont], 1)
        return x

    @property
    def output_dim(self) -> int:
        # subclasses must report the dimension of _get_embeddings' output
        raise NotImplementedError

    @property
    def attention_weights(self):
        # subclasses must expose their attention weights for inspection
        raise NotImplementedError
class Wide(nn.Module):
    """Defines a `Wide` (linear) model where the non-linearities are
    captured via the so-called crossed-columns. This can be used as the
    `wide` component of a Wide & Deep model.

    Parameters
    -----------
    input_dim: int
        size of the Embedding layer. `input_dim` is the summation of all the
        individual values for all the features that go through the wide
        model. For example, if the wide model receives 2 features with
        5 individual values each, `input_dim = 10`
    pred_dim: int, default = 1
        size of the ouput tensor containing the predictions. Note that unlike
        all the other models, the wide model is connected directly to the
        output neuron(s) when used to build a Wide and Deep model. Therefore,
        it requires the `pred_dim` parameter.

    Attributes
    -----------
    wide_linear: nn.Module
        the linear layer that comprises the wide branch of the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import Wide
    >>> X = torch.empty(4, 4).random_(6)
    >>> wide = Wide(input_dim=X.unique().size(0), pred_dim=1)
    >>> out = wide(X)
    """

    def __init__(self, input_dim: int, pred_dim: int = 1):
        super(Wide, self).__init__()
        self.input_dim = input_dim
        self.pred_dim = pred_dim
        # one embedding row per token; index 0 is reserved as padding
        self.wide_linear = nn.Embedding(input_dim + 1, pred_dim, padding_idx=0)
        self.bias = nn.Parameter(torch.zeros(pred_dim))
        self._reset_parameters()

    def _reset_parameters(self) -> None:
        """initialize Embedding and bias like nn.Linear. See [original
        implementation](https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear).
        """
        nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.weight)
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.bias, -bound, bound)

    def forward(self, X: Tensor) -> Tensor:
        """Forward pass. Sum the embedding rows selected by `X` (equivalent
        to a linear layer over one-hot inputs) and add the bias."""
        return self.wide_linear(X.long()).sum(dim=1) + self.bias
class ContextAttention(nn.Module):
    """Context-vector attention inspired by `Hierarchical Attention Networks
    for Document Classification
    <https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf>`_

    Each position of the input sequence is scored against a learned context
    vector; the sequence is then re-weighted by the resulting softmax
    weights and optionally pooled along the sequence dimension.
    """

    def __init__(self, input_dim: int, dropout: float, sum_along_seq: bool = False):
        super(ContextAttention, self).__init__()
        self.inp_proj = nn.Linear(input_dim, input_dim)
        self.context = nn.Linear(input_dim, 1, bias=False)
        self.dropout = nn.Dropout(dropout)
        self.sum_along_seq = sum_along_seq

    def forward(self, X: Tensor) -> Tensor:
        # non-linear projection of the input, scored against the context vector
        scores = torch.tanh_(self.inp_proj(X))
        attn_weights = self.context(scores).softmax(dim=1)
        # keep the (batch, seq) weights around for inspection, pre-dropout
        self.attn_weights = attn_weights.squeeze(2)
        attn_weights = self.dropout(attn_weights)
        if self.sum_along_seq:
            return (attn_weights * X).sum(1)
        return attn_weights * X
class QueryKeySelfAttention(nn.Module):
    """Simplified multi-head self attention.

    Unlike the well known multi-head attention, there is no learned value
    projection: the attention weights multiply the (head-split) input
    tensor directly. The rationale behind this implementation comes, among
    other considerations, from the fact that Transformer based models tend
    to heavily overfit tabular data; reducing the number of trainable
    parameters helps mitigate such overfitting.
    """

    def __init__(self, input_dim: int, dropout: float, use_bias: bool, n_heads: int):
        super(QueryKeySelfAttention, self).__init__()
        assert input_dim % n_heads == 0, "'input_dim' must be divisible by 'n_heads'"
        self.head_dim = input_dim // n_heads
        self.n_heads = n_heads
        # one projection producing queries and keys in a single matmul
        self.qk_proj = nn.Linear(input_dim, input_dim * 2, bias=use_bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, X: Tensor) -> Tensor:
        b, s, _ = X.shape

        def split_heads(t: Tensor) -> Tensor:
            # (b, s, h*d) -> (b, h, s, d)
            return t.reshape(b, s, self.n_heads, self.head_dim).permute(0, 2, 1, 3)

        q, k = self.qk_proj(X).chunk(2, dim=-1)
        q, k, x_heads = split_heads(q), split_heads(k), split_heads(X)
        # scaled dot-product scores: (b, h, s, s)
        scores = q.matmul(k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        attn_weights = scores.softmax(dim=-1)
        # keep pre-dropout weights for inspection
        self.attn_weights = attn_weights
        attn_weights = self.dropout(attn_weights)
        attn_output = attn_weights.matmul(x_heads)
        # (b, h, s, d) -> (b, s, h*d)
        return attn_output.permute(0, 2, 1, 3).reshape(b, s, self.n_heads * self.head_dim)
class AddNorm(nn.Module):
    """Residual connection followed by layer normalisation (aka PosNorm):
    ``LayerNorm(X + Dropout(sublayer(X)))``.
    """

    def __init__(self, input_dim: int, dropout: float):
        super(AddNorm, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, X: Tensor, sublayer: nn.Module) -> Tensor:
        residual = self.dropout(sublayer(X))
        return self.ln(X + residual)
class ContextAttentionEncoder(nn.Module):
    """RNN (LSTM or GRU) followed by context attention, the attention step
    optionally wrapped in a residual Add & Norm block.
    """

    def __init__(
        self,
        rnn: nn.Module,
        input_dim: int,
        attn_dropout: float,
        attn_concatenate: bool,
        with_addnorm: bool,
        sum_along_seq: bool,
    ):
        super(ContextAttentionEncoder, self).__init__()
        self.rnn = rnn
        self.bidirectional = self.rnn.bidirectional
        self.attn_concatenate = attn_concatenate
        self.with_addnorm = with_addnorm
        if with_addnorm:
            self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.attn = ContextAttention(input_dim, attn_dropout, sum_along_seq)

    def forward(self, X: Tensor, h: Tensor, c: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
        if isinstance(self.rnn, nn.LSTM):
            o, (h, c) = self.rnn(X, (h, c))
        elif isinstance(self.rnn, nn.GRU):
            o, h = self.rnn(X, h)
        attn_inp = self._process_rnn_outputs(o, h)
        out = (
            self.attn_addnorm(attn_inp, self.attn)
            if self.with_addnorm
            else self.attn(attn_inp)
        )
        # NOTE(review): returns (out, c, h) -- cell state before hidden state,
        # the reverse of the (h, c) argument order; confirm callers unpack it
        # in this order before changing anything
        return out, c, h

    def _process_rnn_outputs(self, output: Tensor, hidden: Tensor) -> Tensor:
        # Optionally concatenate the last hidden state (both directions when
        # bidirectional) to every step of the RNN output.
        if not self.attn_concatenate:
            return output
        if self.bidirectional:
            last_hidden = torch.cat((hidden[-2], hidden[-1]), dim=1)
        else:
            last_hidden = hidden[-1]
        return torch.cat([output, last_hidden.unsqueeze(1).expand_as(output)], dim=2)
class SLP(nn.Module):
    """Single linear layer -> activation -> optional LayerNorm -> dropout.

    For GLU-family activations the linear layer doubles its output size,
    since a GLU halves the dimension again.
    """

    def __init__(self, input_dim: int, dropout: float, activation: str, normalise: bool):
        super(SLP, self).__init__()
        out_dim = input_dim * 2 if activation.endswith('glu') else input_dim
        self.lin = nn.Linear(input_dim, out_dim)
        self.dropout = nn.Dropout(dropout)
        self.activation = get_activation_fn(activation)
        self.norm: Union[nn.LayerNorm, nn.Identity] = (
            nn.LayerNorm(input_dim) if normalise else nn.Identity()
        )

    def forward(self, X: Tensor) -> Tensor:
        x = self.activation(self.lin(X))
        return self.dropout(self.norm(x))
class SelfAttentionEncoder(nn.Module):
    """Attention block: simplified query/key self attention followed by a
    single-layer perceptron, each optionally wrapped in residual Add & Norm.
    """

    def __init__(
        self,
        input_dim: int,
        dropout: float,
        use_bias: bool,
        n_heads: int,
        with_addnorm: bool,
        activation: str,
    ):
        super(SelfAttentionEncoder, self).__init__()
        self.with_addnorm = with_addnorm
        self.attn = QueryKeySelfAttention(input_dim, dropout, use_bias, n_heads)
        if with_addnorm:
            self.attn_addnorm = AddNorm(input_dim, dropout)
            self.slp_addnorm = AddNorm(input_dim, dropout)
        # without Add & Norm the SLP normalises its own output instead
        self.slp = SLP(input_dim, dropout, activation, not with_addnorm)

    def forward(self, X: Tensor) -> Tensor:
        if not self.with_addnorm:
            return self.slp(self.attn(X))
        x = self.attn_addnorm(X, self.attn)
        return self.slp_addnorm(x, self.slp)
class ContextAttentionMLP(BaseTabularModelWithAttention):
    """Attention-based model for tabular data, usable as the `deeptabular`
    component of a Wide & Deep model or independently by itself.

    Categorical features are embedded, and numerical (aka continuous)
    features are embedded as well; the resulting sequence of feature
    embeddings is passed through a stack of attention blocks, each one a
    `ContextAttentionEncoder`. That encoder is in part inspired by the
    attention mechanism described in [Hierarchical Attention Networks for
    Document Classification](https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf).
    See `pytorch_widedeep.models.tabular.mlp._attention_layers` for details.

    Parameters
    ----------
    column_idx: Dict
        Dict mapping column names to their index in the input tensor, e.g.
        _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_.
        Required to slice the tensors.
    cat_embed_input: List
        List of Tuples (column name, number of unique values) per
        categorical column, e.g. _[('education', 11), ...]_
    cat_embed_dropout: float, default = 0.1
        Dropout applied to the categorical embeddings
    use_cat_bias: bool, default = False,
        If True, a bias is added to the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation for the categorical embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    full_embed_dropout: bool, default = False
        If True, entire column embeddings may be dropped in the batch. See
        `pytorch_widedeep.models.embeddings_layers.FullEmbeddingDropout`.
        When True, `cat_embed_dropout` is ignored.
    shared_embed: bool, default = False
        Share part of the embeddings per column, letting the model learn
        which column is embedded at the time.
    add_shared_embed: bool, default = False,
        Sharing strategy: add the shared embeddings to the column
        embeddings (True), or replace the first `frac_shared_embed` of them
        (False). See `pytorch_widedeep.models.embeddings_layers.SharedEmbeddings`
    frac_shared_embed: float, default = 0.25
        Fraction of embeddings shared (if `add_shared_embed = False`) by
        all the categories of a given column.
    continuous_cols: List, Optional, default = None
        Names of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Normalisation layer for the continuous features: _'layernorm'_,
        _'batchnorm'_ or None.
    cont_embed_dropout: float, default = 0.1,
        Dropout applied to the continuous embeddings
    use_cont_bias: bool, default = True,
        If True, a bias is added to the continuous embeddings
    cont_embed_activation: str, default = None
        Activation for the continuous embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    input_dim: int, default = 32
        The so-called *dimension of the model*: the number of embeddings
        used to encode the categorical and/or continuous columns
    attn_dropout: float, default = 0.2
        Dropout inside each attention block
    with_addnorm: bool = False,
        If True, residual connections are used in the attention blocks
    attn_activation: str, default = "leaky_relu"
        Activation of the dense layer in each attention encoder. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    n_blocks: int, default = 3
        Number of attention blocks

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        Module that embeds the categorical and continuous columns
    encoder: nn.Module
        Sequence of attention encoders.

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import ContextAttentionMLP
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = ContextAttentionMLP(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols = ['e'])
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        full_embed_dropout: bool = False,
        shared_embed: bool = False,
        add_shared_embed: bool = False,
        frac_shared_embed: float = 0.25,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = None,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        input_dim: int = 32,
        attn_dropout: float = 0.2,
        with_addnorm: bool = False,
        attn_activation: str = 'leaky_relu',
        n_blocks: int = 3,
    ):
        super(ContextAttentionMLP, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            full_embed_dropout=full_embed_dropout,
            shared_embed=shared_embed,
            add_shared_embed=add_shared_embed,
            frac_shared_embed=frac_shared_embed,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=True,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
            input_dim=input_dim,
        )

        self.attn_dropout = attn_dropout
        self.with_addnorm = with_addnorm
        self.attn_activation = attn_activation
        self.n_blocks = n_blocks

        self.with_cls_token = 'cls_token' in column_idx
        self.n_cat = 0 if cat_embed_input is None else len(cat_embed_input)
        self.n_cont = 0 if continuous_cols is None else len(continuous_cols)

        # NOTE(review): this call assumes a ContextAttentionEncoder with
        # signature (input_dim, attn_dropout, with_addnorm, attn_activation);
        # the RNN-based encoder of the same name defined earlier in this file
        # takes different arguments -- confirm which definition is in scope
        self.encoder = nn.Sequential()
        for i in range(n_blocks):
            self.encoder.add_module(
                'attention_block' + str(i),
                ContextAttentionEncoder(input_dim, attn_dropout, with_addnorm, attn_activation),
            )

    def forward(self, X: Tensor) -> Tensor:
        x = self.encoder(self._get_embeddings(X))
        # with a [CLS] token only its position is used; otherwise all feature
        # embeddings are flattened into a single vector
        return x[:, 0, :] if self.with_cls_token else x.flatten(1)

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        if self.with_cls_token:
            return self.input_dim
        return (self.n_cat + self.n_cont) * self.input_dim

    @property
    def attention_weights(self) -> List:
        """List with the attention weights per block.

        The shape of the attention weights is $(N, F)$, where $N$ is the
        batch size and $F$ is the number of features/columns in the dataset
        """
        return [blk.attn.attn_weights for blk in self.encoder]
class SelfAttentionMLP(BaseTabularModelWithAttention):
    """Attention-based model for tabular data, usable as the `deeptabular`
    component of a Wide & Deep model or independently by itself.

    Categorical features are embedded, and numerical (aka continuous)
    features are embedded as well; the resulting sequence of feature
    embeddings is passed through a stack of attention blocks, each one a
    simplified `SelfAttentionEncoder`. See
    `pytorch_widedeep.models.tabular.mlp._attention_layers` for details.
    The simplified version of self attention is used because the
    '_standard_' attention mechanism of the TabTransformer shows a notable
    tendency to overfit.

    Parameters
    ----------
    column_idx: Dict
        Dict mapping column names to their index in the input tensor, e.g.
        _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_.
        Required to slice the tensors.
    cat_embed_input: List
        List of Tuples (column name, number of unique values) per
        categorical column, e.g. _[(education, 11), ...]_.
    cat_embed_dropout: float, default = 0.1
        Dropout applied to the categorical embeddings
    use_cat_bias: bool, default = False,
        If True, a bias is added to the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation for the categorical embeddings, if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    full_embed_dropout: bool, default = False
        If True, entire column embeddings may be dropped in the batch. See
        `pytorch_widedeep.models.embeddings_layers.FullEmbeddingDropout`.
        When True, `cat_embed_dropout` is ignored.
    shared_embed: bool, default = False
        Share part of the embeddings per column so the model can
        distinguish the classes of one column from those in the others --
        i.e. learn which column is embedded at the time.
    add_shared_embed: bool, default = False,
        Sharing strategy: add the shared embeddings to the column
        embeddings (True), or replace the first frac_shared_embed of them
        (False). See `pytorch_widedeep.models.embeddings_layers.SharedEmbeddings`
    frac_shared_embed: float, default = 0.25
        Fraction of embeddings shared (if add_shared_embed = False) by all
        the categories of a given column.
    continuous_cols: List, Optional, default = None
        Names of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Normalisation layer for the continuous features: _'layernorm'_,
        _'batchnorm'_ or None.
    cont_embed_dropout: float, default = 0.1,
        Dropout applied to the continuous embeddings
    use_cont_bias: bool, default = True,
        If True, a bias is added to the continuous embeddings
    cont_embed_activation: str, default = None
        Activation for the continuous embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    input_dim: int, default = 32
        The so-called *dimension of the model*: the number of embeddings
        used to encode the categorical and/or continuous columns
    attn_dropout: float, default = 0.2
        Dropout inside each attention block
    n_heads: int, default = 8
        Number of attention heads per attention block.
    use_bias: bool, default = False
        If True, the Q and K projection layers use a bias.
    with_addnorm: bool = False,
        If True, residual connections are used in the attention blocks
    attn_activation: str, default = "leaky_relu"
        Activation of the dense layer in each attention encoder. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    n_blocks: int, default = 3
        Number of attention blocks

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        Module that embeds the categorical and continuous columns
    encoder: nn.Module
        Sequence of attention encoders.

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import SelfAttentionMLP
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = SelfAttentionMLP(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols = ['e'])
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        full_embed_dropout: bool = False,
        shared_embed: bool = False,
        add_shared_embed: bool = False,
        frac_shared_embed: float = 0.25,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = None,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        input_dim: int = 32,
        attn_dropout: float = 0.2,
        n_heads: int = 8,
        use_bias: bool = False,
        with_addnorm: bool = False,
        attn_activation: str = 'leaky_relu',
        n_blocks: int = 3,
    ):
        super(SelfAttentionMLP, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            full_embed_dropout=full_embed_dropout,
            shared_embed=shared_embed,
            add_shared_embed=add_shared_embed,
            frac_shared_embed=frac_shared_embed,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=True,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
            input_dim=input_dim,
        )

        self.attn_dropout = attn_dropout
        self.n_heads = n_heads
        self.use_bias = use_bias
        self.with_addnorm = with_addnorm
        self.attn_activation = attn_activation
        self.n_blocks = n_blocks

        self.with_cls_token = 'cls_token' in column_idx
        self.n_cat = 0 if cat_embed_input is None else len(cat_embed_input)
        self.n_cont = 0 if continuous_cols is None else len(continuous_cols)

        self.encoder = nn.Sequential()
        for i in range(n_blocks):
            self.encoder.add_module(
                'attention_block' + str(i),
                SelfAttentionEncoder(input_dim, attn_dropout, use_bias, n_heads, with_addnorm, attn_activation),
            )

    def forward(self, X: Tensor) -> Tensor:
        x = self.encoder(self._get_embeddings(X))
        # with a [CLS] token only its position is used; otherwise all feature
        # embeddings are flattened into a single vector
        return x[:, 0, :] if self.with_cls_token else x.flatten(1)

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the WideDeep class
        """
        if self.with_cls_token:
            return self.input_dim
        return (self.n_cat + self.n_cont) * self.input_dim

    @property
    def attention_weights(self) -> List:
        """List with the attention weights per block.

        The shape of the attention weights is $(N, H, F, F)$, where $N$ is
        the batch size, $H$ is the number of attention heads and $F$ is the
        number of features/columns in the dataset
        """
        return [blk.attn.attn_weights for blk in self.encoder]
class TabMlp(BaseTabularModelWithoutAttention):
    """MLP model for tabular data, usable as the `deeptabular` component of
    a Wide & Deep model or independently by itself.

    Embedding representations of the categorical features are combined with
    the numerical (aka continuous) features -- embedded or not -- and the
    result is passed through a series of dense layers (i.e. an MLP).

    Parameters
    ----------
    column_idx: Dict
        Dict mapping column names to their index in the input tensor, e.g.
        _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_.
        Required to slice the tensors.
    cat_embed_input: List, Optional, default = None
        List of Tuples (column name, number of unique values, embedding
        dimension) per categorical column, e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Dropout applied to the categorical embeddings
    use_cat_bias: bool, default = False,
        If True, a bias is added to the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation for the categorical embeddings, if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    continuous_cols: List, Optional, default = None
        Names of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Normalisation layer for the continuous features: _'layernorm'_,
        _'batchnorm'_ or None.
    embed_continuous: bool, default = False,
        If True, the continuous columns are embedded (i.e. each passed
        through a linear layer with or without activation)
    cont_embed_dim: int, default = 32,
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1,
        Dropout applied to the continuous embeddings
    use_cont_bias: bool, default = True,
        If True, a bias is added to the continuous embeddings
    cont_embed_activation: Optional, str, default = None,
        Activation for the continuous embeddings, if any. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_hidden_dims: List, default = [200, 100]
        Number of neurons per dense layer in the mlp.
    mlp_activation: str, default = "relu"
        Activation of the dense layers of the MLP. Currently _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_dropout: float or List, default = 0.1
        float or List of floats with the dropout between the dense layers.
        e.g: _[0.5,0.5]_
    mlp_batchnorm: bool, default = False
        If True, batch normalization is applied to the dense layers
    mlp_batchnorm_last: bool, default = False
        If True, batch normalization is also applied to the last dense layer
    mlp_linear_first: bool, default = False
        Order of the operations in the dense layer. If `True: [LIN -> ACT
        -> BN -> DP]`. If `False: [BN -> DP -> LIN -> ACT]`

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        Module that embeds the categorical and continuous columns
    encoder: nn.Module
        mlp model that receives the concatenation of the embeddings and the
        continuous columns

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabMlp
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabMlp(mlp_hidden_dims=[8,4], column_idx=column_idx, cat_embed_input=cat_embed_input,
    ... continuous_cols = ['e'])
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = 'batchnorm',
        embed_continuous: bool = False,
        cont_embed_dim: int = 32,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        mlp_hidden_dims: List[int] = [200, 100],
        mlp_activation: str = 'relu',
        mlp_dropout: Union[float, List[float]] = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = False,
    ):
        super(TabMlp, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=embed_continuous,
            cont_embed_dim=cont_embed_dim,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
        )

        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first

        # the MLP input size is whatever the embedding module produces
        self.encoder = MLP(
            [self.cat_and_cont_embed.output_dim] + mlp_hidden_dims,
            mlp_activation,
            mlp_dropout,
            mlp_batchnorm,
            mlp_batchnorm_last,
            mlp_linear_first,
        )

    def forward(self, X: Tensor) -> Tensor:
        return self.encoder(self._get_embeddings(X))

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class"""
        return self.mlp_hidden_dims[-1]
class TabMlpDecoder(nn.Module):
    """Companion decoder model for the `TabMlp` model (which can be considered
    an encoder itself).

    This class is designed to be used with the `EncoderDecoderTrainer` when
    using self-supervised pre-training (see the corresponding section in the
    docs). The `TabMlpDecoder` will receive the output from the MLP
    and '_reconstruct_' the embeddings.

    Parameters
    ----------
    embed_dim: int
        Size of the embeddings tensor that needs to be reconstructed.
    mlp_hidden_dims: List, default = [100, 200]
        List with the number of neurons per dense layer in the mlp.
    mlp_activation: str, default = "relu"
        Activation function for the dense layers of the MLP. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_dropout: float or List, default = 0.1
        float or List of floats with the dropout between the dense layers.
        e.g: _[0.5,0.5]_
    mlp_batchnorm: bool, default = False
        Boolean indicating whether or not batch normalization will be applied
        to the dense layers
    mlp_batchnorm_last: bool, default = False
        Boolean indicating whether or not batch normalization will be applied
        to the last of the dense layers
    mlp_linear_first: bool, default = False
        Boolean indicating the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    decoder: nn.Module
        mlp model that will receive the output of the encoder

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabMlpDecoder
    >>> x_inp = torch.rand(3, 8)
    >>> decoder = TabMlpDecoder(embed_dim=32, mlp_hidden_dims=[8,16])
    >>> res = decoder(x_inp)
    >>> res.shape
    torch.Size([3, 32])
    """

    def __init__(
        self,
        embed_dim: int,
        # note the mirrored default ([100, 200], increasing) relative to
        # TabMlp's [200, 100]: the decoder runs in the opposite direction
        mlp_hidden_dims: List[int] = [100, 200],
        mlp_activation: str = 'relu',
        mlp_dropout: Union[float, List[float]] = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = False,
    ):
        super(TabMlpDecoder, self).__init__()

        self.embed_dim = embed_dim
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first

        # the decoder's MLP runs through the hidden dims and finishes at the
        # embedding size that has to be reconstructed
        self.decoder = MLP(
            mlp_hidden_dims + [self.embed_dim],
            mlp_activation,
            mlp_dropout,
            mlp_batchnorm,
            mlp_batchnorm_last,
            mlp_linear_first,
        )

    def forward(self, X: Tensor) -> Tensor:
        """Reconstruct the embeddings from the encoder output."""
        return self.decoder(X)
class BasicBlock(nn.Module):
    """Residual dense block: ``LIN -> BN -> LeakyReLU [-> Dropout] [-> LIN ->
    BN]`` plus a (possibly resized) skip connection and a final LeakyReLU.

    Parameters
    ----------
    inp: int
        input units
    out: int
        output units
    dropout: float, default = 0.0
        dropout after the first activation; disabled when 0.0
    simplify: bool, default = False
        if True the second LIN -> BN pair is skipped
    resize: nn.Module, default = None
        module used to project the identity branch when `inp != out`
    """

    def __init__(self, inp: int, out: int, dropout: float = 0.0, simplify: bool = False, resize: nn.Module = None):
        super(BasicBlock, self).__init__()
        self.simplify = simplify
        self.resize = resize
        self.lin1 = nn.Linear(inp, out, bias=False)
        self.bn1 = nn.BatchNorm1d(out)
        self.leaky_relu = nn.LeakyReLU(inplace=True)
        self.dropout = dropout > 0.0
        if self.dropout:
            self.dp = nn.Dropout(dropout)
        if not self.simplify:
            self.lin2 = nn.Linear(out, out, bias=False)
            self.bn2 = nn.BatchNorm1d(out)

    def forward(self, X: Tensor) -> Tensor:
        out = self.leaky_relu(self.bn1(self.lin1(X)))
        if self.dropout:
            out = self.dp(out)
        if not self.simplify:
            out = self.bn2(self.lin2(out))
        # skip connection, projected when input and output sizes differ
        identity = self.resize(X) if self.resize is not None else X
        out += identity
        return self.leaky_relu(out)
class DenseResnet(nn.Module):
    """Stack of `BasicBlock`s, preceded by a LIN -> BN projection when the
    input size differs from the first block's size.
    """

    def __init__(self, input_dim: int, blocks_dims: List[int], dropout: float, simplify: bool):
        super(DenseResnet, self).__init__()
        if input_dim != blocks_dims[0]:
            # project the input to the first block's size
            self.dense_resnet = nn.Sequential(
                OrderedDict(
                    [
                        ('lin_inp', nn.Linear(input_dim, blocks_dims[0], bias=False)),
                        ('bn_inp', nn.BatchNorm1d(blocks_dims[0])),
                    ]
                )
            )
        else:
            self.dense_resnet = nn.Sequential()
        # consecutive (prev, curr) pairs define the blocks
        for idx, (prev_dim, curr_dim) in enumerate(zip(blocks_dims[:-1], blocks_dims[1:])):
            resize = None
            if prev_dim != curr_dim:
                resize = nn.Sequential(
                    nn.Linear(prev_dim, curr_dim, bias=False), nn.BatchNorm1d(curr_dim)
                )
            self.dense_resnet.add_module(
                'block_{}'.format(idx),
                BasicBlock(prev_dim, curr_dim, dropout, simplify, resize),
            )

    def forward(self, X: Tensor) -> Tensor:
        return self.dense_resnet(X)
class TabResnet(BaseTabularModelWithoutAttention):
"""Defines a `TabResnet` model that can be used as the `deeptabular`
component of a Wide & Deep model or independently by itself.
This class combines embedding representations of the categorical features
with numerical (aka continuous) features, embedded or not. These are then
passed through a series of Resnet blocks. See
`pytorch_widedeep.models.tab_resnet._layers` for details on the
structure of each block.
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
_{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
cat_embed_input: List
List of Tuples with the column name, number of unique values and
embedding dimension. e.g. _[(education, 11, 32), ...]_.
cat_embed_dropout: float, default = 0.1
Categorical embeddings dropout
use_cat_bias: bool, default = False,
Boolean indicating if bias will be used for the categorical embeddings
cat_embed_activation: Optional, str, default = None,
Activation function for the categorical embeddings, if any. Currently
_'tanh'_, _'relu'_, _'leaky'_relu` and _'gelu'_ are supported
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
cont_norm_layer: str, default = "batchnorm"
Type of normalization layer applied to the continuous features. Options
are: _'layernorm'_, _'batchnorm'_ or `None`.
embed_continuous: bool, default = False,
Boolean indicating if the continuous columns will be embedded
(i.e. passed each through a linear layer with or without activation)
cont_embed_dim: int, default = 32,
Size of the continuous embeddings
cont_embed_dropout: float, default = 0.1,
Continuous embeddings dropout
use_cont_bias: bool, default = True,
Boolean indicating if bias will be used for the continuous embeddings
cont_embed_activation: Optional, str, default = None,
Activation function for the continuous embeddings, if any. Currently
_'tanh'_, _'relu'_, _'leaky'_relu` and _'gelu'_ are supported
blocks_dims: List, default = [200, 100, 100]
List of integers that define the input and output units of each block.
For example: _[200, 100, 100]_ will generate 2 blocks. The first will
receive a tensor of size 200 and output a tensor of size 100, and the
second will receive a tensor of size 100 and output a tensor of size
100. See `pytorch_widedeep.models.tab_resnet._layers` for
details on the structure of each block.
blocks_dropout: float, default = 0.1
Block's internal dropout.
simplify_blocks: bool, default = False,
Boolean indicating if the simplest possible residual blocks (`X -> [
[LIN, BN, ACT] + X ]`) will be used instead of a standard one
(`X -> [ [LIN1, BN1, ACT1] -> [LIN2, BN2] + X ]`).
mlp_hidden_dims: List, Optional, default = None
List with the number of neurons per dense layer in the MLP. e.g:
_[64, 32]_. If `None` the output of the Resnet Blocks will be
connected directly to the output neuron(s).
mlp_activation: str, default = "relu"
Activation function for the dense layers of the MLP. Currently
_'tanh'_, _'relu'_, _'leaky'_relu` and _'gelu'_ are supported
mlp_dropout: float, default = 0.1
float with the dropout between the dense layers of the MLP.
mlp_batchnorm: bool, default = False
Boolean indicating whether or not batch normalization will be applied
to the dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not batch normalization will be applied
to the last of the dense layers
mlp_linear_first: bool, default = False
Boolean indicating the order of the operations in the dense
layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
LIN -> ACT]`
Attributes
----------
cat_and_cont_embed: nn.Module
This is the module that processes the categorical and continuous columns
encoder: nn.Module
deep dense Resnet model that will receive the concatenation of the
embeddings and the continuous columns
mlp: nn.Module
if `mlp_hidden_dims` is `True`, this attribute will be an mlp
model that will receive the results of the concatenation of the
embeddings and the continuous columns -- if present --.
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import TabResnet
>>> X_deep = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = TabResnet(blocks_dims=[16,4], column_idx=column_idx, cat_embed_input=cat_embed_input,
... continuous_cols = ['e'])
>>> out = model(X_deep)
"""
def __init__(
    self,
    column_idx: Dict[str, int],
    cat_embed_input: Optional[List[Tuple[str, int, int]]] = None,
    cat_embed_dropout: float = 0.1,
    use_cat_bias: bool = False,
    cat_embed_activation: Optional[str] = None,
    continuous_cols: Optional[List[str]] = None,
    cont_norm_layer: str = 'batchnorm',
    embed_continuous: bool = False,
    cont_embed_dim: int = 32,
    cont_embed_dropout: float = 0.1,
    use_cont_bias: bool = True,
    cont_embed_activation: Optional[str] = None,
    blocks_dims: List[int] = [200, 100, 100],
    blocks_dropout: float = 0.1,
    simplify_blocks: bool = False,
    mlp_hidden_dims: Optional[List[int]] = None,
    mlp_activation: str = 'relu',
    mlp_dropout: float = 0.1,
    mlp_batchnorm: bool = False,
    mlp_batchnorm_last: bool = False,
    mlp_linear_first: bool = False,
):
    """Build the TabResnet encoder: embeddings -> dense ResNet -> optional MLP."""
    # Every embedding-related argument is handled by the base tabular model,
    # which builds `self.cat_and_cont_embed`.
    super(TabResnet, self).__init__(
        column_idx=column_idx,
        cat_embed_input=cat_embed_input,
        cat_embed_dropout=cat_embed_dropout,
        use_cat_bias=use_cat_bias,
        cat_embed_activation=cat_embed_activation,
        continuous_cols=continuous_cols,
        cont_norm_layer=cont_norm_layer,
        embed_continuous=embed_continuous,
        cont_embed_dim=cont_embed_dim,
        cont_embed_dropout=cont_embed_dropout,
        use_cont_bias=use_cont_bias,
        cont_embed_activation=cont_embed_activation,
    )
    # fewer than two entries would describe no residual block at all
    if len(blocks_dims) < 2:
        raise ValueError("'blocks' must contain at least two elements, e.g. [256, 128]")
    self.blocks_dims = blocks_dims
    self.blocks_dropout = blocks_dropout
    self.simplify_blocks = simplify_blocks
    self.mlp_hidden_dims = mlp_hidden_dims
    self.mlp_activation = mlp_activation
    self.mlp_dropout = mlp_dropout
    self.mlp_batchnorm = mlp_batchnorm
    self.mlp_batchnorm_last = mlp_batchnorm_last
    self.mlp_linear_first = mlp_linear_first
    # the ResNet receives the concatenation of categorical and continuous outputs
    cat_out_dim = self.cat_and_cont_embed.cat_out_dim
    cont_out_dim = self.cat_and_cont_embed.cont_out_dim
    self.encoder = DenseResnet(cat_out_dim + cont_out_dim, blocks_dims, blocks_dropout, self.simplify_blocks)
    if mlp_hidden_dims is None:
        self.mlp = None
    else:
        # the MLP's first layer receives the last residual block's output
        full_mlp_dims = [blocks_dims[-1]] + mlp_hidden_dims
        self.mlp = MLP(full_mlp_dims, mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
def forward(self, X: Tensor) -> Tensor:
    """Embed the input, run it through the ResNet encoder and, when
    configured, through the final MLP."""
    encoded = self.encoder(self._get_embeddings(X))
    return encoded if self.mlp is None else self.mlp(encoded)
@property
def output_dim(self) -> int:
    """The output dimension of the model. This is a required property
    neccesary to build the `WideDeep` class
    """
    # with an MLP on top, its last layer determines the output size;
    # otherwise the last residual block does
    if self.mlp_hidden_dims is None:
        return self.blocks_dims[-1]
    return self.mlp_hidden_dims[-1]
class TabResnetDecoder(nn.Module):
    """Companion decoder model for the `TabResnet` model (which can be
    considered an encoder itself)

    This class is designed to be used with the `EncoderDecoderTrainer` when
    using self-supervised pre-training (see the corresponding section in the
    docs). This class will receive the output from the ResNet blocks or the
    MLP(if present) and '_reconstruct_' the embeddings.

    Parameters
    ----------
    embed_dim: int
        Size of the embeddings tensor to be reconstructed.
    blocks_dims: List, default = [100, 100, 200]
        List of integers that define the input and output units of each block.
        For example: _[200, 100, 100]_ will generate 2 blocks. The first will
        receive a tensor of size 200 and output a tensor of size 100, and the
        second will receive a tensor of size 100 and output a tensor of size
        100. See `pytorch_widedeep.models.tab_resnet._layers` for
        details on the structure of each block.
    blocks_dropout: float, default = 0.1
        Block's internal dropout.
    simplify_blocks: bool, default = False,
        Boolean indicating if the simplest possible residual blocks (`X -> [
        [LIN, BN, ACT] + X ]`) will be used instead of a standard one
        (`X -> [ [LIN1, BN1, ACT1] -> [LIN2, BN2] + X ]`).
    mlp_hidden_dims: List, Optional, default = None
        List with the number of neurons per dense layer in the MLP. e.g:
        _[64, 32]_. If `None` the output of the Resnet Blocks will be
        connected directly to the output neuron(s).
    mlp_activation: str, default = "relu"
        Activation function for the dense layers of the MLP. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    mlp_dropout: float, default = 0.1
        float with the dropout between the dense layers of the MLP.
    mlp_batchnorm: bool, default = False
        Boolean indicating whether or not batch normalization will be applied
        to the dense layers
    mlp_batchnorm_last: bool, default = False
        Boolean indicating whether or not batch normalization will be applied
        to the last of the dense layers
    mlp_linear_first: bool, default = False
        Boolean indicating the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    decoder: nn.Module
        deep dense Resnet model that will receive the output of the encoder IF
        `mlp_hidden_dims` is None
    mlp: nn.Module
        if `mlp_hidden_dims` is not None, the overall decoder will consist
        in an MLP that will receive the output of the encoder followed by the
        deep dense Resnet.

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabResnetDecoder
    >>> x_inp = torch.rand(3, 8)
    >>> decoder = TabResnetDecoder(embed_dim=32, blocks_dims=[8, 16, 16])
    >>> res = decoder(x_inp)
    >>> res.shape
    torch.Size([3, 32])
    """

    def __init__(self, embed_dim: int, blocks_dims: List[int]=[100, 100, 200], blocks_dropout: float=0.1, simplify_blocks: bool=False, mlp_hidden_dims: Optional[List[int]]=None, mlp_activation: str='relu', mlp_dropout: float=0.1, mlp_batchnorm: bool=False, mlp_batchnorm_last: bool=False, mlp_linear_first: bool=False):
        super(TabResnetDecoder, self).__init__()
        # fewer than two entries would describe no residual block at all
        if len(blocks_dims) < 2:
            raise ValueError("'blocks' must contain at least two elements, e.g. [256, 128]")
        self.embed_dim = embed_dim
        self.blocks_dims = blocks_dims
        self.blocks_dropout = blocks_dropout
        self.simplify_blocks = simplify_blocks
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # optional MLP placed in front of the dense ResNet
        if self.mlp_hidden_dims is not None:
            self.mlp = MLP(mlp_hidden_dims, mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
        else:
            self.mlp = None
        # the ResNet's input size depends on whether an MLP precedes it
        if self.mlp is not None:
            self.decoder = DenseResnet(mlp_hidden_dims[-1], blocks_dims, blocks_dropout, self.simplify_blocks)
        else:
            self.decoder = DenseResnet(blocks_dims[0], blocks_dims, blocks_dropout, self.simplify_blocks)
        # final linear projection back to the embedding size being reconstructed
        self.reconstruction_layer = nn.Linear(blocks_dims[-1], embed_dim, bias=False)

    def forward(self, X: Tensor) -> Tensor:
        # MLP (if any) -> dense ResNet -> linear reconstruction of embeddings
        x = self.mlp(X) if self.mlp is not None else X
        return self.reconstruction_layer(self.decoder(x))
class CatSingleMlp(nn.Module):
    """One shared MLP that produces reconstruction logits for every
    categorical feature (the 'cls_token' column, if present, is excluded)."""

    def __init__(self, input_dim: int, cat_embed_input: List[Tuple[str, int]], column_idx: Dict[str, int], activation: str):
        super(CatSingleMlp, self).__init__()
        self.input_dim = input_dim
        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.activation = activation
        # total number of classes across all (non 'cls_token') categorical cols
        self.num_class = sum(ei[1] for ei in cat_embed_input if ei[0] != 'cls_token')
        self.mlp = MLP(d_hidden=[input_dim, self.num_class * 4, self.num_class], activation=activation, dropout=0.0, batchnorm=False, batchnorm_last=False, linear_first=False)

    def forward(self, X: Tensor, r_: Tensor) -> Tuple[Tensor, Tensor]:
        """Return the concatenated ground-truth class indices and the MLP's
        predictions over the concatenated per-feature representations."""
        targets, reprs = [], []
        for col, _ in self.cat_embed_input:
            if col == 'cls_token':
                continue
            idx = self.column_idx[col]
            targets.append(X[:, idx].long())
            reprs.append(r_[:, idx, :])
        preds = self.mlp(torch.cat(reprs))
        return torch.cat(targets), preds
class CatMlpPerFeature(nn.Module):
    """One dedicated reconstruction MLP per categorical feature
    ('cls_token', if present, is skipped)."""

    def __init__(self, input_dim: int, cat_embed_input: List[Tuple[str, int]], column_idx: Dict[str, int], activation: str):
        super(CatMlpPerFeature, self).__init__()
        self.input_dim = input_dim
        self.column_idx = column_idx
        self.cat_embed_input = cat_embed_input
        self.activation = activation
        # one MLP per column, mapping input_dim to that column's class count
        mlps = {}
        for col, val in self.cat_embed_input:
            if col == 'cls_token':
                continue
            mlps['mlp_' + col] = MLP(d_hidden=[input_dim, val * 4, val], activation=activation, dropout=0.0, batchnorm=False, batchnorm_last=False, linear_first=False)
        self.mlp = nn.ModuleDict(mlps)

    def forward(self, X: Tensor, r_: Tensor) -> List[Tuple[Tensor, Tensor]]:
        """Return a (targets, predictions) tuple per categorical feature."""
        pairs = []
        for col, _ in self.cat_embed_input:
            if col == 'cls_token':
                continue
            idx = self.column_idx[col]
            pairs.append((X[:, idx].long(), self.mlp['mlp_' + col](r_[:, idx, :])))
        return pairs
class ContSingleMlp(nn.Module):
    """One shared MLP that reconstructs every continuous feature value."""

    def __init__(self, input_dim: int, continuous_cols: List[str], column_idx: Dict[str, int], activation: str):
        super(ContSingleMlp, self).__init__()
        self.input_dim = input_dim
        self.column_idx = column_idx
        self.continuous_cols = continuous_cols
        self.activation = activation
        # maps each feature's representation to a single scalar prediction
        self.mlp = MLP(d_hidden=[input_dim, input_dim * 2, 1], activation=activation, dropout=0.0, batchnorm=False, batchnorm_last=False, linear_first=False)

    def forward(self, X: Tensor, r_: Tensor) -> Tuple[Tensor, Tensor]:
        """Return the concatenated true values (as a column vector) and the
        MLP's predictions over the concatenated representations."""
        idxs = [self.column_idx[col] for col in self.continuous_cols]
        targets = torch.cat([X[:, i].float() for i in idxs]).unsqueeze(1)
        preds = self.mlp(torch.cat([r_[:, i, :] for i in idxs]))
        return targets, preds
class ContMlpPerFeature(nn.Module):
    """One dedicated reconstruction MLP per continuous feature."""

    def __init__(self, input_dim: int, continuous_cols: List[str], column_idx: Dict[str, int], activation: str):
        super(ContMlpPerFeature, self).__init__()
        self.input_dim = input_dim
        self.column_idx = column_idx
        self.continuous_cols = continuous_cols
        self.activation = activation
        # one scalar-output MLP per continuous column
        mlps = {}
        for col in self.continuous_cols:
            mlps['mlp_' + col] = MLP(d_hidden=[input_dim, input_dim * 2, 1], activation=activation, dropout=0.0, batchnorm=False, batchnorm_last=False, linear_first=False)
        self.mlp = nn.ModuleDict(mlps)

    def forward(self, X: Tensor, r_: Tensor) -> List[Tuple[Tensor, Tensor]]:
        """Return a (target, prediction) tuple per continuous feature."""
        pairs = []
        for col in self.continuous_cols:
            idx = self.column_idx[col]
            pairs.append((X[:, idx].unsqueeze(1).float(), self.mlp['mlp_' + col](r_[:, idx])))
        return pairs
class RandomObfuscator(nn.Module):
    """Creates and applies an obfuscation masks

    Note that the class will return a mask tensor with 1s IF the feature value
    is considered for reconstruction

    Parameters:
    ----------
    p: float
        Ratio of features that will be discarded for reconstruction
        (Bernoulli probability per feature entry)
    """

    def __init__(self, p: float):
        super(RandomObfuscator, self).__init__()
        self.p = p

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return ``(masked_input, mask)`` where ``mask`` is a fresh Bernoulli
        sample and ``masked_input`` has the masked entries zeroed out.
        """
        # Build the mask on the same device as the input. The previous
        # `torch.ones(x.shape)` always allocated on the CPU, which raised a
        # device-mismatch error whenever `x` lived on a GPU.
        mask = torch.bernoulli(self.p * torch.ones(x.shape, device=x.device))
        # keep only the entries NOT selected for reconstruction
        masked_input = torch.mul(1 - mask, x)
        return masked_input, mask
# Union of the four denoising heads used during self-supervised pre-training.
DenoiseMlp = Union[CatSingleMlp, ContSingleMlp, CatMlpPerFeature, ContMlpPerFeature]
class FeedForward(nn.Module):
    """Position-wise feed-forward sublayer used by the Transformer blocks."""

    def __init__(self, input_dim: int, dropout: float, activation: str, mult: float=4.0):
        super(FeedForward, self).__init__()
        ff_hidden_dim = int(input_dim * mult)
        # *glu activations split the tensor in two internally, so the first
        # linear layer must produce twice the hidden units
        first_out_dim = ff_hidden_dim * 2 if activation.endswith('glu') else ff_hidden_dim
        self.w_1 = nn.Linear(input_dim, first_out_dim)
        self.w_2 = nn.Linear(ff_hidden_dim, input_dim)
        self.dropout = nn.Dropout(dropout)
        self.activation = get_activation_fn(activation)

    def forward(self, X: Tensor) -> Tensor:
        hidden = self.dropout(self.activation(self.w_1(X)))
        return self.w_2(hidden)
class LinearAttention(nn.Module):
    """Multi-head attention with Linformer-style linear complexity.

    The key and value sequences (length ``n_feats``) are projected down to
    ``dim_k = int(kv_compression_factor * n_feats)`` via the learned
    matrices ``E`` and ``F`` (optionally shared), so the attention cost
    grows linearly rather than quadratically with the number of features.
    """

    def __init__(self, input_dim: int, n_feats: int, n_heads: int, use_bias: bool, dropout: float, kv_compression_factor: float, kv_sharing: bool):
        super(LinearAttention, self).__init__()
        assert input_dim % n_heads == 0, "'input_dim' must be divisible by 'n_heads'"
        self.n_feats = n_feats
        self.head_dim = input_dim // n_heads
        self.n_heads = n_heads
        self.kv_compression_factor = kv_compression_factor
        self.share_kv = kv_sharing
        # compressed sequence length used for keys/values
        dim_k = int(self.kv_compression_factor * self.n_feats)
        self.dropout = nn.Dropout(dropout)
        # single projection producing q, k and v (split in forward)
        self.qkv_proj = nn.Linear(input_dim, input_dim * 3, bias=use_bias)
        # E compresses the keys; F compresses the values (F aliases E when sharing)
        self.E = nn.init.xavier_uniform_(nn.Parameter(torch.zeros(n_feats, dim_k)))
        if not kv_sharing:
            self.F = nn.init.xavier_uniform_(nn.Parameter(torch.zeros(n_feats, dim_k)))
        else:
            self.F = self.E
        # output projection only needed when several heads must be mixed
        self.out_proj = nn.Linear(input_dim, input_dim, bias=use_bias) if n_heads > 1 else None

    def forward(self, X: Tensor) -> Tensor:
        # X assumed to be (batch, n_feats, input_dim) -- TODO confirm at call site
        q, k, v = self.qkv_proj(X).chunk(3, dim=-1)
        q = einops.rearrange(q, 'b s (h d) -> b h s d', h=self.n_heads)
        # compress the sequence dimension of k and v from s down to dim_k
        k = einsum('b s i, s k -> b k i', k, self.E)
        v = einsum('b s i, s k -> b k i', v, self.F)
        k = einops.rearrange(k, 'b k (h d) -> b h k d', d=self.head_dim)
        v = einops.rearrange(v, 'b k (h d) -> b h k d', d=self.head_dim)
        # scaled dot-product over the compressed key dimension
        scores = einsum('b h s d, b h k d -> b h s k', q, k) / math.sqrt(self.head_dim)
        attn_weights = scores.softmax(dim=-1)
        # stored (pre-dropout) so they can be inspected via `attention_weights`
        self.attn_weights = attn_weights
        attn_weights = self.dropout(attn_weights)
        output = einsum('b h s k, b h k d -> b h s d', attn_weights, v)
        output = einops.rearrange(output, 'b h s d -> b s (h d)')
        if self.out_proj is not None:
            output = self.out_proj(output)
        return output
class NormAdd(nn.Module):
    """Pre-norm residual wrapper: ``x + dropout(sublayer(layernorm(x)))``."""

    def __init__(self, input_dim: int, dropout: float):
        super(NormAdd, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.ln = nn.LayerNorm(input_dim)

    def forward(self, X: Tensor, sublayer: nn.Module) -> Tensor:
        residual = X
        sublayer_out = self.dropout(sublayer(self.ln(X)))
        return residual + sublayer_out
class FTTransformerEncoder(nn.Module):
    """Single FTTransformer block: linear attention then a feed-forward net.

    The very first block applies the attention residual without the pre-norm;
    subsequent blocks use the pre-norm (`NormAdd`) wrapper throughout.
    """

    def __init__(self, input_dim: int, n_feats: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, kv_compression_factor: float, kv_sharing: bool, activation: str, ff_factor: float, first_block: bool):
        super(FTTransformerEncoder, self).__init__()
        self.first_block = first_block
        self.attn = LinearAttention(input_dim, n_feats, n_heads, use_bias, attn_dropout, kv_compression_factor, kv_sharing)
        self.ff = FeedForward(input_dim, ff_dropout, activation, ff_factor)
        self.attn_normadd = NormAdd(input_dim, attn_dropout)
        self.ff_normadd = NormAdd(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        attn_out = X + self.attn(X) if self.first_block else self.attn_normadd(X, self.attn)
        return self.ff_normadd(attn_out, self.ff)
class FTTransformer(BaseTabularModelWithAttention):
    """Defines a [FTTransformer model](https://arxiv.org/abs/2106.11959) that
    can be used as the `deeptabular` component of a Wide & Deep model or
    independently by itself.

    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed through
        the model. Required to slice the tensors. e.g.
        _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name and number of unique values for
        each categorical component e.g. _[(education, 11), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False,
        Boolean indicating if bias will be used for the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation function for the categorical embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    full_embed_dropout: bool, default = False
        Boolean indicating if an entire embedding (i.e. the representation of
        one column) will be dropped in the batch. See:
        `pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
        If `full_embed_dropout = True`, `cat_embed_dropout` is ignored.
    shared_embed: bool, default = False
        The idea behind `shared_embed` is described in the Appendix A in the
        [TabTransformer paper](https://arxiv.org/abs/2012.06678): the
        goal of having column embedding is to enable the model to distinguish
        the classes in one column from those in the other columns. In other
        words, the idea is to let the model learn which column is embedded
        at the time.
    add_shared_embed: bool, default = False,
        The two embedding sharing strategies are: 1) add the shared embeddings
        to the column embeddings or 2) to replace the first
        `frac_shared_embed` with the shared embeddings.
        See `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
    frac_shared_embed: float, default = 0.25
        The fraction of embeddings that will be shared (if `add_shared_embed
        = False`) by all the different categories for one particular
        column.
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = None
        Type of normalization layer applied to the continuous features. Options
        are: 'layernorm', 'batchnorm' or None.
    cont_embed_dropout: float, default = 0.1,
        Continuous embeddings dropout
    use_cont_bias: bool, default = True,
        Boolean indicating if bias will be used for the continuous embeddings
    cont_embed_activation: str, default = None
        Activation function to be applied to the continuous embeddings, if
        any. _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    input_dim: int, default = 64
        The so-called *dimension of the model*. Is the number of embeddings used to encode
        the categorical and/or continuous columns.
    kv_compression_factor: float, default = 0.5
        By default, the FTTransformer uses Linear Attention
        (See [Linformer: Self-Attention with Linear Complexity](https://arxiv.org/abs/2006.04768>) ).
        The compression factor that will be used to reduce the input sequence
        length. If we denote the resulting sequence length as
        $k = int(kv_{compression \\space factor} \\times s)$
        where $s$ is the input sequence length.
    kv_sharing: bool, default = False
        Boolean indicating if the $E$ and $F$ projection matrices
        will share weights. See [Linformer: Self-Attention with Linear
        Complexity](https://arxiv.org/abs/2006.04768) for details
    n_heads: int, default = 8
        Number of attention heads per FTTransformer block
    use_qkv_bias: bool, default = False
        Boolean indicating whether or not to use bias in the Q, K, and V
        projection layers
    n_blocks: int, default = 4
        Number of FTTransformer blocks
    attn_dropout: float, default = 0.2
        Dropout that will be applied to the Linear-Attention layers
    ff_dropout: float, default = 0.1
        Dropout that will be applied to the FeedForward network
    transformer_activation: str, default = "reglu"
        Transformer Encoder activation function. _'tanh'_, _'relu'_,
        _'leaky_relu'_, _'gelu'_, _'geglu'_ and _'reglu'_ are supported
    ff_factor: float, default = 1.33
        Multiplicative factor applied to the first layer of the FF network in
        each Transformer block, This is normally set to 4, but they use 4/3
        in the paper.
    mlp_hidden_dims: List, Optional, default = None
        MLP hidden dimensions. If not provided no MLP on top of the final
        FTTransformer block will be used
    mlp_activation: str, default = "relu"
        MLP activation function. _'tanh'_, _'relu'_, _'leaky_relu'_ and
        _'gelu'_ are supported
    mlp_dropout: float, default = 0.1
        Dropout that will be applied to the final MLP
    mlp_batchnorm: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        dense layers
    mlp_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers
    mlp_linear_first: bool, default = True
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        This is the module that processes the categorical and continuous columns
    encoder: nn.Module
        Sequence of FTTransformer blocks
    mlp: nn.Module
        MLP component in the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import FTTransformer
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
    >>> continuous_cols = ['e']
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = FTTransformer(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols=continuous_cols)
    >>> out = model(X_tab)
    """

    def __init__(self, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int]]]=None, cat_embed_dropout: float=0.1, use_cat_bias: bool=False, cat_embed_activation: Optional[str]=None, full_embed_dropout: bool=False, shared_embed: bool=False, add_shared_embed: bool=False, frac_shared_embed: float=0.25, continuous_cols: Optional[List[str]]=None, cont_norm_layer: str=None, cont_embed_dropout: float=0.1, use_cont_bias: bool=True, cont_embed_activation: Optional[str]=None, input_dim: int=64, kv_compression_factor: float=0.5, kv_sharing: bool=False, use_qkv_bias: bool=False, n_heads: int=8, n_blocks: int=4, attn_dropout: float=0.2, ff_dropout: float=0.1, transformer_activation: str='reglu', ff_factor: float=1.33, mlp_hidden_dims: Optional[List[int]]=None, mlp_activation: str='relu', mlp_dropout: float=0.1, mlp_batchnorm: bool=False, mlp_batchnorm_last: bool=False, mlp_linear_first: bool=True):
        # all embedding-related arguments are handled by the base model
        super(FTTransformer, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, full_embed_dropout=full_embed_dropout, shared_embed=shared_embed, add_shared_embed=add_shared_embed, frac_shared_embed=frac_shared_embed, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=True, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation, input_dim=input_dim)
        self.kv_compression_factor = kv_compression_factor
        self.kv_sharing = kv_sharing
        self.use_qkv_bias = use_qkv_bias
        self.n_heads = n_heads
        self.n_blocks = n_blocks
        self.attn_dropout = attn_dropout
        self.ff_dropout = ff_dropout
        self.transformer_activation = transformer_activation
        self.ff_factor = ff_factor
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # when a 'cls_token' column exists, only its final representation
        # feeds the MLP; otherwise the whole sequence is flattened
        self.with_cls_token = 'cls_token' in column_idx
        self.n_cat = len(cat_embed_input) if cat_embed_input is not None else 0
        self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
        self.n_feats = self.n_cat + self.n_cont
        # the first block skips the attention pre-norm (see FTTransformerEncoder)
        is_first = True
        self.encoder = nn.Sequential()
        for i in range(n_blocks):
            self.encoder.add_module('fttransformer_block' + str(i), FTTransformerEncoder(input_dim, self.n_feats, n_heads, use_qkv_bias, attn_dropout, ff_dropout, kv_compression_factor, kv_sharing, transformer_activation, ff_factor, is_first))
            is_first = False
        self.mlp_first_hidden_dim = self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
        if mlp_hidden_dims is not None:
            self.mlp = MLP([self.mlp_first_hidden_dim] + mlp_hidden_dims, mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
        else:
            self.mlp = None

    def forward(self, X: Tensor) -> Tensor:
        x = self._get_embeddings(X)
        x = self.encoder(x)
        if self.with_cls_token:
            # keep only the [CLS] token's representation
            x = x[:, 0, :]
        else:
            x = x.flatten(1)
        if self.mlp is not None:
            x = self.mlp(x)
        return x

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        neccesary to build the `WideDeep` class
        """
        return self.mlp_hidden_dims[-1] if self.mlp_hidden_dims is not None else self.mlp_first_hidden_dim

    @property
    def attention_weights(self) -> List:
        """List with the attention weights per block

        The shape of the attention weights is: $(N, H, F, k)$, where $N$ is
        the batch size, $H$ is the number of attention heads, $F$ is the
        number of features/columns and $k$ is the reduced sequence length or
        dimension, i.e. $k = int(kv_{compression \\space factor} \\times s)$
        """
        return [blk.attn.attn_weights for blk in self.encoder]
class MultiHeadedAttention(nn.Module):
    """Standard multi-head (self or cross) attention.

    When ``X_KV`` is given, queries come from ``X_Q`` and keys/values from
    ``X_KV`` (cross-attention); otherwise plain self-attention on ``X_Q``.
    ``query_dim`` allows the query stream to have a different embedding size
    than the key/value stream.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, dropout: float, query_dim: Optional[int]=None):
        super(MultiHeadedAttention, self).__init__()
        assert input_dim % n_heads == 0, "'input_dim' must be divisible by 'n_heads'"
        self.head_dim = input_dim // n_heads
        self.n_heads = n_heads
        self.dropout = nn.Dropout(dropout)
        query_dim = query_dim if query_dim is not None else input_dim
        self.q_proj = nn.Linear(query_dim, input_dim, bias=use_bias)
        # joint projection producing keys and values (split in forward)
        self.kv_proj = nn.Linear(input_dim, input_dim * 2, bias=use_bias)
        # project back to the query stream's width; skipped for a single head
        self.out_proj = nn.Linear(input_dim, query_dim, bias=use_bias) if n_heads > 1 else None

    def forward(self, X_Q: Tensor, X_KV: Optional[Tensor]=None) -> Tensor:
        q = self.q_proj(X_Q)
        # default to self-attention when no separate key/value input is given
        X_KV = X_KV if X_KV is not None else X_Q
        k, v = self.kv_proj(X_KV).chunk(2, dim=-1)
        q, k, v = map(lambda t: einops.rearrange(t, 'b m (h d) -> b h m d', h=self.n_heads), (q, k, v))
        # scaled dot-product attention
        scores = einsum('b h s d, b h l d -> b h s l', q, k) / math.sqrt(self.head_dim)
        attn_weights = scores.softmax(dim=-1)
        # stored (pre-dropout) for later inspection
        self.attn_weights = attn_weights
        attn_weights = self.dropout(attn_weights)
        attn_output = einsum('b h s l, b h l d -> b h s d', attn_weights, v)
        output = einops.rearrange(attn_output, 'b h s d -> b s (h d)', h=self.n_heads)
        if self.out_proj is not None:
            output = self.out_proj(output)
        return output
class SaintEncoder(nn.Module):
    """SAINT block: column (inter-feature) attention followed by row
    (inter-sample) attention, each with its own feed-forward network."""

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, activation: str, n_feat: int):
        super(SaintEncoder, self).__init__()
        self.n_feat = n_feat
        # attention over features (columns) within each sample
        self.col_attn = MultiHeadedAttention(input_dim, n_heads, use_bias, attn_dropout)
        self.col_attn_ff = FeedForward(input_dim, ff_dropout, activation)
        self.col_attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.col_attn_ff_addnorm = AddNorm(input_dim, ff_dropout)
        # attention over samples (rows), run on the flattened per-sample
        # representation of size n_feat * input_dim
        self.row_attn = MultiHeadedAttention(n_feat * input_dim, n_heads, use_bias, attn_dropout)
        self.row_attn_ff = FeedForward(n_feat * input_dim, ff_dropout, activation)
        self.row_attn_addnorm = AddNorm(n_feat * input_dim, attn_dropout)
        self.row_attn_ff_addnorm = AddNorm(n_feat * input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        out = self.col_attn_addnorm(X, self.col_attn)
        out = self.col_attn_ff_addnorm(out, self.col_attn_ff)
        # treat the whole batch as one 'sequence' of samples for row attention
        out = einops.rearrange(out, 'b n d -> 1 b (n d)')
        out = self.row_attn_addnorm(out, self.row_attn)
        out = self.row_attn_ff_addnorm(out, self.row_attn_ff)
        return einops.rearrange(out, '1 b (n d) -> b n d', n=self.n_feat)
class SAINT(BaseTabularModelWithAttention):
"""Defines a [SAINT model](https://arxiv.org/abs/2106.01342) that
can be used as the `deeptabular` component of a Wide & Deep model or
independently by itself.
:information_source: **NOTE**: This is an slightly modified and enhanced
version of the model described in the paper,
Parameters
----------
column_idx: Dict
Dict containing the index of the columns that will be passed through
the model. Required to slice the tensors. e.g.
_{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
cat_embed_input: List, Optional, default = None
List of Tuples with the column name and number of unique values and
embedding dimension. e.g. _[(education, 11), ...]_
cat_embed_dropout: float, default = 0.1
Categorical embeddings dropout
use_cat_bias: bool, default = False,
Boolean indicating if bias will be used for the categorical embeddings
cat_embed_activation: Optional, str, default = None,
Activation function for the categorical embeddings, if any. _'tanh'_,
_'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
full_embed_dropout: bool, default = False
Boolean indicating if an entire embedding (i.e. the representation of
one column) will be dropped in the batch. See:
`pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
If `full_embed_dropout = True`, `cat_embed_dropout` is ignored.
shared_embed: bool, default = False
The idea behind `shared_embed` is described in the Appendix A in the
[TabTransformer paper](https://arxiv.org/abs/2012.06678): the
goal of having column embedding is to enable the model to distinguish
the classes in one column from those in the other columns. In other
words, the idea is to let the model learn which column is embedded
at the time.
add_shared_embed: bool, default = False
The two embedding sharing strategies are: 1) add the shared embeddings
to the column embeddings or 2) to replace the first
`frac_shared_embed` with the shared embeddings.
See `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
frac_shared_embed: float, default = 0.25
The fraction of embeddings that will be shared (if `add_shared_embed
= False`) by all the different categories for one particular
column.
continuous_cols: List, Optional, default = None
List with the name of the numeric (aka continuous) columns
cont_norm_layer: str, default = "batchnorm"
Type of normalization layer applied to the continuous features. Options
are: _'layernorm'_, _'batchnorm'_ or None.
cont_embed_dropout: float, default = 0.1,
Continuous embeddings dropout
use_cont_bias: bool, default = True,
Boolean indicating if bias will be used for the continuous embeddings
cont_embed_activation: str, default = None
Activation function to be applied to the continuous embeddings, if
any. _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
input_dim: int, default = 32
The so-called *dimension of the model*. Is the number of
embeddings used to encode the categorical and/or continuous columns
n_heads: int, default = 8
Number of attention heads per Transformer block
use_qkv_bias: bool, default = False
Boolean indicating whether or not to use bias in the Q, K, and V
projection layers
n_blocks: int, default = 2
Number of SAINT-Transformer blocks.
attn_dropout: float, default = 0.2
Dropout that will be applied to the Multi-Head Attention column and
row layers
ff_dropout: float, default = 0.1
Dropout that will be applied to the FeedForward network
transformer_activation: str, default = "gelu"
Transformer Encoder activation function. _'tanh'_, _'relu'_,
_'leaky_relu'_, _'gelu'_, _'geglu'_ and _'reglu'_ are supported
mlp_hidden_dims: List, Optional, default = None
MLP hidden dimensions. If not provided it will default to $[l, 4
\\times l, 2 \\times l]$ where $l$ is the MLP's input dimension
mlp_activation: str, default = "relu"
MLP activation function. _'tanh'_, _'relu'_, _'leaky_relu'_ and
_'gelu'_ are supported
mlp_dropout: float, default = 0.1
Dropout that will be applied to the final MLP
mlp_batchnorm: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
dense layers
mlp_batchnorm_last: bool, default = False
Boolean indicating whether or not to apply batch normalization to the
last of the dense layers
mlp_linear_first: bool, default = False
Boolean indicating whether the order of the operations in the dense
layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
LIN -> ACT]`
Attributes
----------
cat_and_cont_embed: nn.Module
This is the module that processes the categorical and continuous columns
encoder: nn.Module
Sequence of SAINT-Transformer blocks
mlp: nn.Module
MLP component in the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import SAINT
>>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
>>> colnames = ['a', 'b', 'c', 'd', 'e']
>>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
>>> continuous_cols = ['e']
>>> column_idx = {k:v for v,k in enumerate(colnames)}
>>> model = SAINT(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols=continuous_cols)
>>> out = model(X_tab)
"""
def __init__(
    self,
    column_idx: Dict[str, int],
    cat_embed_input: Optional[List[Tuple[str, int]]] = None,
    cat_embed_dropout: float = 0.1,
    use_cat_bias: bool = False,
    cat_embed_activation: Optional[str] = None,
    full_embed_dropout: bool = False,
    shared_embed: bool = False,
    add_shared_embed: bool = False,
    frac_shared_embed: float = 0.25,
    continuous_cols: Optional[List[str]] = None,
    cont_norm_layer: str = None,
    cont_embed_dropout: float = 0.1,
    use_cont_bias: bool = True,
    cont_embed_activation: Optional[str] = None,
    input_dim: int = 32,
    use_qkv_bias: bool = False,
    n_heads: int = 8,
    n_blocks: int = 2,
    attn_dropout: float = 0.1,
    ff_dropout: float = 0.2,
    transformer_activation: str = 'gelu',
    mlp_hidden_dims: Optional[List[int]] = None,
    mlp_activation: str = 'relu',
    mlp_dropout: float = 0.1,
    mlp_batchnorm: bool = False,
    mlp_batchnorm_last: bool = False,
    mlp_linear_first: bool = True,
):
    """Build the embedding layers (via the base class), the stack of
    SAINT-Transformer blocks and the optional MLP head."""
    super(SAINT, self).__init__(
        column_idx=column_idx,
        cat_embed_input=cat_embed_input,
        cat_embed_dropout=cat_embed_dropout,
        use_cat_bias=use_cat_bias,
        cat_embed_activation=cat_embed_activation,
        full_embed_dropout=full_embed_dropout,
        shared_embed=shared_embed,
        add_shared_embed=add_shared_embed,
        frac_shared_embed=frac_shared_embed,
        continuous_cols=continuous_cols,
        cont_norm_layer=cont_norm_layer,
        embed_continuous=True,
        cont_embed_dropout=cont_embed_dropout,
        use_cont_bias=use_cont_bias,
        cont_embed_activation=cont_embed_activation,
        input_dim=input_dim,
    )
    self.use_qkv_bias = use_qkv_bias
    self.n_heads = n_heads
    self.n_blocks = n_blocks
    self.attn_dropout = attn_dropout
    self.ff_dropout = ff_dropout
    self.transformer_activation = transformer_activation
    self.mlp_hidden_dims = mlp_hidden_dims
    self.mlp_activation = mlp_activation
    self.mlp_dropout = mlp_dropout
    self.mlp_batchnorm = mlp_batchnorm
    self.mlp_batchnorm_last = mlp_batchnorm_last
    self.mlp_linear_first = mlp_linear_first
    # when a 'cls_token' column exists, predictions are read off that token only
    self.with_cls_token = 'cls_token' in column_idx
    self.n_cat = 0 if cat_embed_input is None else len(cat_embed_input)
    self.n_cont = 0 if continuous_cols is None else len(continuous_cols)
    self.n_feats = self.n_cat + self.n_cont
    # stack of SAINT blocks (column + row attention per block)
    self.encoder = nn.Sequential()
    for block_idx in range(n_blocks):
        self.encoder.add_module(
            'saint_block' + str(block_idx),
            SaintEncoder(
                input_dim,
                n_heads,
                use_qkv_bias,
                attn_dropout,
                ff_dropout,
                transformer_activation,
                self.n_feats,
            ),
        )
    # MLP input: the [CLS] token alone, or all feature embeddings flattened
    self.mlp_first_hidden_dim = (
        self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
    )
    if mlp_hidden_dims is None:
        self.mlp = None
    else:
        self.mlp = MLP(
            [self.mlp_first_hidden_dim] + mlp_hidden_dims,
            mlp_activation,
            mlp_dropout,
            mlp_batchnorm,
            mlp_batchnorm_last,
            mlp_linear_first,
        )
def forward(self, X: Tensor) -> Tensor:
    """Embed the input, run it through the SAINT encoder and the optional
    MLP head, returning the final representation."""
    out = self.encoder(self._get_embeddings(X))
    # keep only the [CLS] token, or concatenate all feature embeddings
    out = out[:, 0, :] if self.with_cls_token else out.flatten(1)
    return out if self.mlp is None else self.mlp(out)
@property
def output_dim(self) -> int:
    """Output dimension of the model; required to build the `WideDeep` class."""
    if self.mlp_hidden_dims is None:
        return self.mlp_first_hidden_dim
    return self.mlp_hidden_dims[-1]
@property
def attention_weights(self) -> List:
    """Attention weights, one entry per SAINT block.

    Each element is a tuple ``(column_attn, row_attn)`` whose shapes are
    $(N, H, F, F)$ and $(1, H, N, N)$ respectively, where $N$ is the batch
    size, $H$ the number of heads and $F$ the number of features/columns
    in the dataset.
    """
    return [
        (blk.col_attn.attn_weights, blk.row_attn.attn_weights)
        for blk in self.encoder
    ]
class AdditiveAttention(nn.Module):
    """Additive (Fastformer-style) attention.

    Instead of full pairwise attention, a per-head global query vector is
    pooled from the queries and multiplied element-wise into the keys; a
    global key is then pooled the same way and multiplied into the values.
    The pooling weights (``alphas``/``betas``) are stored per forward pass
    in ``self.attn_weights``.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, dropout: float, share_qv_weights: bool):
        super(AdditiveAttention, self).__init__()
        assert input_dim % n_heads == 0, "'input_dim' must be divisible by 'n_heads'"
        self.head_dim = input_dim // n_heads
        self.n_heads = n_heads
        self.share_qv_weights = share_qv_weights
        self.dropout = nn.Dropout(dropout)
        # optionally use one projection for both queries and values (paper option)
        if share_qv_weights:
            self.qv_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
        else:
            self.q_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
            self.v_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
        self.k_proj = nn.Linear(input_dim, input_dim, bias=use_bias)
        # per-head scoring heads used to pool the global query/key vectors
        self.W_q = nn.Linear(input_dim, n_heads)
        self.W_k = nn.Linear(input_dim, n_heads)
        self.r_out = nn.Linear(input_dim, input_dim)

    def forward(self, X: Tensor) -> Tensor:
        queries = self.qv_proj(X) if self.share_qv_weights else self.q_proj(X)
        values = self.qv_proj(X) if self.share_qv_weights else self.v_proj(X)
        keys = self.k_proj(X)

        scale = math.sqrt(self.head_dim)

        # pool a global query per head, weighted by the alpha scores
        alphas = (self.W_q(queries) / scale).softmax(dim=1)
        global_query = einsum(
            ' b s h, b s h d -> b h d',
            alphas,
            einops.rearrange(queries, 'b s (h d) -> b s h d', h=self.n_heads),
        )
        p = keys * einops.rearrange(global_query, 'b h d -> b () (h d)')

        # pool a global key from the query-modulated keys, weighted by betas
        betas = (self.W_k(p) / scale).softmax(dim=1)
        global_key = einsum(
            ' b s h, b s h d -> b h d',
            betas,
            einops.rearrange(p, 'b s (h d) -> b s h d', h=self.n_heads),
        )
        u = values * einops.rearrange(global_key, 'b h d -> b () (h d)')

        # expose pooling weights as (batch, heads, seq) for inspection
        self.attn_weights = (
            einops.rearrange(alphas, 'b s h -> b h s'),
            einops.rearrange(betas, 'b s h -> b h s'),
        )
        # residual connection on the (transformed) queries
        return queries + self.dropout(self.r_out(u))
class FastFormerEncoder(nn.Module):
    """One FastFormer block: additive attention followed by a feed-forward
    network, each wrapped in a residual Add&Norm layer."""

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, share_qv_weights: bool, activation: str):
        super(FastFormerEncoder, self).__init__()
        self.attn = AdditiveAttention(
            input_dim, n_heads, use_bias, attn_dropout, share_qv_weights
        )
        self.ff = FeedForward(input_dim, ff_dropout, activation)
        self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.ff_addnorm = AddNorm(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        return self.ff_addnorm(self.attn_addnorm(X, self.attn), self.ff)
class TabFastFormer(BaseTabularModelWithAttention):
    """Defines an adaptation of a [FastFormer](https://arxiv.org/abs/2108.09084)
    that can be used as the `deeptabular` component of a Wide & Deep model
    or independently by itself.

    :information_source: **NOTE**: while there are scientific publications for
    the `TabTransformer`, `SAINT` and `FTTransformer`, the `TabPerceiver`
    and the `TabFastFormer` are our own adaptations of the
    [Perceiver](https://arxiv.org/abs/2103.03206) and the
    [FastFormer](https://arxiv.org/abs/2108.09084) for tabular data.

    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed through
        the `TabFastFormer` model. Required to slice the tensors. e.g. _{'education':
        0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name and number of unique values for
        each categorical component e.g. _[(education, 11), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False,
        Boolean indicating if bias will be used for the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation function for the categorical embeddings
    full_embed_dropout: bool, default = False
        Boolean indicating if an entire embedding (i.e. the representation of
        one column) will be dropped in the batch. See:
        `pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
        If `full_embed_dropout = True`, `cat_embed_dropout` is ignored.
    shared_embed: bool, default = False
        The idea behind `shared_embed` is described in the Appendix A in the
        [TabTransformer paper](https://arxiv.org/abs/2012.06678): the goal of
        having column embedding is to enable the model to distinguish the
        classes in one column from those in the other columns. In other
        words, the idea is to let the model learn which column is embedded at
        the time.
    add_shared_embed: bool, default = False,
        The two embedding sharing strategies are: 1) add the shared embeddings
        to the column embeddings or 2) to replace the first
        `frac_shared_embed` with the shared embeddings.
        See `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
    frac_shared_embed: float, default = 0.25
        The fraction of embeddings that will be shared (if `add_shared_embed
        = False`) by all the different categories for one particular
        column.
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = None
        Type of normalization layer applied to the continuous features. Options
        are: _'layernorm'_, _'batchnorm'_ or None.
    cont_embed_dropout: float, default = 0.1,
        Continuous embeddings dropout
    use_cont_bias: bool, default = True,
        Boolean indicating if bias will be used for the continuous embeddings
    cont_embed_activation: str, default = None
        String indicating the activation function to be applied to the
        continuous embeddings, if any. _'tanh'_, _'relu'_, _'leaky_relu'_ and
        _'gelu'_ are supported.
    input_dim: int, default = 32
        The so-called *dimension of the model*. Is the number of
        embeddings used to encode the categorical and/or continuous columns
    n_heads: int, default = 8
        Number of attention heads per FastFormer block
    use_bias: bool, default = False
        Boolean indicating whether or not to use bias in the Q, K, and V
        projection layers
    n_blocks: int, default = 4
        Number of FastFormer blocks
    attn_dropout: float, default = 0.1
        Dropout that will be applied to the Additive Attention layers
    ff_dropout: float, default = 0.2
        Dropout that will be applied to the FeedForward network
    share_qv_weights: bool, default = False
        Following the paper, this is a boolean indicating if the Value ($V$) and
        the Query ($Q$) transformation parameters will be shared.
    share_weights: bool, default = False
        In addition to sharing the $V$ and $Q$ transformation parameters, the
        parameters across different Fastformer layers can also be shared.
        Please, see
        `pytorch_widedeep/models/tabular/transformers/tab_fastformer.py` for
        details
    transformer_activation: str, default = "relu"
        Transformer Encoder activation function. _'tanh'_, _'relu'_,
        _'leaky_relu'_, _'gelu'_, _'geglu'_ and _'reglu'_ are supported
    mlp_hidden_dims: List, Optional, default = None
        MLP hidden dimensions. If not provided it will default to $[l, 4
        \\times l, 2 \\times l]$ where $l$ is the MLP's input dimension
    mlp_activation: str, default = "relu"
        MLP activation function. _'tanh'_, _'relu'_, _'leaky_relu'_ and
        _'gelu'_ are supported
    mlp_dropout: float, default = 0.1
        Dropout that will be applied to the final MLP
    mlp_batchnorm: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        dense layers
    mlp_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers
    mlp_linear_first: bool, default = True
        Boolean indicating the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        This is the module that processes the categorical and continuous columns
    encoder: nn.Module
        Sequence of FastFormer blocks.
    mlp: nn.Module
        MLP component in the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabFastFormer
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
    >>> continuous_cols = ['e']
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabFastFormer(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols=continuous_cols)
    >>> out = model(X_tab)
    """

    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int]]] = None,
        cat_embed_dropout: float = 0.1,
        use_cat_bias: bool = False,
        cat_embed_activation: Optional[str] = None,
        full_embed_dropout: bool = False,
        shared_embed: bool = False,
        add_shared_embed: bool = False,
        frac_shared_embed: float = 0.25,
        continuous_cols: Optional[List[str]] = None,
        cont_norm_layer: str = None,
        cont_embed_dropout: float = 0.1,
        use_cont_bias: bool = True,
        cont_embed_activation: Optional[str] = None,
        input_dim: int = 32,
        n_heads: int = 8,
        use_bias: bool = False,
        n_blocks: int = 4,
        attn_dropout: float = 0.1,
        ff_dropout: float = 0.2,
        share_qv_weights: bool = False,
        share_weights: bool = False,
        transformer_activation: str = 'relu',
        mlp_hidden_dims: Optional[List[int]] = None,
        mlp_activation: str = 'relu',
        mlp_dropout: float = 0.1,
        mlp_batchnorm: bool = False,
        mlp_batchnorm_last: bool = False,
        mlp_linear_first: bool = True,
    ):
        super(TabFastFormer, self).__init__(
            column_idx=column_idx,
            cat_embed_input=cat_embed_input,
            cat_embed_dropout=cat_embed_dropout,
            use_cat_bias=use_cat_bias,
            cat_embed_activation=cat_embed_activation,
            full_embed_dropout=full_embed_dropout,
            shared_embed=shared_embed,
            add_shared_embed=add_shared_embed,
            frac_shared_embed=frac_shared_embed,
            continuous_cols=continuous_cols,
            cont_norm_layer=cont_norm_layer,
            embed_continuous=True,
            cont_embed_dropout=cont_embed_dropout,
            use_cont_bias=use_cont_bias,
            cont_embed_activation=cont_embed_activation,
            input_dim=input_dim,
        )
        self.n_heads = n_heads
        self.use_bias = use_bias
        self.n_blocks = n_blocks
        self.attn_dropout = attn_dropout
        self.ff_dropout = ff_dropout
        self.share_qv_weights = share_qv_weights
        self.share_weights = share_weights
        self.transformer_activation = transformer_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # when a 'cls_token' column exists, predictions are read off that token only
        self.with_cls_token = 'cls_token' in column_idx
        self.n_cat = len(cat_embed_input) if cat_embed_input is not None else 0
        self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
        self.n_feats = self.n_cat + self.n_cont
        # FastFormer encoder. If `share_weights` the very same first block
        # (same parameters) is re-registered for every position in the stack
        self.encoder = nn.Sequential()
        first_fastformer_block = FastFormerEncoder(
            input_dim,
            n_heads,
            use_bias,
            attn_dropout,
            ff_dropout,
            share_qv_weights,
            transformer_activation,
        )
        self.encoder.add_module('fastformer_block0', first_fastformer_block)
        for i in range(1, n_blocks):
            if share_weights:
                self.encoder.add_module('fastformer_block' + str(i), first_fastformer_block)
            else:
                self.encoder.add_module(
                    'fastformer_block' + str(i),
                    FastFormerEncoder(
                        input_dim,
                        n_heads,
                        use_bias,
                        attn_dropout,
                        ff_dropout,
                        share_qv_weights,
                        transformer_activation,
                    ),
                )
        # MLP input: the [CLS] token alone, or all feature embeddings flattened
        self.mlp_first_hidden_dim = (
            self.input_dim if self.with_cls_token else self.n_feats * self.input_dim
        )
        if mlp_hidden_dims is not None:
            self.mlp = MLP(
                [self.mlp_first_hidden_dim] + mlp_hidden_dims,
                mlp_activation,
                mlp_dropout,
                mlp_batchnorm,
                mlp_batchnorm_last,
                mlp_linear_first,
            )
        else:
            self.mlp = None

    def forward(self, X: Tensor) -> Tensor:
        """Embed the input, run the FastFormer encoder and the optional MLP
        head, returning the final representation."""
        x = self._get_embeddings(X)
        x = self.encoder(x)
        if self.with_cls_token:
            # keep only the representation of the [CLS] token
            x = x[:, 0, :]
        else:
            # concatenate the embeddings of all features
            x = x.flatten(1)
        if self.mlp is not None:
            x = self.mlp(x)
        return x

    @property
    def output_dim(self) -> int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        return (
            self.mlp_hidden_dims[-1]
            if self.mlp_hidden_dims is not None
            else self.mlp_first_hidden_dim
        )

    @property
    def attention_weights(self) -> List:
        """List with the attention weights. Each element of the list is a
        tuple where the first and second elements are the $\\alpha$
        and $\\beta$ attention weights in the paper.

        The shape of the attention weights is $(N, H, F)$ where $N$ is the
        batch size, $H$ is the number of attention heads and $F$ is the
        number of features/columns in the dataset
        """
        if self.share_weights:
            # all blocks share parameters, so a single set of weights suffices.
            # BUGFIX: the attribute set by AdditiveAttention.forward is
            # 'attn_weights' (plural); 'attn_weight' raised AttributeError
            attention_weights = [self.encoder[0].attn.attn_weights]
        else:
            attention_weights = [blk.attn.attn_weights for blk in self.encoder]
        return attention_weights
class PerceiverEncoder(nn.Module):
    """Norm-first encoder block used both for the Perceiver cross attention
    (where the queries are the latents and the keys/values come from the
    input embeddings) and for the latent transformer (self attention, with
    ``X_KV`` left as ``None``). When ``query_dim`` is given, the attention
    output — and therefore the rest of the block — lives in that dimension.
    """

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, activation: str, query_dim: Optional[int] = None):
        super(PerceiverEncoder, self).__init__()
        self.attn = MultiHeadedAttention(
            input_dim, n_heads, use_bias, attn_dropout, query_dim
        )
        attn_dim_out = input_dim if query_dim is None else query_dim
        self.ff = FeedForward(attn_dim_out, ff_dropout, activation)
        self.ln_q = nn.LayerNorm(attn_dim_out)
        self.ln_kv = nn.LayerNorm(input_dim)
        self.norm_attn_dropout = nn.Dropout(attn_dropout)
        self.ff_norm = nn.LayerNorm(attn_dim_out)
        self.norm_ff_dropout = nn.Dropout(ff_dropout)

    def forward(self, X_Q: Tensor, X_KV: Optional[Tensor] = None) -> Tensor:
        normed_q = self.ln_q(X_Q)
        normed_kv = self.ln_kv(X_KV) if X_KV is not None else None
        attended = normed_q + self.norm_attn_dropout(self.attn(normed_q, normed_kv))
        return attended + self.norm_ff_dropout(self.ff(self.ff_norm(attended)))
class TabPerceiver(BaseTabularModelWithAttention):
    """Defines an adaptation of a [Perceiver](https://arxiv.org/abs/2103.03206)
    that can be used as the `deeptabular` component of a Wide & Deep model
    or independently by itself.

    :information_source: **NOTE**: while there are scientific publications for
    the `TabTransformer`, `SAINT` and `FTTransformer`, the `TabPerceiver`
    and the `TabFastFormer` are our own adaptations of the
    [Perceiver](https://arxiv.org/abs/2103.03206) and the
    [FastFormer](https://arxiv.org/abs/2108.09084) for tabular data.

    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed through
        the model. Required to slice the tensors. e.g.
        _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name and number of unique values for
        each categorical component e.g. _[(education, 11), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False,
        Boolean indicating if bias will be used for the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation function for the categorical embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    full_embed_dropout: bool, default = False
        Boolean indicating if an entire embedding (i.e. the representation of
        one column) will be dropped in the batch. See:
        `pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
        If `full_embed_dropout = True`, `cat_embed_dropout` is ignored.
    shared_embed: bool, default = False
        The idea behind `shared_embed` is described in the Appendix A in the
        [TabTransformer paper](https://arxiv.org/abs/2012.06678): the
        goal of having column embedding is to enable the model to distinguish
        the classes in one column from those in the other columns. In other
        words, the idea is to let the model learn which column is embedded
        at the time.
    add_shared_embed: bool, default = False,
        The two embedding sharing strategies are: 1) add the shared embeddings
        to the column embeddings or 2) to replace the first
        `frac_shared_embed` with the shared embeddings.
        See `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
    frac_shared_embed: float, default = 0.25
        The fraction of embeddings that will be shared (if `add_shared_embed
        = False`) by all the different categories for one particular
        column.
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = None
        Type of normalization layer applied to the continuous features. Options
        are: _'layernorm'_, _'batchnorm'_ or None.
    cont_embed_dropout: float, default = 0.1,
        Continuous embeddings dropout
    use_cont_bias: bool, default = True,
        Boolean indicating if bias will be used for the continuous embeddings
    cont_embed_activation: str, default = None
        Activation function to be applied to the continuous embeddings, if
        any. _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    input_dim: int, default = 32
        The so-called *dimension of the model*. Is the number of embeddings
        used to encode the categorical and/or continuous columns.
    n_cross_attns: int, default = 1
        Number of times each perceiver block will cross attend to the input
        data (i.e. number of cross attention components per perceiver block).
        This should normally be 1. However, in the paper they describe some
        architectures (normally computer vision-related problems) where the
        Perceiver attends multiple times to the input array. Therefore, maybe
        multiple cross attention to the input array is also useful in some
        cases for tabular data :shrug: .
    n_cross_attn_heads: int, default = 4
        Number of attention heads for the cross attention component
    n_latents: int, default = 16
        Number of latents. This is the $N$ parameter in the paper. As
        indicated in the paper, this number should be significantly lower
        than $M$ (the number of columns in the dataset). Setting $N$ closer
        to $M$ defies the main purpose of the Perceiver, which is to overcome
        the transformer quadratic bottleneck
    latent_dim: int, default = 128
        Latent dimension.
    n_latent_heads: int, default = 4
        Number of attention heads per Latent Transformer
    n_latent_blocks: int, default = 4
        Number of transformer encoder blocks (normalised MHA + normalised FF)
        per Latent Transformer
    n_perceiver_blocks: int, default = 4
        Number of Perceiver blocks defined as [Cross Attention + Latent
        Transformer]
    share_weights: Boolean, default = False
        Boolean indicating if the weights will be shared between Perceiver
        blocks
    attn_dropout: float, default = 0.1
        Dropout that will be applied to the Multi-Head Attention layers
    ff_dropout: float, default = 0.1
        Dropout that will be applied to the FeedForward network
    transformer_activation: str, default = "geglu"
        Transformer Encoder activation function. _'tanh'_, _'relu'_,
        _'leaky_relu'_, _'gelu'_, _'geglu'_ and _'reglu'_ are supported
    mlp_hidden_dims: List, Optional, default = None
        MLP hidden dimensions. If not provided it will default to $[l, 4
        \\times l, 2 \\times l]$ where $l$ is the MLP's input dimension
    mlp_activation: str, default = "relu"
        MLP activation function. _'tanh'_, _'relu'_, _'leaky_relu'_ and
        _'gelu'_ are supported
    mlp_dropout: float, default = 0.1
        Dropout that will be applied to the final MLP
    mlp_batchnorm: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        dense layers
    mlp_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers
    mlp_linear_first: bool, default = True
        Boolean indicating the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`

    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        This is the module that processes the categorical and continuous columns
    encoder: nn.ModuleDict
        ModuleDict with the Perceiver blocks
    latents: nn.Parameter
        Latents that will be used for prediction
    mlp: nn.Module
        MLP component in the model

    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabPerceiver
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
    >>> continuous_cols = ['e']
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabPerceiver(column_idx=column_idx, cat_embed_input=cat_embed_input,
    ... continuous_cols=continuous_cols, n_latents=2, latent_dim=16,
    ... n_perceiver_blocks=2)
    >>> out = model(X_tab)
    """

    def __init__(self, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int]]]=None, cat_embed_dropout: float=0.1, use_cat_bias: bool=False, cat_embed_activation: Optional[str]=None, full_embed_dropout: bool=False, shared_embed: bool=False, add_shared_embed: bool=False, frac_shared_embed: float=0.25, continuous_cols: Optional[List[str]]=None, cont_norm_layer: str=None, cont_embed_dropout: float=0.1, use_cont_bias: bool=True, cont_embed_activation: Optional[str]=None, input_dim: int=32, n_cross_attns: int=1, n_cross_attn_heads: int=4, n_latents: int=16, latent_dim: int=128, n_latent_heads: int=4, n_latent_blocks: int=4, n_perceiver_blocks: int=4, share_weights: bool=False, attn_dropout: float=0.1, ff_dropout: float=0.1, transformer_activation: str='geglu', mlp_hidden_dims: Optional[List[int]]=None, mlp_activation: str='relu', mlp_dropout: float=0.1, mlp_batchnorm: bool=False, mlp_batchnorm_last: bool=False, mlp_linear_first: bool=True):
        super(TabPerceiver, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, full_embed_dropout=full_embed_dropout, shared_embed=shared_embed, add_shared_embed=add_shared_embed, frac_shared_embed=frac_shared_embed, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=True, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation, input_dim=input_dim)
        self.n_cross_attns = n_cross_attns
        self.n_cross_attn_heads = n_cross_attn_heads
        self.n_latents = n_latents
        self.latent_dim = latent_dim
        self.n_latent_heads = n_latent_heads
        self.n_latent_blocks = n_latent_blocks
        self.n_perceiver_blocks = n_perceiver_blocks
        self.share_weights = share_weights
        self.attn_dropout = attn_dropout
        self.ff_dropout = ff_dropout
        self.transformer_activation = transformer_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # learnable latent array (n_latents, latent_dim), truncated-normal
        # initialised; broadcast over the batch in `forward`
        self.latents = nn.init.trunc_normal_(nn.Parameter(torch.empty(n_latents, latent_dim)))
        # if `share_weights`, the very same first block (same parameters) is
        # re-registered under every perceiver_block key
        self.encoder = nn.ModuleDict()
        first_perceiver_block = self._build_perceiver_block()
        self.encoder['perceiver_block0'] = first_perceiver_block
        if share_weights:
            for n in range(1, n_perceiver_blocks):
                self.encoder['perceiver_block' + str(n)] = first_perceiver_block
        else:
            for n in range(1, n_perceiver_blocks):
                self.encoder['perceiver_block' + str(n)] = self._build_perceiver_block()
        # the MLP head operates on the (pooled) latents, not on the input features
        self.mlp_first_hidden_dim = self.latent_dim
        if mlp_hidden_dims is not None:
            self.mlp = MLP([self.mlp_first_hidden_dim] + mlp_hidden_dims, mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
        else:
            self.mlp = None

    def forward(self, X: Tensor) ->Tensor:
        # embed the input columns: (batch, n_feats, input_dim)
        x_emb = self._get_embeddings(X)
        # broadcast the learnable latents over the batch dimension
        x = einops.repeat(self.latents, 'n d -> b n d', b=X.shape[0])
        for n in range(self.n_perceiver_blocks):
            cross_attns = self.encoder['perceiver_block' + str(n)]['cross_attns']
            latent_transformer = self.encoder['perceiver_block' + str(n)]['latent_transformer']
            # latents cross-attend to the input embeddings, then self-attend
            for cross_attn in cross_attns:
                x = cross_attn(x, x_emb)
            x = latent_transformer(x)
        # average pool over the latents before the (optional) MLP head
        x = x.mean(dim=1)
        if self.mlp is not None:
            x = self.mlp(x)
        return x

    @property
    def output_dim(self) ->int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        return self.mlp_hidden_dims[-1] if self.mlp_hidden_dims is not None else self.mlp_first_hidden_dim

    @property
    def attention_weights(self) ->List:
        """List with the attention weights. If the weights are not shared
        between perceiver blocks each element of the list will be a list
        itself containing the Cross Attention and Latent Transformer
        attention weights respectively

        The shape of the attention weights is:

        - Cross Attention: $(N, C, L, F)$

        - Latent Attention: $(N, T, L, L)$

        Where $N$ is the batch size, $C$ is the number of Cross Attention
        heads, $L$ is the number of Latents, $F$ is the number of
        features/columns in the dataset and $T$ is the number of Latent
        Attention heads
        """
        if self.share_weights:
            # one shared block -> report its weights once
            cross_attns = self.encoder['perceiver_block0']['cross_attns']
            latent_transformer = self.encoder['perceiver_block0']['latent_transformer']
            attention_weights = self._extract_attn_weights(cross_attns, latent_transformer)
        else:
            attention_weights = []
            for n in range(self.n_perceiver_blocks):
                cross_attns = self.encoder['perceiver_block' + str(n)]['cross_attns']
                latent_transformer = self.encoder['perceiver_block' + str(n)]['latent_transformer']
                attention_weights.append(self._extract_attn_weights(cross_attns, latent_transformer))
        return attention_weights

    def _build_perceiver_block(self) ->nn.ModuleDict:
        # one Perceiver block = n_cross_attns cross-attention encoders
        # (latents as queries, input embeddings as keys/values) followed by a
        # latent transformer (self-attention over the latents)
        perceiver_block = nn.ModuleDict()
        cross_attns = nn.ModuleList()
        for _ in range(self.n_cross_attns):
            cross_attns.append(PerceiverEncoder(self.input_dim, self.n_cross_attn_heads, False, self.attn_dropout, self.ff_dropout, self.transformer_activation, self.latent_dim))
        perceiver_block['cross_attns'] = cross_attns
        latent_transformer = nn.Sequential()
        for i in range(self.n_latent_blocks):
            latent_transformer.add_module('latent_block' + str(i), PerceiverEncoder(self.latent_dim, self.n_latent_heads, False, self.attn_dropout, self.ff_dropout, self.transformer_activation))
        perceiver_block['latent_transformer'] = latent_transformer
        return perceiver_block

    @staticmethod
    def _extract_attn_weights(cross_attns, latent_transformer) ->List:
        # flatten the attention weights of one perceiver block into a list:
        # first the cross attention(s), then the latent transformer blocks
        attention_weights = []
        for cross_attn in cross_attns:
            attention_weights.append(cross_attn.attn.attn_weights)
        for latent_block in latent_transformer:
            attention_weights.append(latent_block.attn.attn_weights)
        return attention_weights
class TransformerEncoder(nn.Module):
    """Standard post-norm transformer encoder block: multi-head attention and
    a feed-forward network, each wrapped in a residual Add&Norm layer."""

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, activation: str):
        super(TransformerEncoder, self).__init__()
        self.attn = MultiHeadedAttention(input_dim, n_heads, use_bias, attn_dropout)
        self.ff = FeedForward(input_dim, ff_dropout, activation)
        self.attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.ff_addnorm = AddNorm(input_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        attended = self.attn_addnorm(X, self.attn)
        return self.ff_addnorm(attended, self.ff)
class TabTransformer(BaseTabularModelWithAttention):
    """Defines a [TabTransformer model](https://arxiv.org/abs/2012.06678) that
    can be used as the `deeptabular` component of a Wide & Deep model or
    independently by itself.
    :information_source: **NOTE**:
    This is an enhanced adaptation of the model described in the paper,
    containing a series of additional features.
    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed through
        the model. Required to slice the tensors. e.g.
        _{'education': 0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name and number of unique values for
        each categorical component e.g. _[(education, 11), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False,
        Boolean indicating if bias will be used for the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation function for the categorical embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    full_embed_dropout: bool, default = False
        Boolean indicating if an entire embedding (i.e. the representation of
        one column) will be dropped in the batch. See:
        `pytorch_widedeep.models.transformers._layers.FullEmbeddingDropout`.
        If `full_embed_dropout = True`, `cat_embed_dropout` is ignored.
    shared_embed: bool, default = False
        The idea behind `shared_embed` is described in the Appendix A in the
        [TabTransformer paper](https://arxiv.org/abs/2012.06678): the
        goal of having column embedding is to enable the model to distinguish
        the classes in one column from those in the other columns. In other
        words, the idea is to let the model learn which column is embedded at
        the time.
    add_shared_embed: bool, default = False,
        The two embedding sharing strategies are: 1) add the shared embeddings
        to the column embeddings or 2) to replace the first
        `frac_shared_embed` with the shared embeddings.
        See `pytorch_widedeep.models.transformers._layers.SharedEmbeddings`
    frac_shared_embed: float, default = 0.25
        The fraction of embeddings that will be shared (if `add_shared_embed
        = False`) by all the different categories for one particular
        column.
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Type of normalization layer applied to the continuous features. Options
        are: _'layernorm'_, _'batchnorm'_ or None.
    embed_continuous: bool, default = False
        Boolean indicating if the continuous columns will be embedded
        (i.e. passed each through a linear layer with or without activation)
    cont_embed_dropout: float, default = 0.1,
        Continuous embeddings dropout
    use_cont_bias: bool, default = True,
        Boolean indicating if bias will be used for the continuous embeddings
    cont_embed_activation: str, default = None
        Activation function to be applied to the continuous embeddings, if
        any. _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    input_dim: int, default = 32
        The so-called *dimension of the model*. Is the number of
        embeddings used to encode the categorical and/or continuous columns
    n_heads: int, default = 8
        Number of attention heads per Transformer block
    use_qkv_bias: bool, default = False
        Boolean indicating whether or not to use bias in the Q, K, and V
        projection layers.
    n_blocks: int, default = 4
        Number of Transformer blocks
    attn_dropout: float, default = 0.2
        Dropout that will be applied to the Multi-Head Attention layers
    ff_dropout: float, default = 0.1
        Dropout that will be applied to the FeedForward network
    transformer_activation: str, default = "gelu"
        Transformer Encoder activation function. _'tanh'_, _'relu'_,
        _'leaky_relu'_, _'gelu'_, _'geglu'_ and _'reglu'_ are supported
    mlp_hidden_dims: List, Optional, default = None
        MLP hidden dimensions. If not provided it will default to $[l,
        4\\times l, 2 \\times l]$ where $l$ is the MLP's input dimension
    mlp_activation: str, default = "relu"
        MLP activation function. _'tanh'_, _'relu'_, _'leaky_relu'_ and
        _'gelu'_ are supported
    mlp_dropout: float, default = 0.1
        Dropout that will be applied to the final MLP
    mlp_batchnorm: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        dense layers
    mlp_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers
    mlp_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`
    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        This is the module that processes the categorical and continuous columns
    encoder: nn.Module
        Sequence of Transformer blocks
    mlp: nn.Module
        MLP component in the model
    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabTransformer
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i) for u,i in zip(colnames[:4], [4]*4)]
    >>> continuous_cols = ['e']
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabTransformer(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols=continuous_cols)
    >>> out = model(X_tab)
    """
    def __init__(
        self,
        column_idx: Dict[str, int],
        cat_embed_input: Optional[List[Tuple[str, int]]]=None,
        cat_embed_dropout: float=0.1,
        use_cat_bias: bool=False,
        cat_embed_activation: Optional[str]=None,
        full_embed_dropout: bool=False,
        shared_embed: bool=False,
        add_shared_embed: bool=False,
        frac_shared_embed: float=0.25,
        continuous_cols: Optional[List[str]]=None,
        cont_norm_layer: Optional[str]=None,
        embed_continuous: bool=False,
        cont_embed_dropout: float=0.1,
        use_cont_bias: bool=True,
        cont_embed_activation: Optional[str]=None,
        input_dim: int=32,
        n_heads: int=8,
        use_qkv_bias: bool=False,
        n_blocks: int=4,
        attn_dropout: float=0.2,
        ff_dropout: float=0.1,
        transformer_activation: str='gelu',
        mlp_hidden_dims: Optional[List[int]]=None,
        mlp_activation: str='relu',
        mlp_dropout: float=0.1,
        mlp_batchnorm: bool=False,
        mlp_batchnorm_last: bool=False,
        mlp_linear_first: bool=True,
    ):
        # all the embedding machinery lives in the base class
        super(TabTransformer, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, full_embed_dropout=full_embed_dropout, shared_embed=shared_embed, add_shared_embed=add_shared_embed, frac_shared_embed=frac_shared_embed, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=embed_continuous, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation, input_dim=input_dim)
        self.n_heads = n_heads
        self.use_qkv_bias = use_qkv_bias
        self.n_blocks = n_blocks
        self.attn_dropout = attn_dropout
        self.ff_dropout = ff_dropout
        self.transformer_activation = transformer_activation
        self.mlp_hidden_dims = mlp_hidden_dims
        self.mlp_activation = mlp_activation
        self.mlp_dropout = mlp_dropout
        self.mlp_batchnorm = mlp_batchnorm
        self.mlp_batchnorm_last = mlp_batchnorm_last
        self.mlp_linear_first = mlp_linear_first
        # a 'cls_token' column means pooling is done via the [CLS] embedding
        self.with_cls_token = 'cls_token' in column_idx
        self.n_cat = len(cat_embed_input) if cat_embed_input is not None else 0
        self.n_cont = len(continuous_cols) if continuous_cols is not None else 0
        # without categorical columns, un-embedded continuous features would
        # bypass the transformer entirely, so that configuration is rejected
        if self.n_cont and not self.n_cat and not self.embed_continuous:
            raise ValueError("If only continuous features are used 'embed_continuous' must be set to 'True'")
        self.encoder = nn.Sequential()
        for i in range(n_blocks):
            self.encoder.add_module('transformer_block' + str(i), TransformerEncoder(input_dim, n_heads, use_qkv_bias, attn_dropout, ff_dropout, transformer_activation))
        self.mlp_first_hidden_dim = self._mlp_first_hidden_dim()
        if mlp_hidden_dims is not None:
            self.mlp = MLP([self.mlp_first_hidden_dim] + mlp_hidden_dims, mlp_activation, mlp_dropout, mlp_batchnorm, mlp_batchnorm_last, mlp_linear_first)
        else:
            self.mlp = None
    def forward(self, X: Tensor) ->Tensor:
        """Embed the input, run it through the Transformer blocks and,
        optionally, through the final MLP.
        """
        if not self.embed_continuous:
            # only categorical features go through the transformer; the raw
            # (normalized) continuous features are concatenated afterwards
            x_cat, x_cont = self.cat_and_cont_embed(X)
            if x_cat is not None:
                x = self.cat_embed_act_fn(x_cat) if self.cat_embed_act_fn is not None else x_cat
        else:
            x = self._get_embeddings(X)
            x_cont = None
        x = self.encoder(x)
        if self.with_cls_token:
            # keep only the [CLS] token representation
            x = x[:, 0, :]
        else:
            x = x.flatten(1)
        if x_cont is not None and not self.embed_continuous:
            x = torch.cat([x, x_cont], 1)
        if self.mlp is not None:
            x = self.mlp(x)
        return x
    def _mlp_first_hidden_dim(self) ->int:
        # Dimension of the flattened encoder output (plus any un-embedded
        # continuous features), i.e. the MLP's input size
        if self.with_cls_token:
            if self.embed_continuous:
                attn_output_dim = self.input_dim
            else:
                attn_output_dim = self.input_dim + self.n_cont
        elif self.embed_continuous:
            attn_output_dim = (self.n_cat + self.n_cont) * self.input_dim
        else:
            attn_output_dim = self.n_cat * self.input_dim + self.n_cont
        return attn_output_dim
    @property
    def output_dim(self) ->int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        return self.mlp_hidden_dims[-1] if self.mlp_hidden_dims is not None else self.mlp_first_hidden_dim
    @property
    def attention_weights(self) ->List:
        """List with the attention weights per block
        The shape of the attention weights is $(N, H, F, F)$, where $N$ is the
        batch size, $H$ is the number of attention heads and $F$ is the
        number of features/columns in the dataset
        """
        return [blk.attn.attn_weights for blk in self.encoder]
    def _compute_attn_output_dim(self) ->int:
        # kept for backward compatibility: this was a byte-for-byte duplicate
        # of _mlp_first_hidden_dim, which is now the single implementation
        return self._mlp_first_hidden_dim()
ModelWithAttention = Union[TabTransformer, SAINT, FTTransformer, TabFastFormer, TabPerceiver, ContextAttentionMLP, SelfAttentionMLP]
class BasePreprocessor:
    """Base Class of All Preprocessors."""
    def __init__(self, *args):
        pass
    def fit(self, df: pd.DataFrame):
        """Learn the preprocessing parameters from `df`. Subclasses must override."""
        raise NotImplementedError('Preprocessor must implement this method')
    def transform(self, df: pd.DataFrame):
        """Apply the learned preprocessing to `df`. Subclasses must override."""
        raise NotImplementedError('Preprocessor must implement this method')
    def fit_transform(self, df: pd.DataFrame):
        """Fit to `df`, then transform it. Subclasses must override."""
        raise NotImplementedError('Preprocessor must implement this method')
def check_is_fitted(estimator: BasePreprocessor, attributes: List[str]=None, all_or_any: str='all', condition: bool=True):
    """Checks if an estimator is fitted

    Parameters
    ----------
    estimator: ``BasePreprocessor``,
        An object of type ``BasePreprocessor``
    attributes: List, default = None
        List of strings with the attributes to check for
    all_or_any: str, default = "all"
        whether all or any of the attributes in the list must be present
    condition: bool, default = True,
        If no attribute list is passed, this is the condition that must be
        True for the estimator to be considered as fitted

    Raises
    ------
    NotFittedError
        If the required attributes are missing (or 'condition' is False)
    """
    estimator_name: str = estimator.__class__.__name__
    error_msg = "This {} instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.".format(estimator_name)
    # generator expressions (instead of materialized lists) let all()/any()
    # short-circuit on the first decisive attribute
    if attributes is not None and all_or_any == 'all':
        if not all(hasattr(estimator, attr) for attr in attributes):
            raise NotFittedError(error_msg)
    elif attributes is not None and all_or_any == 'any':
        if not any(hasattr(estimator, attr) for attr in attributes):
            raise NotFittedError(error_msg)
    elif not condition:
        # no attribute list given: fitted-ness is defined by 'condition'
        raise NotFittedError(error_msg)
def cut_mix(x: Tensor, lam: float=0.8) ->Tensor:
    """CutMix-style augmentation: shuffle the rows of `x` and restore each
    element from the original with probability `lam`, so a fraction ~(1 - lam)
    of entries come from another (random) row. `x` itself is not modified.
    """
    n_samples = x.size()[0]
    # per-element keep/replace mask: 0 (prob lam) -> keep original value
    keep_mask = torch.from_numpy(np.random.choice(2, x.shape, p=[lam, 1 - lam]))
    shuffled = x[torch.randperm(n_samples)].clone()
    shuffled[keep_mask == 0] = x[keep_mask == 0]
    return shuffled
def mix_up(p: Tensor, lam: float=0.8) ->Tensor:
    """Mixup augmentation: convex combination of `p` with a row-shuffled copy
    of itself, weighted by `lam`.
    """
    shuffle = torch.randperm(p.size()[0])
    return lam * p + (1 - lam) * p[shuffle, ...]
class GBN(torch.nn.Module):
    """
    Ghost Batch Normalization
    https://arxiv.org/abs/1705.08741

    Applies BatchNorm1d independently to 'virtual' sub-batches of at most
    `virtual_batch_size` rows.
    """
    def __init__(self, input_dim: int, virtual_batch_size: int=128, momentum: float=0.01):
        super(GBN, self).__init__()
        self.virtual_batch_size = virtual_batch_size
        self.bn = nn.BatchNorm1d(input_dim, momentum=momentum)
    def forward(self, X: Tensor) ->Tensor:
        # split into ceil(batch / vbs) virtual batches, normalize each, re-join
        n_chunks = int(np.ceil(X.shape[0] / self.virtual_batch_size))
        normed = [self.bn(chunk) for chunk in X.chunk(n_chunks, 0)]
        return torch.cat(normed, dim=0)
def initialize_glu(module, input_dim: int, output_dim: int):
    """Xavier-normal initialization of `module.weight` with the gain used by
    TabNet for GLU layers.
    """
    gain = np.sqrt((input_dim + output_dim) / np.sqrt(input_dim))
    torch.nn.init.xavier_normal_(module.weight, gain=gain)
class GLU_Layer(nn.Module):
    """Single GLU unit: Linear -> (Ghost)BatchNorm -> GLU -> Dropout.

    If `fc` is given it is used as the (possibly shared) linear layer;
    otherwise a fresh one is created and Xavier-initialized.
    """
    def __init__(self, input_dim: int, output_dim: int, dropout: float, fc: nn.Module=None, ghost_bn: bool=True, virtual_batch_size: int=128, momentum: float=0.02):
        super(GLU_Layer, self).__init__()
        # 'is not None' rather than truthiness: container modules (e.g. an
        # empty nn.ModuleList) are falsy and would be silently discarded
        if fc is not None:
            self.fc = fc
        else:
            # 2 * output_dim because F.glu halves the last dimension
            self.fc = nn.Linear(input_dim, 2 * output_dim, bias=False)
            initialize_glu(self.fc, input_dim, 2 * output_dim)
        if ghost_bn:
            self.bn: Union[GBN, nn.BatchNorm1d] = GBN(2 * output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum)
        else:
            self.bn = nn.BatchNorm1d(2 * output_dim, momentum=momentum)
        self.dp = nn.Dropout(dropout)
    def forward(self, X: Tensor) ->Tensor:
        return self.dp(F.glu(self.bn(self.fc(X))))
class GLU_Block(nn.Module):
    """Stack of `n_glu` GLU layers joined by sqrt(0.5)-scaled residual sums.

    If `shared_layers` is provided its linear layers are reused (TabNet-style
    weight sharing across decision steps) and `n_glu` is forced to match.
    """
    def __init__(self, input_dim: int, output_dim: int, dropout: float, n_glu: int=2, first: bool=False, shared_layers: nn.ModuleList=None, ghost_bn: bool=True, virtual_batch_size: int=128, momentum: float=0.02):
        super(GLU_Block, self).__init__()
        self.first = first
        if shared_layers is not None and n_glu != len(shared_layers):
            self.n_glu = len(shared_layers)
            warnings.warn("If 'shared_layers' is not None, 'n_glu' must be equal to the number of shared_layers. Got n_glu = {} and n shared_layers = {}. 'n_glu' has been set to {}".format(n_glu, len(shared_layers), len(shared_layers)), UserWarning)
        else:
            self.n_glu = n_glu
        glu_dim = [input_dim] + [output_dim] * self.n_glu
        self.glu_layers = nn.ModuleList()
        for i in range(self.n_glu):
            # reuse the shared linear layer when provided; otherwise GLU_Layer
            # creates (and initializes) its own
            fc = shared_layers[i] if shared_layers is not None else None
            self.glu_layers.append(GLU_Layer(glu_dim[i], glu_dim[i + 1], dropout, fc=fc, ghost_bn=ghost_bn, virtual_batch_size=virtual_batch_size, momentum=momentum))
    def forward(self, X: Tensor) ->Tensor:
        # sqrt(0.5) keeps the variance of the residual sum stable. A plain
        # Python float (instead of a CPU FloatTensor allocated every call)
        # avoids a per-forward allocation and a device mismatch on GPU
        scale = 0.5 ** 0.5
        if self.first:
            # the first layer changes dimensionality, so it gets no residual
            x = self.glu_layers[0](X)
            layers_left = range(1, self.n_glu)
        else:
            x = X
            layers_left = range(self.n_glu)
        for glu_id in layers_left:
            x = torch.add(x, self.glu_layers[glu_id](x)) * scale
        return x
class FeatTransformer(nn.Module):
    """Feature transformer of a TabNet step: a weight-shared GLU block
    followed by a step-dependent GLU block.
    """
    def __init__(self, input_dim: int, output_dim: int, dropout: float, shared_layers: nn.ModuleList, n_glu_step_dependent: int, ghost_bn=True, virtual_batch_size=128, momentum=0.02):
        super(FeatTransformer, self).__init__()
        bn_kwargs = {'ghost_bn': ghost_bn, 'virtual_batch_size': virtual_batch_size, 'momentum': momentum}
        # GLU layers whose linear weights are shared across all decision steps
        self.shared = GLU_Block(input_dim, output_dim, dropout, n_glu=len(shared_layers), first=True, shared_layers=shared_layers, **bn_kwargs)
        # GLU layers owned exclusively by this step
        self.step_dependent = GLU_Block(output_dim, output_dim, dropout, n_glu=n_glu_step_dependent, first=False, **bn_kwargs)
    def forward(self, X: Tensor) ->Tensor:
        shared_out = self.shared(X)
        return self.step_dependent(shared_out)
def initialize_non_glu(module, input_dim: int, output_dim: int):
    """Xavier-normal initialization of `module.weight` with the gain used by
    TabNet for plain (non-GLU) linear layers.
    """
    gain = np.sqrt((input_dim + output_dim) / np.sqrt(4 * input_dim))
    torch.nn.init.xavier_normal_(module.weight, gain=gain)
class TabNetDecoder(nn.Module):
    """Companion decoder model for the `TabNet` model (which can be
    considered an encoder itself)
    This class is designed to be used with the `EncoderDecoderTrainer` when
    using self-supervised pre-training (see the corresponding section in the
    docs). This class will receive the output from the `TabNet` encoder
    (i.e. the output from the so called 'steps') and '_reconstruct_' the
    embeddings.
    Parameters
    ----------
    embed_dim: int
        Size of the embeddings tensor to be reconstructed.
    n_steps: int, default = 3
        number of decision steps. For a better understanding of the function
        of `n_steps` and the upcoming parameters, please see the
        [paper](https://arxiv.org/abs/1908.07442).
    step_dim: int, default = 8
        Step's output dimension. This is the output dimension that
        `WideDeep` will collect and connect to the output neuron(s).
    dropout: float, default = 0.0
        GLU block's internal dropout
    n_glu_step_dependent: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) that are step dependent
    n_glu_shared: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) that will be shared
        across decision steps
    ghost_bn: bool, default=True
        Boolean indicating if [Ghost Batch Normalization](https://arxiv.org/abs/1705.08741)
        will be used.
    virtual_batch_size: int, default = 128
        Batch size when using Ghost Batch Normalization
    momentum: float, default = 0.02
        Ghost Batch Normalization's momentum. The dreamquark-ai advises for
        very low values. However high values are used in the original
        publication. During our tests higher values lead to better results
    Attributes
    ----------
    decoder: nn.Module
        decoder that will receive the output from the encoder's steps and will
        reconstruct the embeddings
    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabNetDecoder
    >>> x_inp = [torch.rand(3, 8), torch.rand(3, 8), torch.rand(3, 8)]
    >>> decoder = TabNetDecoder(embed_dim=32, ghost_bn=False)
    >>> res = decoder(x_inp)
    >>> res.shape
    torch.Size([3, 32])
    """
    def __init__(self, embed_dim: int, n_steps: int=3, step_dim: int=8, dropout: float=0.0, n_glu_step_dependent: int=2, n_glu_shared: int=2, ghost_bn: bool=True, virtual_batch_size: int=128, momentum: float=0.02):
        super(TabNetDecoder, self).__init__()
        self.n_steps = n_steps
        self.step_dim = step_dim
        self.dropout = dropout
        self.n_glu_step_dependent = n_glu_step_dependent
        self.n_glu_shared = n_glu_shared
        self.ghost_bn = ghost_bn
        self.virtual_batch_size = virtual_batch_size
        self.momentum = momentum
        # linear layers shared by every decoding step. Unlike the encoder, the
        # decoder always maps step_dim -> 2 * step_dim, so the previous
        # 'if i == 0' special case (whose two branches were identical) is gone
        shared_layers = nn.ModuleList()
        for _ in range(n_glu_shared):
            shared_layers.append(nn.Linear(step_dim, 2 * step_dim, bias=False))
        self.decoder = nn.ModuleList()
        for _ in range(n_steps):
            transformer = FeatTransformer(step_dim, step_dim, dropout, shared_layers, n_glu_step_dependent, ghost_bn, virtual_batch_size, momentum=momentum)
            self.decoder.append(transformer)
        self.reconstruction_layer = nn.Linear(step_dim, embed_dim, bias=False)
        initialize_non_glu(self.reconstruction_layer, step_dim, embed_dim)
    def forward(self, X: List[Tensor]) ->Tensor:
        """Decode each step's output, sum the reconstructions and project back
        to the embedding size.
        """
        # stack + sum stays on the inputs' device (the previous CPU scalar
        # accumulator torch.tensor(0.0) assumed CPU inputs)
        out = torch.stack([self.decoder[i](x) for i, x in enumerate(X)], dim=0).sum(dim=0)
        return self.reconstruction_layer(out)
DecoderWithoutAttention = Union[TabMlpDecoder, TabNetDecoder, TabResnetDecoder]
def _make_ix_like(input, dim=0):
    """Return a 1..d index tensor (d = input.size(dim)) shaped to broadcast
    along `dim` of `input`.
    """
    d = input.size(dim)
    rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
    view = [1] * input.dim()
    view[0] = -1
    return rho.view(view).transpose(0, dim)
class SparsemaxFunction(Function):
    """
    An implementation of sparsemax (Martins & Astudillo, 2016). See
    :cite:`DBLP:journals/corr/MartinsA16` for detailed description.
    By Ben Peters and Vlad Niculae
    """
    @staticmethod
    def forward(ctx, input, dim=-1):
        """sparsemax: normalizing sparse transform (a la softmax)
        Parameters
        ----------
        ctx : torch.autograd.function._ContextMethodMixin
        input : torch.Tensor
            any shape
        dim : int
            dimension along which to apply sparsemax
        Returns
        -------
        output : torch.Tensor
            same shape as input
        """
        ctx.dim = dim
        # subtract the max for numerical stability. This is done OUT-of-place:
        # the previous in-place `input -= max_val` ran under no_grad and
        # silently mutated the caller's tensor through the shared storage
        max_val, _ = input.max(dim=dim, keepdim=True)
        input = input - max_val
        tau, supp_size = SparsemaxFunction._threshold_and_support(input, dim=dim)
        output = torch.clamp(input - tau, min=0)
        ctx.save_for_backward(supp_size, output)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        # gradient of sparsemax: zero outside the support, mean-centered inside
        supp_size, output = ctx.saved_tensors
        dim = ctx.dim
        grad_input = grad_output.clone()
        grad_input[output == 0] = 0
        v_hat = grad_input.sum(dim=dim) / supp_size.squeeze()
        v_hat = v_hat.unsqueeze(dim)
        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
        return grad_input, None
    @staticmethod
    def _threshold_and_support(input, dim=-1):
        """Sparsemax building block: compute the threshold
        Parameters
        ----------
        input: torch.Tensor
            any dimension
        dim : int
            dimension along which to apply the sparsemax
        Returns
        -------
        tau : torch.Tensor
            the threshold value
        support_size : torch.Tensor
        """
        input_srt, _ = torch.sort(input, descending=True, dim=dim)
        input_cumsum = input_srt.cumsum(dim) - 1
        rhos = _make_ix_like(input, dim)
        # support: coordinates whose sorted value exceeds the running threshold
        support = rhos * input_srt > input_cumsum
        support_size = support.sum(dim=dim).unsqueeze(dim)
        tau = input_cumsum.gather(dim, support_size - 1)
        tau /= support_size
        return tau, support_size
sparsemax = SparsemaxFunction.apply
class AttentiveTransformer(nn.Module):
    # Computes the feature-selection mask of one TabNet decision step:
    # FC -> (Ghost)BatchNorm, multiplied element-wise by the prior scales and
    # normalized with a sparse softmax variant (sparsemax / entmax15)
    def __init__(self, input_dim: int, output_dim: int, mask_type: str='sparsemax', ghost_bn=True, virtual_batch_size=128, momentum=0.02):
        super(AttentiveTransformer, self).__init__()
        self.fc = nn.Linear(input_dim, output_dim, bias=False)
        initialize_non_glu(self.fc, input_dim, output_dim)
        if ghost_bn:
            self.bn: Union[GBN, nn.BatchNorm1d] = GBN(output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum)
        else:
            self.bn = nn.BatchNorm1d(output_dim, momentum=momentum)
        # NOTE(review): `sparsemax` is used here as a *module* providing
        # Sparsemax / Entmax15 classes, but elsewhere in this file the same
        # name is bound to SparsemaxFunction.apply -- verify which binding is
        # active, otherwise this raises AttributeError at construction
        if mask_type == 'sparsemax':
            self.mask: Union[sparsemax.Sparsemax, sparsemax.Entmax15] = sparsemax.Sparsemax(dim=-1)
        elif mask_type == 'entmax':
            self.mask = sparsemax.Entmax15(dim=-1)
        else:
            raise NotImplementedError("Please choose either 'sparsemax' or 'entmax' as masktype")
    def forward(self, priors: Tensor, processed_feat: Tensor) ->Tensor:
        # mask = sparse-normalize(BN(FC(features)) * prior)
        x = self.bn(self.fc(processed_feat))
        x = torch.mul(x, priors)
        return self.mask(x)
class TabNetEncoder(nn.Module):
    """TabNet encoder: `n_steps` sequential decision steps, each selecting
    features with an attentive sparse mask and transforming them with GLU
    blocks. See the [paper](https://arxiv.org/abs/1908.07442).
    """
    def __init__(self, input_dim: int, n_steps: int=3, step_dim: int=8, attn_dim: int=8, dropout: float=0.0, n_glu_step_dependent: int=2, n_glu_shared: int=2, ghost_bn: bool=True, virtual_batch_size: int=128, momentum: float=0.02, gamma: float=1.3, epsilon: float=1e-15, mask_type: str='sparsemax'):
        super(TabNetEncoder, self).__init__()
        self.input_dim = input_dim
        self.n_steps = n_steps
        self.step_dim = step_dim
        self.attn_dim = attn_dim
        self.gamma = gamma
        self.epsilon = epsilon
        self.initial_bn = nn.BatchNorm1d(input_dim, momentum=0.01)
        params = {'ghost_bn': ghost_bn, 'virtual_batch_size': virtual_batch_size, 'momentum': momentum}
        # linear layers shared across all steps. Each FeatTransformer outputs
        # step_dim + attn_dim features (decision part + attention part), and
        # only the first shared layer consumes the raw input width
        shared_layers = nn.ModuleList()
        for i in range(n_glu_shared):
            if i == 0:
                shared_layers.append(nn.Linear(input_dim, 2 * (step_dim + attn_dim), bias=False))
            else:
                shared_layers.append(nn.Linear(step_dim + attn_dim, 2 * (step_dim + attn_dim), bias=False))
        self.initial_splitter = FeatTransformer(input_dim, step_dim + attn_dim, dropout, shared_layers, n_glu_step_dependent, **params)
        self.feat_transformers = nn.ModuleList()
        self.attn_transformers = nn.ModuleList()
        for _ in range(n_steps):
            feat_transformer = FeatTransformer(input_dim, step_dim + attn_dim, dropout, shared_layers, n_glu_step_dependent, **params)
            attn_transformer = AttentiveTransformer(attn_dim, input_dim, mask_type, **params)
            self.feat_transformers.append(feat_transformer)
            self.attn_transformers.append(attn_transformer)
    def forward(self, X: Tensor, prior: Optional[Tensor]=None) ->Tuple[List[Tensor], Tensor]:
        """Run the decision steps.

        Returns the list of per-step (ReLU'd) decision outputs and the
        sparsity regularization term M_loss.
        """
        x = self.initial_bn(X)
        if prior is None:
            # ones_like keeps the prior on X's device/dtype; the previous
            # torch.ones(x.shape) allocated on the CPU and broke GPU runs
            prior = torch.ones_like(x)
        # sparsity (entropy) regularization, accumulated over steps
        M_loss = x.new_zeros(1)
        attn = self.initial_splitter(x)[:, self.step_dim:]
        steps_output = []
        for step in range(self.n_steps):
            M = self.attn_transformers[step](prior, attn)
            # relax the prior: features already used become less available
            prior = torch.mul(self.gamma - M, prior)
            M_loss += torch.mean(torch.sum(torch.mul(M, torch.log(M + self.epsilon)), dim=1))
            masked_x = torch.mul(M, x)
            out = self.feat_transformers[step](masked_x)
            # split the transformer output into decision and attention parts
            attn = out[:, self.step_dim:]
            d_out = nn.ReLU()(out[:, :self.step_dim])
            steps_output.append(d_out)
        M_loss /= self.n_steps
        return steps_output, M_loss
    def forward_masks(self, X: Tensor) ->Tuple[Tensor, Dict[int, Tensor]]:
        """Explainability helper: aggregated feature-importance matrix plus
        the per-step masks.
        """
        x = self.initial_bn(X)
        # device/dtype-safe initializations (see note in forward)
        prior = torch.ones_like(x)
        M_explain = torch.zeros_like(x)
        attn = self.initial_splitter(x)[:, self.step_dim:]
        masks = {}
        for step in range(self.n_steps):
            M = self.attn_transformers[step](prior, attn)
            masks[step] = M
            prior = torch.mul(self.gamma - M, prior)
            masked_x = torch.mul(M, x)
            out = self.feat_transformers[step](masked_x)
            attn = out[:, self.step_dim:]
            d_out = nn.ReLU()(out[:, :self.step_dim])
            # weight each step's mask by the magnitude of its decision output
            agg_decision_contrib = torch.sum(d_out, dim=1)
            M_explain += torch.mul(M, agg_decision_contrib.unsqueeze(dim=1))
        return M_explain, masks
class TabNet(BaseTabularModelWithoutAttention):
    """Defines a [TabNet model](https://arxiv.org/abs/1908.07442) that
    can be used as the `deeptabular` component of a Wide & Deep model or
    independently by itself.
    The implementation in this library is fully based on that
    [here](https://github.com/dreamquark-ai/tabnet) by the dreamquark-ai team,
    simply adapted so that it can work within the `WideDeep` frame.
    Therefore, **ALL CREDIT TO THE DREAMQUARK-AI TEAM**.
    Parameters
    ----------
    column_idx: Dict
        Dict containing the index of the columns that will be passed through
        the `TabNet` model. Required to slice the tensors. e.g. _{'education':
        0, 'relationship': 1, 'workclass': 2, ...}_
    cat_embed_input: List, Optional, default = None
        List of Tuples with the column name, number of unique values and
        embedding dimension. e.g. _[(education, 11, 32), ...]_
    cat_embed_dropout: float, default = 0.1
        Categorical embeddings dropout
    use_cat_bias: bool, default = False,
        Boolean indicating if bias will be used for the categorical embeddings
    cat_embed_activation: Optional, str, default = None,
        Activation function for the categorical embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    continuous_cols: List, Optional, default = None
        List with the name of the numeric (aka continuous) columns
    cont_norm_layer: str, default = "batchnorm"
        Type of normalization layer applied to the continuous features. Options
        are: _'layernorm'_, _'batchnorm'_ or `None`.
    embed_continuous: bool, default = False,
        Boolean indicating if the continuous columns will be embedded
        (i.e. passed each through a linear layer with or without activation)
    cont_embed_dim: int, default = 32,
        Size of the continuous embeddings
    cont_embed_dropout: float, default = 0.1,
        Dropout for the continuous embeddings
    use_cont_bias: bool, default = True,
        Boolean indicating if bias will be used for the continuous embeddings
    cont_embed_activation: Optional, str, default = None,
        Activation function for the continuous embeddings, if any. _'tanh'_,
        _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported.
    n_steps: int, default = 3
        number of decision steps. For a better understanding of the function
        of `n_steps` and the upcoming parameters, please see the
        [paper](https://arxiv.org/abs/1908.07442).
    step_dim: int, default = 8
        Step's output dimension. This is the output dimension that
        `WideDeep` will collect and connect to the output neuron(s).
    attn_dim: int, default = 8
        Attention dimension
    dropout: float, default = 0.0
        GLU block's internal dropout
    n_glu_step_dependent: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) that are step dependent
    n_glu_shared: int, default = 2
        number of GLU Blocks (`[FC -> BN -> GLU]`) that will be shared
        across decision steps
    ghost_bn: bool, default=True
        Boolean indicating if [Ghost Batch Normalization](https://arxiv.org/abs/1705.08741)
        will be used.
    virtual_batch_size: int, default = 128
        Batch size when using Ghost Batch Normalization
    momentum: float, default = 0.02
        Ghost Batch Normalization's momentum. The dreamquark-ai advises for
        very low values. However high values are used in the original
        publication. During our tests higher values lead to better results
    gamma: float, default = 1.3
        Relaxation parameter in the paper. When gamma = 1, a feature is
        enforced to be used only at one decision step. As gamma increases,
        more flexibility is provided to use a feature at multiple decision
        steps
    epsilon: float, default = 1e-15
        Float to avoid log(0). Always keep low
    mask_type: str, default = "sparsemax"
        Mask function to use. Either _'sparsemax'_ or _'entmax'_
    Attributes
    ----------
    cat_and_cont_embed: nn.Module
        This is the module that processes the categorical and continuous columns
    encoder: nn.Module
        the TabNet encoder. For details see the [original publication](https://arxiv.org/abs/1908.07442).
    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import TabNet
    >>> X_tab = torch.cat((torch.empty(5, 4).random_(4), torch.rand(5, 1)), axis=1)
    >>> colnames = ['a', 'b', 'c', 'd', 'e']
    >>> cat_embed_input = [(u,i,j) for u,i,j in zip(colnames[:4], [4]*4, [8]*4)]
    >>> column_idx = {k:v for v,k in enumerate(colnames)}
    >>> model = TabNet(column_idx=column_idx, cat_embed_input=cat_embed_input, continuous_cols = ['e'])
    >>> out = model(X_tab)
    """
    def __init__(self, column_idx: Dict[str, int], cat_embed_input: Optional[List[Tuple[str, int, int]]]=None, cat_embed_dropout: float=0.1, use_cat_bias: bool=False, cat_embed_activation: Optional[str]=None, continuous_cols: Optional[List[str]]=None, cont_norm_layer: str=None, embed_continuous: bool=False, cont_embed_dim: int=32, cont_embed_dropout: float=0.1, use_cont_bias: bool=True, cont_embed_activation: Optional[str]=None, n_steps: int=3, step_dim: int=8, attn_dim: int=8, dropout: float=0.0, n_glu_step_dependent: int=2, n_glu_shared: int=2, ghost_bn: bool=True, virtual_batch_size: int=128, momentum: float=0.02, gamma: float=1.3, epsilon: float=1e-15, mask_type: str='sparsemax'):
        # the embedding machinery (categorical + continuous) lives in the base class
        super(TabNet, self).__init__(column_idx=column_idx, cat_embed_input=cat_embed_input, cat_embed_dropout=cat_embed_dropout, use_cat_bias=use_cat_bias, cat_embed_activation=cat_embed_activation, continuous_cols=continuous_cols, cont_norm_layer=cont_norm_layer, embed_continuous=embed_continuous, cont_embed_dim=cont_embed_dim, cont_embed_dropout=cont_embed_dropout, use_cont_bias=use_cont_bias, cont_embed_activation=cont_embed_activation)
        # hyperparameters are stored on the instance (also inspected by the
        # self-supervised decoder builder)
        self.n_steps = n_steps
        self.step_dim = step_dim
        self.attn_dim = attn_dim
        self.dropout = dropout
        self.n_glu_step_dependent = n_glu_step_dependent
        self.n_glu_shared = n_glu_shared
        self.ghost_bn = ghost_bn
        self.virtual_batch_size = virtual_batch_size
        self.momentum = momentum
        self.gamma = gamma
        self.epsilon = epsilon
        self.mask_type = mask_type
        # width of the embedded input that feeds the TabNet encoder
        self.embed_out_dim = self.cat_and_cont_embed.output_dim
        self.encoder = TabNetEncoder(self.embed_out_dim, n_steps, step_dim, attn_dim, dropout, n_glu_step_dependent, n_glu_shared, ghost_bn, virtual_batch_size, momentum, gamma, epsilon, mask_type)
    def forward(self, X: Tensor, prior: Optional[Tensor]=None) ->Tuple[Tensor, Tensor]:
        # Returns (aggregated step outputs, sparsity regularization loss M_loss)
        x = self._get_embeddings(X)
        steps_output, M_loss = self.encoder(x, prior)
        # aggregate the decision steps by summation, as in the paper
        res = torch.sum(torch.stack(steps_output, dim=0), dim=0)
        return res, M_loss
    def forward_masks(self, X: Tensor) ->Tuple[Tensor, Dict[int, Tensor]]:
        # Explainability helper: aggregated feature-importances and per-step masks
        x = self._get_embeddings(X)
        return self.encoder.forward_masks(x)
    @property
    def output_dim(self) ->int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        return self.step_dim
ModelWithoutAttention = Union[TabMlp, TabNet, TabResnet]
class EncoderDecoderModel(nn.Module):
    """This Class, which is referred as a 'Model', implements an Encoder-Decoder
    Self Supervised 'routine' inspired by `TabNet: Attentive Interpretable
    Tabular Learning <https://arxiv.org/abs/1908.07442>_`
    This class is in principle not exposed to the user and its documentation
    is detailed in its corresponding Trainer: see
    ``pytorch_widedeep.self_supervised_training.EncoderDecoderTrainer``
    """
    def __init__(self, encoder: ModelWithoutAttention, decoder: Optional[DecoderWithoutAttention], masked_prob: float):
        super(EncoderDecoderModel, self).__init__()
        self.encoder = encoder
        # When no decoder is supplied, build one that mirrors the encoder's
        # architecture (see the _build_*_decoder helpers below).
        if decoder is None:
            self.decoder = self._build_decoder(encoder)
        else:
            self.decoder = decoder
        # Randomly masks a fraction 'masked_prob' of the embedded inputs.
        self.masker = RandomObfuscator(p=masked_prob)
        # TabNet needs a dedicated routine: its encoder consumes a prior mask
        # and produces a list of per-step outputs.
        self.is_tabnet = isinstance(self.encoder, TabNet)
    def forward(self, X: Tensor) ->Tuple[Tensor, Tensor, Tensor]:
        # Dispatch on encoder type; both branches return a 3-tuple of tensors.
        if self.is_tabnet:
            return self._forward_tabnet(X)
        else:
            return self._forward(X)
    def _forward(self, X: Tensor) ->Tuple[Tensor, Tensor, Tensor]:
        x_embed = self.encoder._get_embeddings(X)
        if self.training:
            masked_x, mask = self.masker(x_embed)
            # NOTE(review): 'masked_x' is computed but never used -- the
            # decoder reconstructs from the encoding of the *unmasked* input
            # X. Verify whether the masked embeddings should be fed to the
            # encoder here instead.
            x_embed_rec = self.decoder(self.encoder(X))
        else:
            x_embed_rec = self.decoder(self.encoder(X))
            # No masking at eval time: an all-ones mask marks every feature.
            mask = torch.ones(x_embed.shape)
        return x_embed, x_embed_rec, mask
    def _forward_tabnet(self, X: Tensor) ->Tuple[Tensor, Tensor, Tensor]:
        x_embed = self.encoder._get_embeddings(X)
        if self.training:
            masked_x, mask = self.masker(x_embed)
            # The prior (1 - mask) is passed to TabNet's attentive encoder.
            prior = 1 - mask
            steps_out, _ = self.encoder.encoder(masked_x, prior=prior)
            x_embed_rec = self.decoder(steps_out)
        else:
            steps_out, _ = self.encoder(x_embed)
            x_embed_rec = self.decoder(steps_out)
            mask = torch.ones(x_embed.shape)
        # NOTE(review): this returns (reconstruction, target, mask) while
        # '_forward' returns (target, reconstruction, mask) -- confirm the
        # trainer expects these two different orders.
        return x_embed_rec, x_embed, mask
    def _build_decoder(self, encoder: ModelWithoutAttention) ->DecoderWithoutAttention:
        # NOTE(review): if 'encoder' matches none of the three types below,
        # 'decoder' is never bound and the return raises NameError.
        if isinstance(encoder, TabMlp):
            decoder = self._build_tabmlp_decoder()
        if isinstance(encoder, TabResnet):
            decoder = self._build_tabresnet_decoder()
        if isinstance(encoder, TabNet):
            decoder = self._build_tabnet_decoder()
        return decoder
    def _build_tabmlp_decoder(self) ->DecoderWithoutAttention:
        # Copy the constructor parameters the encoder and decoder share...
        common_params = inspect.signature(TabMlp).parameters.keys() & inspect.signature(TabMlpDecoder).parameters.keys()
        decoder_param = {}
        for cpn in common_params:
            decoder_param[cpn] = getattr(self.encoder, cpn)
        # ...and reverse the MLP sizes so the decoder expands back to the
        # embedding dimension.
        decoder_param['mlp_hidden_dims'] = decoder_param['mlp_hidden_dims'][::-1]
        decoder_param['embed_dim'] = self.encoder.cat_and_cont_embed.output_dim
        return TabMlpDecoder(**decoder_param)
    def _build_tabresnet_decoder(self) ->DecoderWithoutAttention:
        common_params = inspect.signature(TabResnet).parameters.keys() & inspect.signature(TabResnetDecoder).parameters.keys()
        decoder_param = {}
        for cpn in common_params:
            decoder_param[cpn] = getattr(self.encoder, cpn)
        # Reverse the block (and optional MLP) sizes to invert the encoder.
        decoder_param['blocks_dims'] = decoder_param['blocks_dims'][::-1]
        if decoder_param['mlp_hidden_dims'] is not None:
            decoder_param['mlp_hidden_dims'] = decoder_param['mlp_hidden_dims'][::-1]
        decoder_param['embed_dim'] = self.encoder.cat_and_cont_embed.output_dim
        return TabResnetDecoder(**decoder_param)
    def _build_tabnet_decoder(self) ->DecoderWithoutAttention:
        common_params = inspect.signature(TabNet).parameters.keys() & inspect.signature(TabNetDecoder).parameters.keys()
        decoder_param = {}
        for cpn in common_params:
            decoder_param[cpn] = getattr(self.encoder, cpn)
        decoder_param['embed_dim'] = self.encoder.cat_and_cont_embed.output_dim
        return TabNetDecoder(**decoder_param)
class Sparsemax(nn.Module):
    """Module wrapper around the functional ``sparsemax``.

    Applies the sparsemax transformation along dimension ``dim``
    (default: the last dimension).
    """
    def __init__(self, dim=-1):
        super(Sparsemax, self).__init__()
        self.dim = dim
    def forward(self, input):
        return sparsemax(input, self.dim)
class Entmax15Function(Function):
    """
    An implementation of exact Entmax with alpha=1.5 (B. Peters, V. Niculae, A. Martins). See
    :cite:`https://arxiv.org/abs/1905.05702 for detailed description.
    Source: https://github.com/deep-spin/entmax
    """
    @staticmethod
    def forward(ctx, input, dim=-1):
        ctx.dim = dim
        # Shift by the max for numerical stability (the mapping is
        # shift-invariant).
        max_val, _ = input.max(dim=dim, keepdim=True)
        input = input - max_val
        # For alpha = 1.5 the closed-form solution operates on input / 2.
        input = input / 2
        tau_star, _ = Entmax15Function._threshold_and_support(input, dim)
        # Entries at or below the threshold are zeroed; the rest are squared.
        output = torch.clamp(input - tau_star, min=0) ** 2
        ctx.save_for_backward(output)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        Y, = ctx.saved_tensors
        # Jacobian-vector product expressed through sqrt of the saved output
        # (zero entries of Y contribute nothing to the gradient).
        gppr = Y.sqrt()
        dX = grad_output * gppr
        q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
        q = q.unsqueeze(ctx.dim)
        dX -= q * gppr
        # Second None corresponds to the non-differentiable 'dim' argument.
        return dX, None
    @staticmethod
    def _threshold_and_support(input, dim=-1):
        # Sort descending so cumulative statistics over the top-k entries can
        # be evaluated for every candidate support size k.
        Xsrt, _ = torch.sort(input, descending=True, dim=dim)
        rho = _make_ix_like(input, dim)
        mean = Xsrt.cumsum(dim) / rho
        mean_sq = (Xsrt ** 2).cumsum(dim) / rho
        ss = rho * (mean_sq - mean ** 2)
        delta = (1 - ss) / rho
        # Clamp negatives (which occur past the true support size) before sqrt.
        delta_nz = torch.clamp(delta, 0)
        tau = mean - torch.sqrt(delta_nz)
        # Support = entries whose sorted value reaches the threshold.
        support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim)
        tau_star = tau.gather(dim, support_size - 1)
        return tau_star, support_size
# Functional alias for the 1.5-entmax transformation defined above.
entmax15 = Entmax15Function.apply
class Entmax15(nn.Module):
    """Module wrapper around the functional ``entmax15``.

    Applies the 1.5-entmax transformation along dimension ``dim``
    (default: the last dimension).
    """
    def __init__(self, dim=-1):
        super(Entmax15, self).__init__()
        self.dim = dim
    def forward(self, input):
        return entmax15(input, self.dim)
class TabNetPredLayer(nn.Module):
    def __init__(self, inp, out):
        """Prediction layer tailored to TabNet's two-tensor output.

        Within `WideDeep`, TabNet's forward returns both the last layer's
        activations and the sparse regularization factor. Since `WideDeep`
        builds the output layer sequentially on top of the collected output,
        this custom layer accepts that pair, projects the activations with a
        bias-free linear layer and passes the regularization factor through
        untouched.
        """
        super(TabNetPredLayer, self).__init__()
        self.pred_layer = nn.Linear(inp, out, bias=False)
        initialize_non_glu(self.pred_layer, inp, out)
    def forward(self, tabnet_tuple: Tuple[Tensor, Tensor]) ->Tuple[Tensor, Tensor]:
        # Unpack (activations, sparsity loss); only the activations are
        # projected.
        activations, sparse_loss = tabnet_tuple
        return self.pred_layer(activations), sparse_loss
class BasicRNN(nn.Module):
    """Standard text classifier/regressor comprised by a stack of RNNs
    (LSTMs or GRUs) that can be used as the `deeptext` component of a Wide &
    Deep model or independently by itself.
    In addition, there is the option to add a Fully Connected (FC) set of
    dense layers on top of the stack of RNNs
    Parameters
    ----------
    vocab_size: int
        Number of words in the vocabulary
    embed_dim: int, Optional, default = None
        Dimension of the word embeddings if non-pretained word vectors are
        used
    embed_matrix: np.ndarray, Optional, default = None
        Pretrained word embeddings
    embed_trainable: bool, default = True
        Boolean indicating if the pretrained embeddings are trainable
    rnn_type: str, default = 'lstm'
        String indicating the type of RNN to use. One of _'lstm'_ or _'gru'_
    hidden_dim: int, default = 64
        Hidden dim of the RNN
    n_layers: int, default = 3
        Number of recurrent layers
    rnn_dropout: float, default = 0.1
        Dropout for each RNN layer except the last layer
    bidirectional: bool, default = True
        Boolean indicating whether the staked RNNs are bidirectional
    use_hidden_state: str, default = True
        Boolean indicating whether to use the final hidden state or the RNN's
        output as predicting features. Typically the former is used.
    padding_idx: int, default = 1
        index of the padding token in the padded-tokenised sequences. The
        `TextPreprocessor` class within this library uses fastai's tokenizer
        where the token index 0 is reserved for the _'unknown'_ word token.
        Therefore, the default value is set to 1.
    head_hidden_dims: List, Optional, default = None
        List with the sizes of the dense layers in the head e.g: _[128, 64]_
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    head_dropout: float, Optional, default = None
        Dropout of the dense layers in the head
    head_batchnorm: bool, default = False
        Boolean indicating whether or not to include batch normalization in
        the dense layers that form the _'rnn_mlp'_
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers in the head
    head_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`
    Attributes
    ----------
    word_embed: nn.Module
        word embedding matrix
    rnn: nn.Module
        Stack of RNNs
    rnn_mlp: nn.Module
        Stack of dense layers on top of the RNN. This will only exists if
        `head_layers_dim` is not None
    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import BasicRNN
    >>> X_text = torch.cat((torch.zeros([5,1]), torch.empty(5, 4).random_(1,4)), axis=1)
    >>> model = BasicRNN(vocab_size=4, hidden_dim=4, n_layers=2, padding_idx=0, embed_dim=4)
    >>> out = model(X_text)
    """
    def __init__(self, vocab_size: int, embed_dim: Optional[int]=None, embed_matrix: Optional[np.ndarray]=None, embed_trainable: bool=True, rnn_type: str='lstm', hidden_dim: int=64, n_layers: int=3, rnn_dropout: float=0.1, bidirectional: bool=False, use_hidden_state: bool=True, padding_idx: int=1, head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: Optional[float]=None, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False):
        super(BasicRNN, self).__init__()
        # A pretrained matrix takes precedence over 'embed_dim' on mismatch
        # (see '_set_embeddings').
        if embed_dim is not None and embed_matrix is not None and not embed_dim == embed_matrix.shape[1]:
            warnings.warn('the input embedding dimension {} and the dimension of the pretrained embeddings {} do not match. The pretrained embeddings dimension ({}) will be used'.format(embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]), UserWarning)
        if rnn_type.lower() not in ['lstm', 'gru']:
            raise ValueError(f"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead")
        self.vocab_size = vocab_size
        self.embed_trainable = embed_trainable
        self.embed_dim = embed_dim
        self.rnn_type = rnn_type
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.rnn_dropout = rnn_dropout
        self.bidirectional = bidirectional
        self.use_hidden_state = use_hidden_state
        self.padding_idx = padding_idx
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first
        self.word_embed, self.embed_dim = self._set_embeddings(embed_matrix)
        # batch_first=True -> tensors are (batch, seq, feature).
        rnn_params = {'input_size': self.embed_dim, 'hidden_size': hidden_dim, 'num_layers': n_layers, 'bidirectional': bidirectional, 'dropout': rnn_dropout, 'batch_first': True}
        if self.rnn_type.lower() == 'lstm':
            self.rnn: Union[nn.LSTM, nn.GRU] = nn.LSTM(**rnn_params)
        elif self.rnn_type.lower() == 'gru':
            self.rnn = nn.GRU(**rnn_params)
        # Bidirectional RNNs concatenate forward and backward features.
        self.rnn_output_dim = hidden_dim * 2 if bidirectional else hidden_dim
        if self.head_hidden_dims is not None:
            head_hidden_dims = [self.rnn_output_dim] + head_hidden_dims
            self.rnn_mlp: Union[MLP, nn.Identity] = MLP(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)
        else:
            self.rnn_mlp = nn.Identity()
    def forward(self, X: Tensor) ->Tensor:
        embed = self.word_embed(X.long())
        if self.rnn_type.lower() == 'lstm':
            o, (h, c) = self.rnn(embed)
        elif self.rnn_type.lower() == 'gru':
            o, h = self.rnn(embed)
        # Reduce (output, hidden) to a single feature vector per sample.
        processed_outputs = self._process_rnn_outputs(o, h)
        return self.rnn_mlp(processed_outputs)
    @property
    def output_dim(self) ->int:
        """The output dimension of the model. This is a required property
        neccesary to build the `WideDeep` class
        """
        return self.head_hidden_dims[-1] if self.head_hidden_dims is not None else self.rnn_output_dim
    def _set_embeddings(self, embed_matrix: Union[Any, np.ndarray]) ->Tuple[nn.Module, int]:
        # Either load the pretrained matrix or create a trainable embedding
        # layer of size 'embed_dim'; returns the layer and its dimension.
        if isinstance(embed_matrix, np.ndarray):
            assert embed_matrix.dtype == 'float32', "'embed_matrix' must be of dtype 'float32', got dtype '{}'".format(str(embed_matrix.dtype))
            word_embed = nn.Embedding(self.vocab_size, embed_matrix.shape[1], padding_idx=self.padding_idx)
            if self.embed_trainable:
                word_embed.weight = nn.Parameter(torch.tensor(embed_matrix), requires_grad=True)
            else:
                word_embed.weight = nn.Parameter(torch.tensor(embed_matrix), requires_grad=False)
            embed_dim = embed_matrix.shape[1]
        else:
            word_embed = nn.Embedding(self.vocab_size, self.embed_dim, padding_idx=self.padding_idx)
            embed_dim = self.embed_dim
        return word_embed, embed_dim
    def _process_rnn_outputs(self, output: Tensor, hidden: Tensor) ->Tensor:
        # (batch, seq, feat) -> (seq, batch, feat) so output[-1] is the last
        # time step for every sample.
        output = output.permute(1, 0, 2)
        if self.bidirectional:
            # hidden[-2]/hidden[-1] are the last layer's forward/backward
            # final states.
            processed_outputs = torch.cat((hidden[-2], hidden[-1]), dim=1) if self.use_hidden_state else output[-1]
        else:
            processed_outputs = hidden[-1] if self.use_hidden_state else output[-1]
        return processed_outputs
class StackedAttentiveRNN(nn.Module):
    """Text classifier/regressor comprised by a stack of blocks:
    `[RNN + Attention]`. This can be used as the `deeptext` component of a
    Wide & Deep model or independently by itself.
    In addition, there is the option to add a Fully Connected (FC) set of
    dense layers on top of the attentiob blocks
    Parameters
    ----------
    vocab_size: int
        Number of words in the vocabulary
    embed_dim: int, Optional, default = None
        Dimension of the word embeddings if non-pretained word vectors are
        used
    embed_matrix: np.ndarray, Optional, default = None
        Pretrained word embeddings
    embed_trainable: bool, default = True
        Boolean indicating if the pretrained embeddings are trainable
    rnn_type: str, default = 'lstm'
        String indicating the type of RNN to use. One of 'lstm' or 'gru'
    hidden_dim: int, default = 64
        Hidden dim of the RNN
    bidirectional: bool, default = True
        Boolean indicating whether the staked RNNs are bidirectional
    padding_idx: int, default = 1
        index of the padding token in the padded-tokenised sequences. The
        `TextPreprocessor` class within this library uses fastai's
        tokenizer where the token index 0 is reserved for the _'unknown'_
        word token. Therefore, the default value is set to 1.
    n_blocks: int, default = 3
        Number of attention blocks. Each block is comprised by an RNN and a
        Context Attention Encoder
    attn_concatenate: bool, default = True
        Boolean indicating if the input to the attention mechanism will be the
        output of the RNN or the output of the RNN concatenated with the last
        hidden state or simply
    attn_dropout: float, default = 0.1
        Internal dropout for the attention mechanism
    with_addnorm: bool, default = False
        Boolean indicating if the output of each block will be added to the
        input and normalised
    head_hidden_dims: List, Optional, default = None
        List with the sizes of the dense layers in the head e.g: [128, 64]
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        _'tanh'_, _'relu'_, _'leaky_relu'_ and _'gelu'_ are supported
    head_dropout: float, Optional, default = None
        Dropout of the dense layers in the head
    head_batchnorm: bool, default = False
        Boolean indicating whether or not to include batch normalization in
        the dense layers that form the _'rnn_mlp'_
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers in the head
    head_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`
    Attributes
    ----------
    word_embed: nn.Module
        word embedding matrix
    rnn: nn.Module
        Stack of RNNs
    rnn_mlp: nn.Module
        Stack of dense layers on top of the RNN. This will only exists if
        `head_layers_dim` is not `None`
    Examples
    --------
    >>> import torch
    >>> from pytorch_widedeep.models import StackedAttentiveRNN
    >>> X_text = torch.cat((torch.zeros([5,1]), torch.empty(5, 4).random_(1,4)), axis=1)
    >>> model = StackedAttentiveRNN(vocab_size=4, hidden_dim=4, padding_idx=0, embed_dim=4)
    >>> out = model(X_text)
    """
    def __init__(self, vocab_size: int, embed_dim: Optional[int]=None, embed_matrix: Optional[np.ndarray]=None, embed_trainable: bool=True, rnn_type: str='lstm', hidden_dim: int=64, bidirectional: bool=False, padding_idx: int=1, n_blocks: int=3, attn_concatenate: bool=False, attn_dropout: float=0.1, with_addnorm: bool=False, head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: Optional[float]=None, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False):
        super(StackedAttentiveRNN, self).__init__()
        # A pretrained matrix takes precedence over 'embed_dim' on mismatch
        # (see '_set_embeddings').
        if embed_dim is not None and embed_matrix is not None and not embed_dim == embed_matrix.shape[1]:
            warnings.warn('the input embedding dimension {} and the dimension of the pretrained embeddings {} do not match. The pretrained embeddings dimension ({}) will be used'.format(embed_dim, embed_matrix.shape[1], embed_matrix.shape[1]), UserWarning)
        if rnn_type.lower() not in ['lstm', 'gru']:
            raise ValueError(f"'rnn_type' must be 'lstm' or 'gru', got {rnn_type} instead")
        self.vocab_size = vocab_size
        self.embed_trainable = embed_trainable
        self.embed_dim = embed_dim
        self.rnn_type = rnn_type
        self.hidden_dim = hidden_dim
        self.bidirectional = bidirectional
        self.padding_idx = padding_idx
        self.n_blocks = n_blocks
        self.attn_concatenate = attn_concatenate
        self.attn_dropout = attn_dropout
        self.with_addnorm = with_addnorm
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first
        self.word_embed, self.embed_dim = self._set_embeddings(embed_matrix)
        # Feature dim per block: x2 for bidirectional, x2 for concatenating
        # the hidden state to the RNN output in the attention input.
        if bidirectional and attn_concatenate:
            self.rnn_output_dim = hidden_dim * 4
        elif bidirectional or attn_concatenate:
            self.rnn_output_dim = hidden_dim * 2
        else:
            self.rnn_output_dim = hidden_dim
        # Project the embeddings so the first block sees the same feature
        # dimension as all subsequent blocks.
        if self.rnn_output_dim != self.embed_dim:
            self.embed_proj: Union[nn.Linear, nn.Identity] = nn.Linear(self.embed_dim, self.rnn_output_dim)
        else:
            self.embed_proj = nn.Identity()
        rnn_params = {'input_size': self.rnn_output_dim, 'hidden_size': hidden_dim, 'bidirectional': bidirectional, 'batch_first': True}
        if self.rnn_type.lower() == 'lstm':
            self.rnn: Union[nn.LSTM, nn.GRU] = nn.LSTM(**rnn_params)
        elif self.rnn_type.lower() == 'gru':
            self.rnn = nn.GRU(**rnn_params)
        # Addnorm is skipped on the last block, which instead sums along the
        # sequence dimension to produce one vector per sample.
        self.attention_blks = nn.ModuleList()
        for i in range(n_blocks):
            self.attention_blks.append(ContextAttentionEncoder(self.rnn, self.rnn_output_dim, attn_dropout, attn_concatenate, with_addnorm=with_addnorm if i != n_blocks - 1 else False, sum_along_seq=i == n_blocks - 1))
        if self.head_hidden_dims is not None:
            head_hidden_dims = [self.rnn_output_dim] + head_hidden_dims
            self.rnn_mlp: Union[MLP, nn.Identity] = MLP(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)
        else:
            self.rnn_mlp = nn.Identity()
    def forward(self, X: Tensor) ->Tensor:
        x = self.embed_proj(self.word_embed(X.long()))
        # Zero-initialised hidden (and, for LSTMs, cell) states.
        # NOTE(review): created on the default device; may need moving to
        # X.device for GPU inputs.
        h = nn.init.zeros_(torch.Tensor(2 if self.bidirectional else 1, X.shape[0], self.hidden_dim))
        # Fix: compare case-insensitively, consistent with __init__ -- a
        # user passing 'LSTM' would otherwise get c=None for an LSTM.
        if self.rnn_type.lower() == 'lstm':
            c = nn.init.zeros_(torch.Tensor(2 if self.bidirectional else 1, X.shape[0], self.hidden_dim))
        else:
            c = None
        for blk in self.attention_blks:
            x, h, c = blk(x, h, c)
        return self.rnn_mlp(x)
    @property
    def output_dim(self) ->int:
        """The output dimension of the model. This is a required property
        necessary to build the `WideDeep` class
        """
        # Fix: decorated with @property, as in `BasicRNN` -- `WideDeep`
        # reads `output_dim` as an attribute, not as a method call.
        return self.head_hidden_dims[-1] if self.head_hidden_dims is not None else self.rnn_output_dim
    @property
    def attention_weights(self) ->List:
        """List with the attention weights per block
        The shape of the attention weights is $(N, S)$ Where $N$ is the batch
        size and $S$ is the length of the sequence
        """
        return [blk.attn.attn_weights for blk in self.attention_blks]
    def _set_embeddings(self, embed_matrix: Union[Any, np.ndarray]) ->Tuple[nn.Module, int]:
        # Either load the pretrained matrix or create a trainable embedding
        # layer of size 'embed_dim'; returns the layer and its dimension.
        if isinstance(embed_matrix, np.ndarray):
            assert embed_matrix.dtype == 'float32', "'embed_matrix' must be of dtype 'float32', got dtype '{}'".format(str(embed_matrix.dtype))
            word_embed = nn.Embedding(self.vocab_size, embed_matrix.shape[1], padding_idx=self.padding_idx)
            if self.embed_trainable:
                word_embed.weight = nn.Parameter(torch.tensor(embed_matrix), requires_grad=True)
            else:
                word_embed.weight = nn.Parameter(torch.tensor(embed_matrix), requires_grad=False)
            embed_dim = embed_matrix.shape[1]
        else:
            word_embed = nn.Embedding(self.vocab_size, self.embed_dim, padding_idx=self.padding_idx)
            embed_dim = self.embed_dim
        return word_embed, embed_dim
class WideDeep(nn.Module):
    """Main collector class that combines all `wide`, `deeptabular`
    `deeptext` and `deepimage` models.
    Note that all models described so far in this library must be passed to
    the `WideDeep` class once constructed. This is because the models output
    the last layer before the prediction layer. Such prediction layer is
    added by the `WideDeep` class as it collects the components for every
    data mode.
    There are two options to combine these models that correspond to the
    two main architectures that `pytorch-widedeep` can build.
    - Directly connecting the output of the model components to an ouput neuron(s).
    - Adding a `Fully-Connected Head` (FC-Head) on top of the deep models.
    This FC-Head will combine the output form the `deeptabular`, `deeptext` and
    `deepimage` and will be then connected to the output neuron(s).
    Parameters
    ----------
    wide: nn.Module, Optional, default = None
        `Wide` model. This is a linear model where the non-linearities are
        captured via crossed-columns.
    deeptabular: nn.Module, Optional, default = None
        Currently this library implements a number of possible architectures
        for the `deeptabular` component. See the documenation of the
        package.
    deeptext: nn.Module, Optional, default = None
        Currently this library implements a number of possible architectures
        for the `deeptext` component. See the documenation of the
        package.
    deepimage: nn.Module, Optional, default = None
        Currently this library uses `torchvision` and implements a number of
        possible architectures for the `deepimage` component. See the
        documenation of the package.
    head_hidden_dims: List, Optional, default = None
        List with the sizes of the dense layers in the head e.g: [128, 64]
    head_activation: str, default = "relu"
        Activation function for the dense layers in the head. Currently
        `'tanh'`, `'relu'`, `'leaky_relu'` and `'gelu'` are supported
    head_dropout: float, Optional, default = None
        Dropout of the dense layers in the head
    head_batchnorm: bool, default = False
        Boolean indicating whether or not to include batch normalization in
        the dense layers that form the `'rnn_mlp'`
    head_batchnorm_last: bool, default = False
        Boolean indicating whether or not to apply batch normalization to the
        last of the dense layers in the head
    head_linear_first: bool, default = False
        Boolean indicating whether the order of the operations in the dense
        layer. If `True: [LIN -> ACT -> BN -> DP]`. If `False: [BN -> DP ->
        LIN -> ACT]`
    deephead: nn.Module, Optional, default = None
        Alternatively, the user can pass a custom model that will receive the
        output of the deep component. If `deephead` is not None all the
        previous fc-head parameters will be ignored
    enforce_positive: bool, default = False
        Boolean indicating if the output from the final layer must be
        positive. This is important if you are using loss functions with
        non-negative input restrictions, e.g. RMSLE, or if you know your
        predictions are bounded in between 0 and inf
    enforce_positive_activation: str, default = "softplus"
        Activation function to enforce that the final layer has a positive
        output. `'softplus'` or `'relu'` are supported.
    pred_dim: int, default = 1
        Size of the final wide and deep output layer containing the
        predictions. `1` for regression and binary classification or number
        of classes for multiclass classification.
    with_fds: bool, default = False
        Boolean indicating if Feature Distribution Smoothing (FDS) will be
        applied before the final prediction layer. Only available for
        regression problems.
        See [Delving into Deep Imbalanced Regression](https://arxiv.org/abs/2102.09554) for details.
    Other Parameters
    ----------------
    **fds_config: dict, default = None
        Dictionary with the parameters to be used when using Feature
        Distribution Smoothing. Please, see the docs for the `FDSLayer`.
        <br/>
        :information_source: **NOTE**: Feature Distribution Smoothing
        is available when using **ONLY** a `deeptabular` component
        <br/>
        :information_source: **NOTE**: We consider this feature absolutely
        experimental and we recommend the user to not use it unless the
        corresponding [publication](https://arxiv.org/abs/2102.09554) is
        well understood
    Examples
    --------
    >>> from pytorch_widedeep.models import TabResnet, Vision, BasicRNN, Wide, WideDeep
    >>> embed_input = [(u, i, j) for u, i, j in zip(["a", "b", "c"][:4], [4] * 3, [8] * 3)]
    >>> column_idx = {k: v for v, k in enumerate(["a", "b", "c"])}
    >>> wide = Wide(10, 1)
    >>> deeptabular = TabResnet(blocks_dims=[8, 4], column_idx=column_idx, cat_embed_input=embed_input)
    >>> deeptext = BasicRNN(vocab_size=10, embed_dim=4, padding_idx=0)
    >>> deepimage = Vision()
    >>> model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
    :information_source: **NOTE**: It is possible to use custom components to
    build Wide & Deep models. Simply, build them and pass them as the
    corresponding parameters. Note that the custom models MUST return a last
    layer of activations(i.e. not the final prediction) so that these
    activations are collected by `WideDeep` and combined accordingly. In
    addition, the models MUST also contain an attribute `output_dim` with
    the size of these last layers of activations. See for example
    `pytorch_widedeep.models.tab_mlp.TabMlp`
    """
    def __init__(self, wide: Optional[nn.Module]=None, deeptabular: Optional[nn.Module]=None, deeptext: Optional[nn.Module]=None, deepimage: Optional[nn.Module]=None, deephead: Optional[nn.Module]=None, head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: float=0.1, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False, enforce_positive: bool=False, enforce_positive_activation: str='softplus', pred_dim: int=1, with_fds: bool=False, **fds_config):
        super(WideDeep, self).__init__()
        # Validate component compatibility before wiring anything together.
        self._check_inputs(wide, deeptabular, deeptext, deepimage, deephead, head_hidden_dims, pred_dim, with_fds)
        # Set lazily by the trainer; None until a device is assigned.
        self.wd_device: Optional[str] = None
        self.pred_dim = pred_dim
        self.wide = wide
        self.deeptabular = deeptabular
        self.deeptext = deeptext
        self.deepimage = deepimage
        self.deephead = deephead
        self.enforce_positive = enforce_positive
        self.with_fds = with_fds
        # TabNet returns (activations, M_loss) so it needs special handling
        # throughout the forward passes below.
        if self.deeptabular is not None:
            self.is_tabnet = deeptabular.__class__.__name__ == 'TabNet'
        else:
            self.is_tabnet = False
        if self.deephead is None and head_hidden_dims is not None:
            self._build_deephead(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)
        elif self.deephead is not None:
            pass
        else:
            # No head: connect each component directly to the output layer.
            self._add_pred_layer()
        if self.with_fds:
            self.fds_layer = FDSLayer(feature_dim=self.deeptabular.output_dim, **fds_config)
        if self.enforce_positive:
            self.enf_pos = get_activation_fn(enforce_positive_activation)
    def forward(self, X: Dict[str, Tensor], y: Optional[Tensor]=None, epoch: Optional[int]=None):
        # 'y' and 'epoch' are only used by the FDS path (training-time
        # feature-distribution smoothing).
        if self.with_fds:
            return self._forward_deep_with_fds(X, y, epoch)
        wide_out = self._forward_wide(X)
        if self.deephead:
            deep = self._forward_deephead(X, wide_out)
        else:
            deep = self._forward_deep(X, wide_out)
        if self.enforce_positive:
            return self.enf_pos(deep)
        else:
            return deep
    def _build_deephead(self, head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first):
        # The head input is the concatenation of all deep component outputs.
        deep_dim = 0
        if self.deeptabular is not None:
            deep_dim += self.deeptabular.output_dim
        if self.deeptext is not None:
            deep_dim += self.deeptext.output_dim
        if self.deepimage is not None:
            deep_dim += self.deepimage.output_dim
        head_hidden_dims = [deep_dim] + head_hidden_dims
        self.deephead = MLP(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)
        self.deephead.add_module('head_out', nn.Linear(head_hidden_dims[-1], self.pred_dim))
    def _add_pred_layer(self):
        # Wrap each component with its own linear projection to 'pred_dim'
        # so the component outputs can simply be summed in '_forward_deep'.
        if self.deeptabular is not None and not self.with_fds:
            if self.is_tabnet:
                self.deeptabular = nn.Sequential(self.deeptabular, TabNetPredLayer(self.deeptabular.output_dim, self.pred_dim))
            else:
                self.deeptabular = nn.Sequential(self.deeptabular, nn.Linear(self.deeptabular.output_dim, self.pred_dim))
        if self.deeptext is not None:
            self.deeptext = nn.Sequential(self.deeptext, nn.Linear(self.deeptext.output_dim, self.pred_dim))
        if self.deepimage is not None:
            self.deepimage = nn.Sequential(self.deepimage, nn.Linear(self.deepimage.output_dim, self.pred_dim))
    def _forward_wide(self, X):
        if self.wide is not None:
            out = self.wide(X['wide'])
        else:
            # No wide component: start from zeros so deep outputs can be
            # accumulated in place.
            # NOTE(review): created on the default device -- may mismatch
            # GPU inputs; confirm the trainer moves it.
            batch_size = X[list(X.keys())[0]].size(0)
            out = torch.zeros(batch_size, self.pred_dim)
        return out
    def _forward_deephead(self, X, wide_out):
        if self.deeptabular is not None:
            if self.is_tabnet:
                tab_out = self.deeptabular(X['deeptabular'])
                deepside, M_loss = tab_out[0], tab_out[1]
            else:
                deepside = self.deeptabular(X['deeptabular'])
        else:
            deepside = torch.FloatTensor()
        if self.deeptext is not None:
            deepside = torch.cat([deepside, self.deeptext(X['deeptext'])], axis=1)
        if self.deepimage is not None:
            deepside = torch.cat([deepside, self.deepimage(X['deepimage'])], axis=1)
        deephead_out = self.deephead(deepside)
        # NOTE(review): this creates a brand-new, randomly initialised
        # nn.Linear on every forward call -- it is never registered nor
        # trained. Confirm whether the projection should be built once in
        # '_build_deephead' (as 'head_out' is) instead.
        deepside_out = nn.Linear(deephead_out.size(1), self.pred_dim)
        if self.is_tabnet:
            res = wide_out.add_(deepside_out(deephead_out)), M_loss
        else:
            res = wide_out.add_(deepside_out(deephead_out))
        return res
    def _forward_deep(self, X, wide_out):
        # Accumulate every component's prediction in place on 'wide_out'.
        if self.deeptabular is not None:
            if self.is_tabnet:
                tab_out, M_loss = self.deeptabular(X['deeptabular'])
                wide_out.add_(tab_out)
            else:
                wide_out.add_(self.deeptabular(X['deeptabular']))
        if self.deeptext is not None:
            wide_out.add_(self.deeptext(X['deeptext']))
        if self.deepimage is not None:
            wide_out.add_(self.deepimage(X['deepimage']))
        if self.is_tabnet:
            res = wide_out, M_loss
        else:
            res = wide_out
        return res
    def _forward_deep_with_fds(self, X: Dict[str, Tensor], y: Optional[Tensor]=None, epoch: Optional[int]=None):
        res = self.fds_layer(self.deeptabular(X['deeptabular']), y, epoch)
        if self.enforce_positive:
            # NOTE(review): 'isinstance(res, Tuple)' relies on typing.Tuple's
            # instance-check special case; the builtin 'tuple' is the
            # idiomatic (and future-proof) check.
            if isinstance(res, Tuple):
                out = res[0], self.enf_pos(res[1])
            else:
                out = self.enf_pos(res)
        else:
            out = res
        return out
    @staticmethod
    def _check_inputs(wide, deeptabular, deeptext, deepimage, deephead, head_hidden_dims, pred_dim, with_fds):
        # Validates dimensional compatibility and mutually exclusive options
        # across all components; raises/warns before any wiring happens.
        if wide is not None:
            assert wide.wide_linear.weight.size(1) == pred_dim, "the 'pred_dim' of the wide component ({}) must be equal to the 'pred_dim' of the deep component and the overall model itself ({})".format(wide.wide_linear.weight.size(1), pred_dim)
        if deeptabular is not None and not hasattr(deeptabular, 'output_dim'):
            raise AttributeError("deeptabular model must have an 'output_dim' attribute or property. See pytorch-widedeep.models.deep_text.DeepText")
        if deeptabular is not None:
            is_tabnet = deeptabular.__class__.__name__ == 'TabNet'
            has_wide_text_or_image = wide is not None or deeptext is not None or deepimage is not None
            if is_tabnet and has_wide_text_or_image:
                warnings.warn("'WideDeep' is a model comprised by multiple components and the 'deeptabular' component is 'TabNet'. We recommend using 'TabNet' in isolation. The reasons are: i)'TabNet' uses sparse regularization which partially losses its purpose when used in combination with other components. If you still want to use a multiple component model with 'TabNet', consider setting 'lambda_sparse' to 0 during training. ii) The feature importances will be computed only for TabNet but the model will comprise multiple components. Therefore, such importances will partially lose their 'meaning'.", UserWarning)
        if deeptext is not None and not hasattr(deeptext, 'output_dim'):
            raise AttributeError("deeptext model must have an 'output_dim' attribute or property. See pytorch-widedeep.models.deep_text.DeepText")
        if deepimage is not None and not hasattr(deepimage, 'output_dim'):
            raise AttributeError("deepimage model must have an 'output_dim' attribute or property. See pytorch-widedeep.models.deep_text.DeepText")
        if deephead is not None and head_hidden_dims is not None:
            raise ValueError("both 'deephead' and 'head_hidden_dims' are not None. Use one of the other, but not both")
        if head_hidden_dims is not None and not deeptabular and not deeptext and not deepimage:
            raise ValueError("if 'head_hidden_dims' is not None, at least one deep component must be used")
        if deephead is not None:
            # A custom head must accept the concatenated deep outputs.
            deephead_inp_feat = next(deephead.parameters()).size(1)
            output_dim = 0
            if deeptabular is not None:
                output_dim += deeptabular.output_dim
            if deeptext is not None:
                output_dim += deeptext.output_dim
            if deepimage is not None:
                output_dim += deepimage.output_dim
            assert deephead_inp_feat == output_dim, "if a custom 'deephead' is used its input features ({}) must be equal to the output features of the deep component ({})".format(deephead_inp_feat, output_dim)
        if with_fds and ((wide is not None or deeptext is not None or deepimage is not None or deephead is not None) or pred_dim != 1):
            raise ValueError('Feature Distribution Smoothing (FDS) is supported when using only a deeptabular component and for regression problems.')
class TextModeTestClass(nn.Module):
    """Minimal text model for tests: embedding -> LSTM -> linear, producing
    one scalar per sample."""

    def __init__(self):
        super(TextModeTestClass, self).__init__()
        self.word_embed = nn.Embedding(5, 16, padding_idx=0)
        self.rnn = nn.LSTM(16, 8, batch_first=True)
        self.linear = nn.Linear(8, 1)

    def forward(self, X):
        # Token ids must be integral for the embedding lookup.
        tokens = X.long()
        embedded = self.word_embed(tokens)
        # Only the final hidden state is used; outputs and cell state dropped.
        _, (hidden, _) = self.rnn(embedded)
        scores = self.linear(hidden)
        return scores.view(-1, 1)
class ImageModeTestClass(nn.Module):
    """Minimal image model for tests: two conv blocks followed by a linear
    layer producing one scalar per sample.

    NOTE(review): depends on the project-level `conv_layer` helper; presumably
    it builds a Conv2d(+pool) block — confirm against its definition.
    """

    def __init__(self):
        super(ImageModeTestClass, self).__init__()
        # 3 -> 64 -> 128 channels; the second block ends with an adaptive
        # average pool, so the flattened feature size matches Linear(128, 1).
        self.conv_block = nn.Sequential(conv_layer(3, 64, 3), conv_layer(64, 128, 1, maxpool=False, adaptiveavgpool=True))
        self.linear = nn.Linear(128, 1)

    def forward(self, X):
        x = self.conv_block(X)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        return self.linear(x)
import torch
from torch.nn import MSELoss, ReLU
from _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile
# Parity-bench test matrix. Each entry is:
#   (module class,
#    factory returning (positional init args, init kwargs),
#    factory returning (positional forward args, forward kwargs),
#    whether TorchScript/JIT compilation succeeds for the module)
TESTCASES = [
    # (nn.Module, init_args, forward_args, jit_compiles)
    (AddNorm,
     lambda: ([], {'input_dim': 4, 'dropout': 0.5}),
     lambda: ([torch.rand([4, 4, 4, 4]), _mock_layer()], {}),
     False),
    (BasicBlock,
     lambda: ([], {'inp': 4, 'out': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     False),
    (BayesianContEmbeddings,
     lambda: ([], {'n_cont_cols': 4, 'embed_dim': 4, 'prior_sigma_1': 4, 'prior_sigma_2': 4, 'prior_pi': 4, 'posterior_mu_init': 4, 'posterior_rho_init': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (BayesianLinear,
     lambda: ([], {'in_features': 4, 'out_features': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     False),
    (BayesianRegressionLoss,
     lambda: ([], {'noise_tolerance': 4}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     False),
    (BayesianSELoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (ContEmbeddings,
     lambda: ([], {'n_cont_cols': 4, 'embed_dim': 4, 'embed_dropout': 0.5, 'use_bias': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (ContextAttention,
     lambda: ([], {'input_dim': 4, 'dropout': 0.5}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (EncoderDecoderLoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (Entmax15,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (GBN,
     lambda: ([], {'input_dim': 4}),
     lambda: ([torch.rand([4, 4, 4])], {}),
     False),
    (GEGLU,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (GLU_Block,
     lambda: ([], {'input_dim': 4, 'output_dim': 4, 'dropout': 0.5}),
     lambda: ([torch.rand([4, 4])], {}),
     False),
    (GLU_Layer,
     lambda: ([], {'input_dim': 4, 'output_dim': 4, 'dropout': 0.5}),
     lambda: ([torch.rand([4, 4])], {}),
     False),
    (HuberLoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (ImageModeTestClass,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 3, 64, 64])], {}),
     True),
    (InfoNCELoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (L1Loss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (MSELoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (MSLELoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (NormAdd,
     lambda: ([], {'input_dim': 4, 'dropout': 0.5}),
     lambda: ([torch.rand([4, 4, 4, 4]), _mock_layer()], {}),
     False),
    (REGLU,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (RMSELoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (RMSLELoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (Sparsemax,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (TabNetPredLayer,
     lambda: ([], {'inp': 4, 'out': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
    (TextModeTestClass,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4])], {}),
     True),
    (TweedieLoss,
     lambda: ([], {}),
     lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),
     True),
    (Wide,
     lambda: ([], {'input_dim': 4}),
     lambda: ([torch.rand([4, 4, 4, 4])], {}),
     True),
]
class Test_jrzaurin_pytorch_widedeep(_paritybench_base):
    """One generated test per TESTCASES entry; each delegates to the
    parity-bench `_check` harness (instantiate the module, run forward,
    and optionally attempt JIT compilation)."""

    def test_000(self):
        self._check(*TESTCASES[0])

    def test_001(self):
        self._check(*TESTCASES[1])

    def test_002(self):
        self._check(*TESTCASES[2])

    def test_003(self):
        self._check(*TESTCASES[3])

    def test_004(self):
        self._check(*TESTCASES[4])

    def test_005(self):
        self._check(*TESTCASES[5])

    def test_006(self):
        self._check(*TESTCASES[6])

    def test_007(self):
        self._check(*TESTCASES[7])

    def test_008(self):
        self._check(*TESTCASES[8])

    def test_009(self):
        self._check(*TESTCASES[9])

    def test_010(self):
        self._check(*TESTCASES[10])

    def test_011(self):
        self._check(*TESTCASES[11])

    def test_012(self):
        self._check(*TESTCASES[12])

    def test_013(self):
        self._check(*TESTCASES[13])

    def test_014(self):
        self._check(*TESTCASES[14])

    def test_015(self):
        self._check(*TESTCASES[15])

    def test_016(self):
        self._check(*TESTCASES[16])

    def test_017(self):
        self._check(*TESTCASES[17])

    def test_018(self):
        self._check(*TESTCASES[18])

    def test_019(self):
        self._check(*TESTCASES[19])

    def test_020(self):
        self._check(*TESTCASES[20])

    def test_021(self):
        self._check(*TESTCASES[21])

    def test_022(self):
        self._check(*TESTCASES[22])

    def test_023(self):
        self._check(*TESTCASES[23])

    def test_024(self):
        self._check(*TESTCASES[24])

    def test_025(self):
        self._check(*TESTCASES[25])

    def test_026(self):
        self._check(*TESTCASES[26])

    def test_027(self):
        self._check(*TESTCASES[27])

    def test_028(self):
        self._check(*TESTCASES[28])
|
# -*- coding: utf-8 -*-
"""
@Time : 2020/6/2 9:06
@Author : QDY
@FileName: 面试题64. 求1+2+…+n.py
求 1+2+...+n ,要求不能使用乘除法、for、while、if、else、switch、case等关键字及条件判断语句(A?B:C)。
示例 1:
输入: n = 3
输出: 6
示例 2:
输入: n = 9
输出: 45
"""
class Solution:
    """Compute 1 + 2 + ... + n without multiplication/division, for/while,
    if/else/switch/case or the conditional expression (LeetCode interview
    problem 64).

    Uses boolean short-circuiting (`n > 1 and ...`) in place of an `if` to
    terminate the recursion.
    """

    def __init__(self):
        # Running accumulator for the recursive sum.
        self.res = 0

    def sumNums(self, n):
        """Return 1 + 2 + ... + n.

        Bug fix: the original accumulated into ``self.res`` across calls, so
        a second call on the same instance returned a stale, inflated total.
        The accumulator is now reset before each computation.
        """
        self.res = 0
        self._accumulate(n)
        return self.res

    def _accumulate(self, n):
        # Short-circuit replaces `if n > 1:` — recursion stops at n == 1.
        n > 1 and self._accumulate(n - 1)
        self.res += n
|
from ew.static import cfg as ewcfg
from . import prankcmds
# Maps command identifiers (from ewcfg) to their handler coroutines/functions.
cmd_map = {
    # Swilldermuk -- Please make swilldermuk specific cmd/util files on reimplementation
    ewcfg.cmd_gambit: prankcmds.gambit,
    ewcfg.cmd_credence: prankcmds.credence,  # debug
    ewcfg.cmd_get_credence: prankcmds.get_credence,  # debug
    ewcfg.cmd_reset_prank_stats: prankcmds.reset_prank_stats,  # debug
    ewcfg.cmd_set_gambit: prankcmds.set_gambit,  # debug
    ewcfg.cmd_pointandlaugh: prankcmds.point_and_laugh,
}

# Subset of commands also usable from apartment DMs.
apt_dm_cmd_map = {
    ewcfg.cmd_gambit: prankcmds.gambit,
    ewcfg.cmd_credence: prankcmds.credence,
}
#!/usr/bin/env python
# coding: utf-8
import os
import re
class ReadData:
    """Lists image files under ``path`` sorted in natural (human) order."""

    def __init__(self, path):
        self.path = path

    def atoi(self, text):
        """Return ``text`` as an int when it is all digits, else unchanged."""
        return int(text) if text.isdigit() else text

    def natural_keys(self, text):
        '''
        alist.sort(key=natural_keys) sorts in human order
        http://nedbatchelder.com/blog/200712/human_sorting.html
        (See Toothy's implementation in the comments)
        '''
        # Bug fix: the pattern must be a raw string; '(\d+)' uses an invalid
        # escape sequence (DeprecationWarning, a SyntaxError in future Python).
        return [self.atoi(c) for c in re.split(r'(\d+)', text)]

    # NOTE(review): this class-level attribute is shadowed by the instance
    # attribute set in __init__, so this hard-coded machine-specific default
    # appears dead — confirm nothing reads ReadData.path before removing it.
    path = '/home/aviad/Desktop/src/data/Images/odo360nodoor/odo360nodoor_orginal'

    def exportNameImages(self):
        """Return full paths of the files in ``self.path``, naturally sorted."""
        imagesName = os.listdir(self.path)
        imagesName.sort(key=self.natural_keys)
        return [self.path + '/' + name for name in imagesName]
|
from typing import Dict, Optional, Tuple, Union
import ConfigSpace as CS
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformIntegerHyperparameter
import numpy as np
from torch import nn
from autoPyTorch.datasets.base_dataset import BaseDatasetPropertiesType
from autoPyTorch.pipeline.components.setup.network_head.base_network_head import NetworkHeadComponent
from autoPyTorch.pipeline.components.setup.network_head.utils import _activations
from autoPyTorch.utils.common import HyperparameterSearchSpace, get_hyperparameter
class FullyConnectedHead(NetworkHeadComponent):
    """
    Head consisting of a number of fully connected layers.
    Flattens any input in a array of shape [B, prod(input_shape)].
    """

    def build_head(self, input_shape: Tuple[int, ...], output_shape: Tuple[int, ...]) -> nn.Module:
        """Build the MLP head: Flatten -> (Linear -> activation) * (num_layers - 1) -> Linear."""
        layers = [nn.Flatten()]
        in_features = np.prod(input_shape).item()
        # Hidden layers 1 .. num_layers-1; the final projection below has no activation.
        for i in range(1, self.config["num_layers"]):
            layers.append(nn.Linear(in_features=in_features,
                                    out_features=self.config[f"units_layer_{i}"]))
            layers.append(_activations[self.config["activation"]]())
            in_features = self.config[f"units_layer_{i}"]
        out_features = np.prod(output_shape).item()
        # Final projection to the flattened output shape.
        layers.append(nn.Linear(in_features=in_features,
                                out_features=out_features))
        return nn.Sequential(*layers)

    @staticmethod
    def get_properties(dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None
                       ) -> Dict[str, Union[str, bool]]:
        # Static metadata used by the pipeline to decide when this head applies.
        return {
            'shortname': 'FullyConnectedHead',
            'name': 'FullyConnectedHead',
            'handles_tabular': True,
            'handles_image': True,
            'handles_time_series': True,
        }

    @staticmethod
    def get_hyperparameter_search_space(
        dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]] = None,
        num_layers: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="num_layers",
                                                                          value_range=(1, 4),
                                                                          default_value=2),
        units_layer: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="units_layer",
                                                                           value_range=(64, 512),
                                                                           default_value=128),
        activation: HyperparameterSearchSpace = HyperparameterSearchSpace(hyperparameter="activation",
                                                                          value_range=tuple(_activations.keys()),
                                                                          default_value=list(_activations.keys())[0]),
    ) -> ConfigurationSpace:
        """Build the ConfigSpace: layer count, per-layer units and activation,
        with GreaterThan conditions so hyperparameters inactive for a sampled
        `num_layers` are pruned from the configuration."""
        cs = ConfigurationSpace()
        min_num_layers: int = num_layers.value_range[0]  # type: ignore
        max_num_layers: int = num_layers.value_range[-1]  # type: ignore
        num_layers_is_constant = (min_num_layers == max_num_layers)
        num_layers_hp = get_hyperparameter(num_layers, UniformIntegerHyperparameter)
        activation_hp = get_hyperparameter(activation, CategoricalHyperparameter)
        cs.add_hyperparameter(num_layers_hp)
        if not num_layers_is_constant:
            # Activation is only meaningful when at least one hidden layer exists.
            cs.add_hyperparameter(activation_hp)
            cs.add_condition(CS.GreaterThanCondition(activation_hp, num_layers_hp, 1))
        elif max_num_layers > 1:
            # only add activation if we have more than 1 layer
            cs.add_hyperparameter(activation_hp)
        for i in range(1, max_num_layers + 1):
            # One units hyperparameter per potential layer, sharing the
            # caller-provided range/default/log settings.
            num_units_search_space = HyperparameterSearchSpace(
                hyperparameter=f"units_layer_{i}",
                value_range=units_layer.value_range,
                default_value=units_layer.default_value,
                log=units_layer.log,
            )
            num_units_hp = get_hyperparameter(num_units_search_space, UniformIntegerHyperparameter)
            cs.add_hyperparameter(num_units_hp)
            if i >= min_num_layers and not num_layers_is_constant:
                # In the case of a constant, the max and min number of layers are the same.
                # So no condition is needed. If it is not a constant but a hyperparameter,
                # then a condition has to be made so that it accounts for the value of the
                # hyperparameter.
                cs.add_condition(CS.GreaterThanCondition(num_units_hp, num_layers_hp, i))
        return cs
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from taggit.managers import TaggableManager
from django.template.defaultfilters import slugify as default_slugify
from unidecode import unidecode
from taggit.models import TaggedItemBase
import markdown
# Custom QuerySet manager
# Custom QuerySet manager
class PublishedManager(models.Manager):
    """Manager restricting the default queryset to published posts."""

    def get_queryset(self):
        base_qs = super(PublishedManager, self).get_queryset()
        return base_qs.filter(status='published')
class TaggedPost(TaggedItemBase):
    # Through-model linking tags to Post (needed for the custom TaggableManager).
    content_object = models.ForeignKey('Post')
class Category(models.Model):
    """Post category; the slug is derived from the title on first save."""
    title = models.CharField(max_length=40, db_index=True)
    slug = models.SlugField(max_length=100, db_index=True)
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        # Derive the slug from the title when one has not been set yet.
        self.slug = self.slug or self.slugify(self.title)
        super(Category, self).save(*args, **kwargs)

    def slugify(self, title, i=None):
        # Transliterate to ASCII, slugify, optionally append a numeric suffix
        # used for de-duplication.
        base = default_slugify(unidecode(title))
        suffix = '' if i is None else '_%d' % i
        return base + suffix
# Workflow states for Post.status: (stored value, human-readable label).
STATUS_CHOICES = (
    ('draft', 'Draft'),
    ('published', 'Published'),
)
# Create your models here.
class Post(models.Model):
    """Blog post with markdown content, tags, category and a publish workflow."""
    title = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100, allow_unicode=True)
    author = models.ForeignKey(User, related_name='blog_posts')
    content = models.TextField()  # raw markdown source
    publish_at = models.DateTimeField(default=timezone.now)
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    status = models.CharField(
        max_length=10, choices=STATUS_CHOICES, default='draft')
    tags = TaggableManager(through=TaggedPost, blank=True)
    category = models.ForeignKey(Category, related_name='posts', blank=True, null=True)
    cover_image = models.CharField(max_length=120, default='http://static.dogrod.com/media/i-am-root.png')

    class Meta:
        ordering = ('-publish_at', )  # newest first

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        # Auto-generate the slug from the title on first save.
        if not self.slug:
            self.slug = self.slugify(self.title)
        super(Post, self).save(*args, **kwargs)

    def slugify(self, title, i=None):
        # Transliterate to ASCII before slugifying; optional numeric suffix
        # for de-duplication.
        slug = default_slugify(unidecode(title))
        if i is not None:
            slug += '_%d' % i
        return slug

    def get_content_as_markdown(self):
        # NOTE(review): `safe_mode` was deprecated and later removed from the
        # `markdown` package — confirm the pinned version still accepts it.
        return markdown.markdown(self.content, safe_mode='escape')

    def get_summary(self):
        # First 40 characters of the raw content, with an ellipsis if truncated.
        if len(self.content) > 40:
            return '{0}...'.format(self.content[:40])
        else:
            return self.content

    def get_summary_as_markdown(self):
        return markdown.markdown(self.get_summary(), safe_mode='escape')

    def get_formatted_publish_time(self):
        # Minute-precision ISO-like stamp, e.g. 2020-01-31T09:05.
        return self.publish_at.strftime('%Y-%m-%dT%H:%M')

    def get_absolute_url(self):
        # URL pattern expects year/month/day/slug.
        return reverse(
            'post:post_detail',
            args=[
                self.publish_at.year,
                self.publish_at.strftime('%m'),
                self.publish_at.strftime('%d'), self.slug
            ])

    def get_publish_date(self):
        return self.publish_at.date()

    objects = models.Manager()  # default QS manager
    published = PublishedManager()  # custom QS manager
class Comment(models.Model):
    """Reader comment on a Post; may be a reply to another comment."""
    post = models.ForeignKey(Post, related_name='comments')
    name = models.CharField(max_length=80)
    email = models.EmailField(blank=True)
    # Optional: anonymous comments carry no user account.
    author = models.ForeignKey(
        User, related_name='post_comments', blank=True, null=True)
    content = models.TextField(verbose_name=u'content')
    create_at = models.DateTimeField(auto_now_add=True)
    update_at = models.DateTimeField(auto_now=True)
    approved = models.BooleanField(default=True)  # moderation flag; True = visible
    reply_to = models.ForeignKey(
        'self',
        on_delete=models.CASCADE,
        related_name='child_comment',
        blank=True,
        null=True
    )

    class Meta:
        ordering = ('-create_at', )  # newest first

    def ban_comment(self):
        # Hide the comment (moderation) and persist immediately.
        self.approved = False
        self.save()

    def approve(self):
        # Re-show a previously banned comment and persist immediately.
        self.approved = True
        self.save()

    def __str__(self):
        return 'Comment by {} on {}'.format(self.name, self.post)
class Like(models.Model):
    """
    Record of like action
    """
    post = models.ForeignKey(Post, related_name='like')
    # Optional: anonymous likes carry no user account.
    author = models.ForeignKey(
        User, related_name='post_like', blank=True, null=True)
    create_at = models.DateTimeField(auto_now_add=True)
    canceled = models.BooleanField(default=False)  # soft-delete: like was withdrawn
class ActionSummary(models.Model):
    """
    Summary of likes & comments
    """
    # Denormalized per-post counters, kept alongside the Post row.
    post = models.ForeignKey(Post, related_name='likes')
    like_count = models.IntegerField(default=0)
    comment_count = models.IntegerField(default=0)
|
# # def decor1(func):
# # def inner():
# # x = func()
# # return x*x
# # return inner
# # def decor(func):
# # def inner():
# # x = func()
# # return 2 * x
# # return inner
# # @decor1
# # @decor
# # def num():
# #     return 10
# def decor(func):
# def inner(a,b):
# if a < b:
# a, b = b, a
# return func(a,b)
# return inner
# @decor
# def div(a,b):
# print (a/b)
# div(2,4)
def decor(func):
    """Decorator: print c when a > b, otherwise print b, then delegate."""
    def adder(a, b, c):
        print(c if a > b else b)
        return func(a, b, c)
    return adder
# Bug fix: the original read "@decor" followed by "adder(1,2,4):", which is a
# SyntaxError (a decorator must be followed by a def/class statement).
# NOTE(review): reconstructed as a decorated function definition plus the
# call implied by the original arguments — confirm the intended body.
@decor
def adder(a, b, c):
    return a + b + c

adder(1, 2, 4)
from setuptools import find_packages, setup
setup(
    name='src',
    version='0.1.0',
    description='{{ cookiecutter.description }}',
    author='{{ cookiecutter.author_name }}',
    author_email='{{ cookiecutter.author_email }}',
    # Bug fix: the license value nested single quotes inside a single-quoted
    # string ("... == 'MIT' %} ..."), which is a SyntaxError; the outer quotes
    # are now double quotes. The Jinja template content is unchanged.
    license="{% if cookiecutter.open_source_license == 'MIT' %}MIT{% elif cookiecutter.open_source_license == 'BSD-3-Clause' %}BSD-3{% endif %}",
    # Bug fix: `packages` was passed twice (SyntaxError: keyword argument
    # repeated). Kept the src-layout variant, matching package_dir below.
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    python_requires='>=3.6',
    install_requires=[],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
    ],
    include_package_data=True
)
|
"""
This file contains global variables used within the package.
SUPPORTED_PLANS - the plan types which have been implemented, of type tuple.
SUPPORTED_MARKETS - the markets for each sport that are supported
A dictionary with supported sports as keys and their markets
as a list of values.
"""
SUPPORTED_PLANS = ('basic')
SUPPORTED_MARKETS = {
'soccer': [
'match_odds'
]
}
|
"""
Python Interface to UpCloud's API
"""
__version__ = "0.0.1"
__author__ = "Elias Nygren"
__author_email__ = "elias.nygren@outlook.com"
__license__ = "See: http://creativecommons.org/licenses/by-nd/3.0/ "
__copyright__ = "Copyright (c) 2015 Elias Nygren"
from .server import Server
from .storage import Storage
from .ip_address import IP_address
from .firewall import Firewall
from .cloud_manager import CloudManager
from .tools import OperatingSystems
from .tools import ZONE
|
from django.views.generic.base import TemplateView
from django.views.generic import ListView
from accounts.forms import SignupForm
from django.views.generic.edit import CreateView
#from django.conf.urls import reverse_lazy
from django.urls import reverse_lazy  # replaces the deprecated import above (found via a web search after the old path stopped working)
from django.contrib.auth.decorators import login_required
from blog01.models import Post
# Create your views here.
#--- ListView
class HomeView(ListView):
    # Home page: paginated list of Post objects exposed as 'posts'.
    model = Post
    template_name = 'home.html'
    context_object_name = 'posts'
    paginate_by = 4
#class HomeView(TemplateView):  # replaced by the ListView above
#    template_name = 'home.html'
# --- User Creation
class UserCreateView(CreateView):
    # Sign-up view; redirects to the 'register_done' page on success.
    template_name = 'registration/register.html'
    form_class = SignupForm
    success_url = reverse_lazy('register_done')
class UserCreateDoneTV(TemplateView):
    # Shown after successful registration.
    # NOTE(review): 'register_on.html' looks like a typo for
    # 'register_done.html' — confirm the template's actual filename.
    template_name = 'registration/register_on.html'
# --- @login_required
# --- @login_required
class LoginRequiredMixin(object):
    """Mixin that wraps the resolved class-based view with @login_required."""

    @classmethod
    def as_view(cls, **initkwargs):
        return login_required(super(LoginRequiredMixin, cls).as_view(**initkwargs))
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functions used to extract and analyze stacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
from tensorflow.python.platform import test
from tensorflow.python.util import tf_stack
class TFStackTest(test.TestCase):
    """Tests for tf_stack.extract_stack frame extraction and summaries."""

    def testFormatStackSelfConsistency(self):
        # Both defined on the same line to produce identical stacks.
        stacks = tf_stack.extract_stack(), traceback.extract_stack()
        self.assertEqual(
            traceback.format_list(stacks[0]), traceback.format_list(stacks[1]))

    def testFrameSummaryEquality(self):
        # Frames captured on different lines differ; frames from two captures
        # of the same position compare equal.
        frames1 = tf_stack.extract_stack()
        frames2 = tf_stack.extract_stack()
        self.assertNotEqual(frames1[0], frames1[1])
        self.assertEqual(frames1[0], frames1[0])
        self.assertEqual(frames1[0], frames2[0])

    def testFrameSummaryEqualityAndHash(self):
        # Both defined on the same line to produce identical stacks.
        frame1, frame2 = tf_stack.extract_stack(), tf_stack.extract_stack()
        self.assertEqual(len(frame1), len(frame2))
        for f1, f2 in zip(frame1, frame2):
            self.assertEqual(f1, f2)
            self.assertEqual(hash(f1), hash(f1))
            self.assertEqual(hash(f1), hash(f2))
        self.assertEqual(frame1, frame2)
        self.assertEqual(hash(tuple(frame1)), hash(tuple(frame2)))

    def testLastUserFrame(self):
        # The "# COMMENT" marker below is load-bearing: the assertion checks
        # the captured source line contains it.
        trace = tf_stack.extract_stack()  # COMMENT
        frame = trace.last_user_frame()
        self.assertRegex(frame.line, "# COMMENT")
def extract_stack(limit=None):
    """Return (tf_stack frames, traceback frames) captured on a single line."""
    # Both defined on the same line to produce identical stacks.
    return tf_stack.extract_stack(limit), traceback.extract_stack(limit)  # pylint: disable=too-many-function-args
if __name__ == "__main__":
test.main()
|
'''names = ('Максат','Лязат','Данияр','Айбек','Атай','Салават','Адинай','Жоомарт','Алымбек','Эрмек','Дастан','Бекмамат','Аслан',)
i = 2
while i < 12:
print(names)
i+=2
'''
a = int(input("введите число")
while a <= 100:
if (a > 100) and (a < 1000):
print("это число трёхзначное")
else:
print("не трёхзначное")
if a > 0:
print("положительное число")
else:
print("отрицательная")
if a % 2 == 0:
print("четное")
else:
print("не четное")
if a%31 == 0:
print("делится")
else:
print("не делиится")
break
print(a)
'''
i = -100
for i in range(i,100):
if i < 100:
print(i)
i+=1
'''
|
# A triple-quoted string preserves its line breaks and embedded quotes.
print('''
This is a very long string
It continues here.
And it's not over yet.
"Hello,world!"
Still here.''')

# A trailing backslash continues the statement onto the next line.
x = 1 + 2 \
    + 3 + 4
print(x)

print("abcd\n1234")  # \n is a newline escape
print("C:\\ProgramFile\\python")  # backslashes must be doubled in a normal string
print(r"C:\ProgramFile\python""\\")  # raw string, implicitly concatenated with "\\"
# In Python 3, all strings are Unicode strings
print(u"Unicode字符串")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.