index | blob_id | code |
|---|---|---|
12,100 | 4de1b4c364173f018b3ff5b938db64fa0b61c3ba | import os
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
from tensorflow.python.ops import io_ops
from tensorflow.python.platform import gfile
sample_rate = 16000
class WavLoader:
def __init__(self,name=None,desired_samples=None):
        with tf.name_scope(name, "wav_loader") as scope:
self.wav_filename_ph = tf.placeholder(tf.string,[])
self.wav_loader = io_ops.read_file(self.wav_filename_ph)
if desired_samples is None:
self.wav_decoder = contrib_audio.decode_wav(self.wav_loader,desired_channels=1)
else:
self.wav_decoder = contrib_audio.decode_wav(self.wav_loader,desired_channels=1,desired_samples=desired_samples)
def load(self,f,sess):
return sess.run(self.wav_decoder,{self.wav_filename_ph:f}).audio.flatten()
def load_test_data(sess):
wav_loader = WavLoader("test_wav_loader",desired_samples=sample_rate)
test_dir = "test/audio"
test_index = []
    for i,wav_path in enumerate(gfile.Glob(os.path.join(test_dir, "*.wav"))):
if i % 10000 == 0: print("Test {}".format(i))
tdata = wav_loader.load(wav_path,sess)
file_name = os.path.basename(wav_path)
test_index.append({"file":wav_path,"identifier":file_name,"data":tdata})
return test_index
def load_bg_data(sess):
wav_loader = WavLoader("bg_wav_loader")
bg_data = []
bg_path = "train/audio/_background_noise_/*.wav"
for wav_path in gfile.Glob(bg_path):
wav_data = wav_loader.load(wav_path,sess)
bg_data.append(wav_data)
return bg_data
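# A minimal usage sketch (assumes a TF1-style session; "test/audio" and
# "train/audio/_background_noise_" must contain .wav files as above):
if __name__ == "__main__":
    with tf.Session() as sess:
        test_index = load_test_data(sess)
        bg_data = load_bg_data(sess)
        print("Loaded {} test clips and {} background clips".format(len(test_index), len(bg_data)))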
|
12,101 | 6a046028c6a5f744bcc2d2988bf425b20746aa37 | #!/usr/bin/python3
import json
from apps.found_handler_v2 import RedisHandler
from lib.routes import route
from lib.authenticated_async import authenticated_async
from apps.models.user import User_Challenge
from apps.models.config import Config
# 1. Challenge entry point
@route('/challenge')
class ChallengeHandler(RedisHandler):
@authenticated_async
async def get(self):
uuid = self.current_user.uuid
user_info_session_key = "sx_info:" + uuid
challenge_info = self.redis_spare.hget(user_info_session_key, "challenge_info")
challenge_info_dict = json.loads(challenge_info)
chan_dict = {}
temp = "0"
        if not isinstance(challenge_info_dict, dict):
            # Legacy rows may be double-encoded strings; fall back to literal evaluation.
            challenge_info_dict = eval(challenge_info_dict)
for id, star in challenge_info_dict.items():
chan_dict[id] = {
"id": id,
"star": star
}
if int(id) > int(temp):
temp = id
chan_dict["current_level"] = temp
self.write_json(chan_dict)
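# An example response for GET /challenge (hypothetical values, matching the
# chan_dict structure built above):
#   {"1": {"id": "1", "star": 3}, "2": {"id": "2", "star": 0}, "current_level": "2"}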
# 2. Enter a level and fetch its info
@route('/challenge_info')
class ChallengeInfoHandler(RedisHandler):
@authenticated_async
async def get(self):
uuid = self.current_user.uuid
level = int(self.get_argument("level", "") or 0)
if not level:
            return self.write_json(status=-1, msg="Please try again later")
data = Config.challenge_config
condition = data.get(level)
condition["level"] = level
self.write_json(condition)
# 3. Save the result when a challenge run ends
@route('/challenge_next')
class ChallengeNextHandler(RedisHandler):
@authenticated_async
async def post(self):
uuid = self.current_user.uuid
level = str(self.get_argument("level", "") or 0)
star = int(self.get_argument("star", "") or 0)
# print(level, type(level))
# print(star, type(star))
if not level or not star:
            return self.write_json(status=-1, msg="Please try again later")
user_info_session_key = "sx_info:" + uuid
user_challenge = await self.application.objects.get(User_Challenge, uuid=uuid)
challenge_info = eval(user_challenge.challenge_info)
challenge_info[level] = star
        if int(level) < 120:
            next_level = int(level) + 1
            # Keep dict keys as strings so lookups stay consistent after JSON round-trips.
            if challenge_info.get(str(next_level), 0) == 0:
                challenge_info[str(next_level)] = 0
        else:
            next_level = int(level)
config = Config.challenge_config
condition = config.get(next_level)
user_challenge.challenge_info = json.dumps(challenge_info)
await self.application.objects.update(user_challenge)
challenge_info_json = json.dumps(challenge_info)
self.redis_spare.hset(user_info_session_key, "challenge_info", challenge_info_json)
# test = await self.application.objects.get(User_Challenge, uuid=uuid)
data = {
"next_level": next_level,
"condition": condition
}
self.write_json(data)
|
12,102 | 9cadcd9e658beba93cc2780aaff51836d017b903 | GITHUB_TOKEN = "YourTokenHere"
|
12,103 | 84b4313b59cb60649f27d1a01a9237f7bb3cf253 | #Find biggest of 3 numbers entered.
x = int(input("Enter 1st number: "))
y = int(input("Enter 2nd number: "))
z = int(input("Enter 3rd number: "))
if (x >= y) and (x >= z):
    largest = x
elif (y >= x) and (y >= z):
    largest = y
else:
largest = z
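# The built-in gives the same result in one step: largest = max(x, y, z)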
print("The largest number is",largest) |
12,104 | 761b98876ac676cff12e55a2856edf430b0c3409 | from django.shortcuts import render
from .models import Subway
# Create your views here.
def index(request):
return render(request, 'boards/index.html')
def subway_order(request):
return render(request, 'boards/subway.html')
def subway_result(request):
name = request.POST.get("name")
date = request.POST.get("date")
sandwich = request.POST.get("sandwich")
size = request.POST.get("size")
bread = request.POST.get("bread")
    # Use getlist to receive multiple checked values
sauce = request.POST.getlist("sauce")
    # Save to the database
    subway = Subway()
subway.name = name
subway.date = date
subway.sandwich = sandwich
subway.size = size
subway.bread = bread
    subway.sauce = ", ".join(sauce)  # store the multi-select list as a single string
subway.save()
context = {
'name': name,
'date': date,
'sandwich':sandwich,
'size': size,
'bread': bread,
'sauce': ", ".join(sauce)
}
return render(request, 'boards/subway_result.html', context)
def subway_id(request, id):
sub = Subway.objects.get(pk=id)
context = {
'id':sub
}
    return render(request, 'sub_id.html', context) |
12,105 | 6b25cdb5a942501512a923a886c430b1f0f8408b | #__author__= 'Jerry Li'
total = 0  # use a name that doesn't shadow the built-in sum()
for i in range(1, 1000):
    if i % 3 == 0 or i % 5 == 0:
        total += i
print(total)
#final answer: 233168
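# Cross-check by inclusion-exclusion with S(k) = k*(k+1)//2:
# 3*S(333) + 5*S(199) - 15*S(66) = 166833 + 99500 - 33165 = 233168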
|
12,106 | fa34b5c92be471dc7e794eb9c2bb4a89c249f31b | # Generated by Django 3.1.4 on 2020-12-09 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20201209_1905'),
]
operations = [
migrations.AlterField(
model_name='userinfo',
name='phone',
field=models.IntegerField(blank=True, null=True, verbose_name='手机号'),
),
migrations.AlterField(
model_name='userinfo',
name='vertify',
field=models.IntegerField(blank=True, null=True, verbose_name='验证码'),
),
]
|
12,107 | 227db333511123b722b2fee96a80f03a194bf43c | #!/usr/bin/python
'''
This script generates a codon optimised protein based upon a fasta protein
sequence and a table of relative codon usage.
'''
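# Example invocation (hypothetical file names):
#   python codon_optimise.py --fasta_aa protein.fa --fasta_cds cds.fa \
#       --codon_table codon_table.txt --prefix output/opt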
import sys,argparse
from collections import defaultdict
import re
import numpy as np
import csv
import random
from Bio import SeqIO
#-----------------------------------------------------
# Step 1
# Import variables, load input files & create set of genes
# If using a different number of files, arguments & appending to list of genes will need to be changed
#-----------------------------------------------------
#These commands use the argparse module to import files specified in the command line
ap = argparse.ArgumentParser()
ap.add_argument('--fasta_aa',required=True,type=str,help='protein sequence for conversion')
ap.add_argument('--fasta_cds',required=True,type=str,help='cds for conversion')
ap.add_argument('--codon_table',required=True,type=str,help='text file containing codon usage table')
ap.add_argument('--prefix',required=True,type=str,help='output directory/filename prefix for output files')
conf = ap.parse_args()
#-----------------------------------------------------
# Step 2
# Define classes for codon weightings and the codon usage table
#-----------------------------------------------------
class AA_weight_obj(object):
"""
"""
def __init__(self, aa):
""" """
self.aa = aa
self.weightings = defaultdict(float)
self.weightings_adj = defaultdict(float)
self.max = float()
self.optimal = str()
self.codons = []
self.sorted_adj_weightings = []
self.sorted_codons = []
self.weight_list = []
self.weight_list_adj = []
def add_weight(self, codon, weight):
""" """
# print codon
# print weight
self.weightings[codon] = float(weight)
# if float(weight) > self.max:
# self.max = float(weight)
# self.optimal = codon
self.codons.append(codon)
self.weight_list.append(weight)
def random_codon(self):
""" """
num_codons = len(self.codons)
r = float(random.randrange(0,10000, 1))
# r = float(random.randrange(0,num_codons*100, 1))
# print (self.aa)
# print(r)
r = np.divide(r, 10000)
# r = np.divide(r, 100)
# print(" of max ".join([str(r), str(num_codons)]))
        for x,y in zip(self.sorted_codons,self.sorted_adj_weightings):
# print(" - ".join([str(r), str(x), str(y)]))
selected_codon = x
if float(y) >= float(r):
break
else:
r = r - float(y)
return selected_codon
def get_opt(self):
""" """
# sorted_weightings = sorted(self.weight_list)
# sorted_codons = [x for _,x in sorted(zip(self.weight_list,self.codons))]
# print sorted_weightings
# print sorted_codons
# return sorted_codons[-1]
return self.sorted_codons[-1]
def adjust_weight(self):
""" """
num_codons = len(self.weight_list)
# print num_codons
# print(self.weight_list)
self.weight_list_adj = [round(np.divide(float(x), num_codons),5) for x in self.weight_list]
# print self.weight_list_adj
self.sorted_adj_weightings = sorted(self.weight_list_adj)
self.sorted_codons = [x for _,x in sorted(zip(self.weight_list_adj,self.codons))]
for x,y in zip(self.sorted_codons, self.sorted_adj_weightings):
self.weightings_adj[x] = y
self.max = self.sorted_adj_weightings[-1]
class CodonTab_obj(object):
"""
"""
def __init__(self):
"""Return a Expression_obj whose name is *gene_id*"""
# self.organism = []
self.weighting_dict = defaultdict(list)
# self.codon_obj_dict = {}
self.codon_dict = {
'UUU':'F','UUC':'F',
'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',
'AUU':'I','AUC':'I','AUA':'I',
'AUG':'M',
'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',
'UCU':'S','UCC':'S','UCA':'S','UCG':'S',
'CCU':'P','CCC':'P','CCA':'P','CCG':'P',
'ACU':'T','ACC':'T','ACA':'T','ACG':'T',
'GCU':'A','GCC':'A','GCA':'A','GCG':'A',
'UAU':'Y','UAC':'Y',
'UAA':'X','UAG':'X',
'CAU':'H','CAC':'H',
'CAA':'Q','CAG':'Q',
'AAU':'N','AAC':'N',
'AAA':'K','AAG':'K',
'GAU':'D','GAC':'D',
'GAA':'E','GAG':'E',
'UGU':'C','UGC':'C',
'UGA':'X',
'UGG':'W',
'CGU':'R','CGC':'R','CGA':'R','CGG':'R',
'AGU':'S','AGC':'S',
'AGA':'R','AGG':'R',
'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'
}
def add_table(self, table):
""""""
table = table.replace(' ', '')
table_lines = table.split(';')
for line in table_lines:
split_line = line.split(':')
codon = split_line[0]
# print codon
weighting = split_line[1]
# print weighting
aa = self.codon_dict[codon]
if self.weighting_dict[aa] and self.weighting_dict[aa][0]:
obj = self.weighting_dict[aa][0]
# print obj.weightings
else:
obj = AA_weight_obj(aa)
obj.add_weight(codon, weighting)
self.weighting_dict[aa].append(obj)
for aa in self.weighting_dict.keys():
self.weighting_dict[aa][0].adjust_weight()
def optimise_rand(prot):
new_seq = ''
for aa in prot:
new_aa = vd_table_obj.weighting_dict[aa][0].random_codon()
new_seq = new_seq + new_aa
return(new_seq)
def optimise_best(prot):
new_seq = ''
for aa in prot:
# print aa
# new_aa = vd_table_obj.weighting_dict[aa][0].get_opt()
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[-1]
new_seq = new_seq + new_aa
return(new_seq)
def optimise_worst(prot):
new_seq = ''
for aa in prot:
# print aa
new_aa = vd_table_obj.weighting_dict[aa][0].sorted_codons[0]
new_seq = new_seq + new_aa
return(new_seq)
def score_seq(seq, table_obj):
codons = [seq[i:i+3] for i in range(0, len(seq), 3)]
total_score = float(0)
total_max = float(0)
for codon in codons:
aa = table_obj.codon_dict[codon]
score = table_obj.weighting_dict[aa][0].weightings_adj[codon]
# score = score - table_obj.weighting_dict[aa][0].weight_list_adj[0]
max = table_obj.weighting_dict[aa][0].max
total_score = total_score + score
total_max = total_max + max
return [round(np.divide(total_score, total_max), 2), round(np.divide(total_max, total_max), 2)]
# scores = []
# for aa in seq.split(''):
# scores.append(score_dict[aa])
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
seq_records = list(SeqIO.parse(conf.fasta_aa, "fasta"))
cds_records = list(SeqIO.parse(conf.fasta_cds, "fasta"))
prefix = conf.prefix
with open(conf.codon_table) as f:
table_lines = []
for line in f.readlines():
table_lines.append(line.rstrip())
#-----------------------------------------------------
# Step X
#
#-----------------------------------------------------
record = seq_records[0]
# print record
prot = record.seq
# prot = 'MVSKGEEDNMAIIKEFMRFKVHMEGSVNGHEFEIEGEGEGRPYEGTQTAKLKVTKGGPLPFAWDILSPQFMYGSKAYVKHPADIPDYLKLSFPEGFKWERVMNFEDGGVVTVTQDSSLQDGEFIYKVKLRGTNFPSDGPVMQKKTMGWEASSERMYPEDGALKGEIKQRLKLKDGGHYDAEVKTTYKAKKPVQLPGAYNVNIKLDITSHNEDYTIVEQYERAEGRHSTGGMDELYK'
table = "".join(table_lines)
# table = 'UUU: 0.55; UCU: 0.85; UAU: 0.40; UGU: 0.44; UUC: 1.45; UCC: 1.41; UAC: 1.60; UGC: 1.56; UUA: 0.07; UCA: 0.51; UAA: 1.04; UGA: 1.06; UUG: 0.55; UCG: 1.36; UAG: 0.90; UGG: 1.00; CUU: 0.84; CCU: 0.93; CAU: 0.50; CGU: 0.97; CUC: 2.49; CCC: 1.66; CAC: 1.50; CGC: 2.45; CUA: 0.23; CCA: 0.53; CAA: 0.50; CGA: 0.75; CUG: 1.81; CCG: 0.89; CAG: 1.50; CGG: 0.71; AUU: 0.95; ACU: 0.58; AAU: 0.37; AGU: 0.39; AUC: 1.91; ACC: 1.62; AAC: 1.63; AGC: 1.49; AUA: 0.14; ACA: 0.58; AAA: 0.26; AGA: 0.36; AUG: 1.00; ACG: 1.22; AAG: 1.74; AGG: 0.76; GUU: 0.73; GCU: 0.80; GAU: 0.61; GGU: 0.91; GUC: 2.20; GCC: 1.98; GAC: 1.39; GGC: 2.32; GUA: 0.18; GCA: 0.44; GAA: 0.48; GGA: 0.46; GUG: 0.88; GCG: 0.77; GAG: 1.52; GGG: 0.31'
vd_table_obj = CodonTab_obj()
vd_table_obj.add_table(table)
# for k in vd_table_obj.weighting_dict.keys():
# print(vd_table_obj.weighting_dict[k][0].weightings)
# print(prot)
#-----------------------------------------------------
# Step X
# Optimise codons - random weightings
#-----------------------------------------------------
print("randomised codons:")
new_cds = optimise_rand(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - optimum codons
#-----------------------------------------------------
print("optimum sequence:")
new_cds = optimise_best(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Optimise codons - worst codons
#-----------------------------------------------------
print("worst sequence:")
new_cds = optimise_worst(prot)
print(new_cds)
seq_score, max = score_seq(new_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
#-----------------------------------------------------
# Step X
# Score 1000 sequences for optimisation scores
#-----------------------------------------------------
score_list = []
cds_list = []
f = open("_".join([prefix, "1000_seqs.fa"]), "w+")
for i in range(0, 1000, 1):
new_cds = optimise_rand(prot)
seq_score, max = score_seq(new_cds, vd_table_obj)
# print seq_score
cds_list.append(new_cds)
    score_list.append(round(seq_score, 2))  # keep scores numeric so sorting compares values
    f.write(">cds_" + str(i) + "_" + str(seq_score) + "\n")
    f.write(new_cds + "\n")
f.close()
f = open("_".join([prefix, "1000_scores.tsv"]), "w+")
f.write("\n".join(str(s) for s in score_list))
f.close()
midpoint_score = sorted(score_list)[500]
sorted_cds = [x for _,x in sorted(zip(score_list,cds_list))]
midpoint_cds = sorted_cds[500]
print("midpoint sequence:")
print(midpoint_score)
print(midpoint_cds)
#-----------------------------------------------------
# Step X
# Score the pre-optimised sequence
#-----------------------------------------------------
print("Score of the pre-optimised sequence:")
for record in cds_records:
    print(record.id)
old_cds = str(record.seq)
old_cds = old_cds.replace('T', 'U')
# print old_cds
seq_score, max = score_seq(old_cds, vd_table_obj)
print(" of ".join([str(seq_score), str(max)]))
# print(score_list)
# #set matplotlib to use a backend suitable for headless operation
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
#
# plt.hist(score_list, bins='auto')
# out='tmp.png'
# plt.savefig(out, dpi=300, bbox_inches='tight')
# rng = np.random.RandomState(10) # deterministic random data
# a = np.hstack((rng.normal(size=1000),
# rng.normal(loc=5, scale=2, size=1000)))
|
12,108 | 9c1cdf23b54a07be3c6967718e0cee50a532b689 | #!/usr/bin/env python3
import os
import yaml
HOME = os.path.expanduser("~")
class Dotfile:
def __init__(self, source, target_dir, dotify=False, create_parent=False):
self.source = source
target_file = os.path.basename(self.source)
if dotify:
target_file = ".{}".format(target_file)
self.target = os.path.join(target_dir, target_file)
self.create_parent = create_parent
def install(self):
if os.path.exists(self.target):
raise FileExistsError("Destination file {} already exists".format(self.target))
if not os.path.exists(os.path.dirname(self.target)):
if self.create_parent:
os.makedirs(os.path.dirname(self.target), exist_ok=True)
else:
raise FileNotFoundError("Parent directory doesn't exist")
os.symlink(self.source, self.target)
# Maintain abspath, because os.listdir doesn't
def listdir(path):
return [os.path.join(path, p) for p in os.listdir(path)]
def handle_dotfiles(directory):
def get_target_dir(defaults_dict):
if "target_dir" in defaults_dict:
return os.path.expanduser(defaults_dict.pop("target_dir"))
return HOME
visited = set()
defaults = {}
layout_file = os.path.join(directory, "layout.yaml")
if os.path.exists(layout_file):
with open(layout_file, "r") as fh:
layout = yaml.safe_load(fh)
visited.add(layout_file)
if "__directory__" in layout:
dir_settings = layout.pop("__directory__")
defaults = dir_settings.get("defaults", {})
visited.update(
[os.path.join(directory, p) for p in dir_settings.get("ignore", [])])
# Install files called out in layout.yaml
for source, args in layout.items():
source = os.path.join(directory, source)
visited.add(source)
def_args = defaults.copy()
def_args.update(args)
target_dir = get_target_dir(def_args)
dotfile = Dotfile(source, target_dir, **def_args)
try:
dotfile.install()
except (FileExistsError, FileNotFoundError) as exc:
print(exc)
# Install all other files
target_dir = get_target_dir(defaults)
for source in listdir(directory):
if source in visited:
continue
dotfile = Dotfile(source, target_dir, **defaults)
try:
dotfile.install()
except (FileExistsError, FileNotFoundError) as exc:
print(exc)
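# For reference, a layout.yaml consumed above might look like this (an assumed
# schema, inferred from the parsing logic rather than taken from the repo):
#
#   __directory__:
#     defaults:
#       dotify: true
#     ignore:
#       - README.md
#   nvim:
#     target_dir: "~/.config"
#     create_parent: true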
def main():
here = os.path.dirname(os.path.realpath(__file__))
for d in listdir(here):
if not os.path.isdir(d):
continue
if os.path.basename(d).startswith('.'):
continue
handle_dotfiles(d)
if __name__ == "__main__":
main()
|
12,109 | 07ca6ca4286b97357cec4a80177b92f15f75ceb9 | """Package of support modules for SriteBot."""
|
12,110 | ea515486ba00a9e5ced7ec43f078f9585faeafd6 | from types.user_mapping import UserMapping
USER_GROUPS_MAPPING_NAME = "user_category_mapping"
#----------------------------------------------------------------------------------------------
class GroupCategoryMapping():
def __init__(self, instance):
self.instance = instance
#----------------------------------------------------------------------------------------------
def init(self):
self.cat = self.instance.connection.db[USER_GROUPS_MAPPING_NAME]
#----------------------------------------------------------------------------------------------
def getMapping(self, group):
data = self.cat.find_one({'group_id':group._id})
if data:
return UserMapping(data)
return None
#----------------------------------------------------------------------------------------------
def removeMapping(self, userMapping):
out = False
if userMapping:
self.cat.remove({"_id":userMapping._id})
out = True
return out
#----------------------------------------------------------------------------------------------
def addMapping(self, userMapping):
self.cat.insert(userMapping.get())
#----------------------------------------------------------------------------------------------
def loadUserMapping(self, group):
return self.getMapping(group)
#----------------------------------------------------------------------------------------------
def addUserMappingCategory(self, mapping, user_category, aspect_name, aspect_category):
res = False
if mapping and user_category and aspect_category:
mapping.add(user_category._id, aspect_name, aspect_category._id)
res = True
return res
#----------------------------------------------------------------------------------------------
def removeUserMappingCategory(self, mapping, user_category, base_aspect_name):
res = False
if mapping and user_category:
res = mapping.remove(user_category._id, base_aspect_name)
return res
#----------------------------------------------------------------------------------------------
def clearUserMapping(self, mapping):
mapping.clear()
#----------------------------------------------------------------------------------------------
def updateUserMapping(self, mapping):
self.cat.update_one({
'_id': mapping._id
},{
'$set': {
'mapping': mapping.mapping
}
}, upsert=False)
#----------------------------------------------------------------------------------------------
def drop(self):
        '''Drop the collection. Remove before production use.'''
self.cat.drop() |
12,111 | bc2e8a6d761eee5c78fcd183dc0eb8f7e8aae7c9 | import pandas as pd
import sqlite3
import matplotlib.pyplot as plt
df_adidas = pd.read_csv('adidas_data.csv')
data_adidas = pd.DataFrame(df_adidas, columns=['product_name', 'product_id', 'listing_price', 'sale_price', 'discount'])
# print(data_adidas)
df_nike = pd.read_csv('nike_data.csv')
data_nike = pd.DataFrame(df_nike, columns=['Product Name', 'Product ID', 'Listing Price', 'Sale Price', 'Discount'])
con = sqlite3.connect('adidas_nike.sqlite')
cur = con.cursor()
con2 = sqlite3.connect('discount.sqlite')
cur2 = con2.cursor()
cur2.execute('''CREATE TABLE IF NOT EXISTS ad_ni_discount
(id INTEGER NOT NULL PRIMARY KEY UNIQUE, discount TEXT UNIQUE, count_adidas INTEGER, count_nike INTEGER)''')
cur.execute('''CREATE TABLE IF NOT EXISTS adidas
(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, product_name TEXT, product_id TEXT UNIQUE,
listing_price INTEGER, sale_price INTEGER, discount INTEGER)''')
cur.execute('''CREATE TABLE IF NOT EXISTS nike
(id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE, product_name TEXT, product_id TEXT UNIQUE,
listing_price INTEGER, sale_price INTEGER, discount INTEGER)''')
cur.execute('''CREATE TABLE IF NOT EXISTS ad_ni_price
(id INTEGER NOT NULL PRIMARY KEY UNIQUE, Price_Range TEXT UNIQUE, mrpcount_adidas INTEGER,
mrpcount_nike INTEGER, spcount_adidas INTEGER, spcount_nike INTEGER)''')
for row in data_adidas.itertuples():
# print(row)
pn = row.product_name
pi = row.product_id
lp = row.listing_price
sp = row.sale_price
dis = row.discount
# print(pn, pi, lp, sp, dis)
cur.execute('''
INSERT OR IGNORE INTO adidas (product_name, product_id, listing_price, sale_price, discount)
VALUES (?, ?, ?, ?, ?)''', (pn, pi, lp, sp, dis))
for row in data_nike.itertuples():
# print(row)
pn = row._1
pi = row._2
lp = row._3
sp = row._4
dis = row.Discount
# print(pn, pi, lp, sp, dis)
cur.execute('''
INSERT OR IGNORE INTO nike (product_name, product_id, listing_price, sale_price, discount)
VALUES (?, ?, ?, ?, ?)''', (pn, pi, lp, sp, dis))
cur.execute('''SELECT MIN(listing_price) FROM adidas''')
minrow_ad = cur.fetchone()[0]
cur.execute('''SELECT MAX(listing_price) FROM adidas''')
maxrow_ad = cur.fetchone()[0]
cur.execute('''SELECT COUNT (id) FROM adidas''')
count_adidas = cur.fetchone()[0]
print("Adidas has",count_adidas ,"products, and its price ranges from", minrow_ad, 'to', maxrow_ad)
cur.execute('''SELECT MIN(listing_price) FROM nike''')
minrow_ni = cur.fetchone()[0]
cur.execute('''SELECT MAX(listing_price) FROM nike''')
maxrow_ni = cur.fetchone()[0]
cur.execute('''SELECT COUNT (id) FROM nike''')
count_nike = cur.fetchone()[0]
print("Nike has",count_nike ,"products, and its price ranges from", minrow_ni, 'to', maxrow_ni)
# plt.bar(data_adidas['listing_price'], data_nike['Listing Price'])
# plt.xlabel("Nike Listing Price")
# plt.ylabel("Adidas Listing Price")
# plt.show()
def column_range(brand_name, column_name, LOWER, UPPER):
range_list = []
    cur.execute('''SELECT product_id FROM {} WHERE {} BETWEEN ? AND ?'''.format(brand_name, column_name), (LOWER, UPPER))
w = cur.fetchall()
# print(w)
range_list= map(lambda row: row[0], w)
return list(range_list)
# for rows in w:
# range_list.append(rows[0])
# return range_list
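# e.g. column_range('adidas', 'listing_price', 0, 2000) yields the product_ids
# of adidas rows whose listing_price falls within [0, 2000].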
id = 1
step = 2000  # avoid shadowing the built-in range()
start_range = 0
end_range = step
cur.execute('''DELETE FROM ad_ni_price''')
if maxrow_ni > maxrow_ad:
    while end_range < maxrow_ni + step:
my_str = str(start_range) + ' - ' + str(end_range)
count_list = column_range('adidas', 'listing_price', start_range, end_range)
count_list2 = column_range('nike', 'listing_price', start_range, end_range)
count_list3 = column_range('adidas', 'sale_price', start_range, end_range)
count_list4 = column_range('nike', 'sale_price', start_range, end_range)
count = len(count_list)
count2 = len(count_list2)
count3 = len(count_list3)
count4 = len(count_list4)
if count < 1:
count = 0
if count2 < 1:
count2 = 0
if count3 < 1:
count3 = 0
if count4 < 1:
count4 = 0
cur.execute('''INSERT OR IGNORE INTO ad_ni_price (id, Price_Range, mrpcount_adidas, mrpcount_nike, spcount_adidas, spcount_nike)
VALUES (?, ?, ?, ?, ?, ?)''', (id, my_str, count, count2, count3, count4))
start_range = end_range
        end_range = end_range + step
id = id + 1
else:
print("maxrow_ad > maxrow_ni. Change the code")
cur.execute('''SELECT COUNT (id) FROM ad_ni_price''')
count_rows = cur.fetchone()[0]
print(count_rows)
id_dis = 1
range_dis = 10
lower_dis = 0
higher_dis = range_dis
cur2.execute('''DELETE FROM ad_ni_discount''')
if maxrow_ni > maxrow_ad:
while higher_dis < 101:
my_str2 = str(lower_dis) + ' - ' + str(higher_dis)
count_list = column_range('adidas', 'discount', lower_dis, higher_dis)
count_list2 = column_range('nike', 'discount', lower_dis, higher_dis)
count = len(count_list)
count2 = len(count_list2)
if count < 1:
count = 0
if count2 < 1:
count2 = 0
cur2.execute('''INSERT OR IGNORE INTO ad_ni_discount (id, discount, count_adidas, count_nike)
VALUES (?, ?, ?, ?)''', (id_dis, my_str2, count, count2))
lower_dis = higher_dis
higher_dis = higher_dis + range_dis
id_dis = id_dis + 1
else:
print("maxrow_ad > maxrow_ni. Change the code")
cur2.execute('''SELECT COUNT (id) FROM ad_ni_discount''')
rows = cur2.fetchone()[0]
print(rows)
con.commit()
con2.commit() |
12,112 | 8a7220315397e8716b1a7a36a81213cfd6d2ed53 |
from tensorflow.keras import Model, Input
from model import layers
class DecoderModel(object):
def __init__(self, input_shape):
self.build(input_shape)
@property
def model(self):
return self._model
def build(self, input_shape):
'''
input: concat of z_a and z_p -> 16 x 16 x 256
output: reconstructed image 256 x 256 x 3
'''
concat = Input(shape=input_shape)
up = layers.up(concat) # 32 x 32
up = layers.conv_bn_act(up, 128, (3, 3))
up = layers.conv_bn_act(up, 128, (3, 3))
# up = layers.conv_bn_act(up, 128, (3, 3))
up = layers.up(up) # 64 x 64
up = layers.conv_bn_act(up, 64, (3, 3))
up = layers.conv_bn_act(up, 64, (3, 3))
# up = layers.conv_bn_act(up, 128, (3, 3))
up = layers.up(up) # 128 x 128
up = layers.conv_bn_act(up, 32, (3, 3))
up = layers.conv_bn_act(up, 32, (3, 3))
up = layers.up(up) # 256 x 256
up = layers.conv_bn_act(up, 3, (3, 3))
up = layers.conv_bn(up, 3, (1, 1)) # 3 channels, output shape of this should be (None, 3, 256, 256)
# TODO: should we permute here or have the input formatted with channels first?
# perm = Permute((1, 2))(up)
# i_hat = Permute((2, 3))(perm)
i_hat = up
self._model = Model(inputs=concat, outputs=i_hat, name='decoder')
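# A minimal usage sketch (the 16 x 16 x 256 input shape comes from the docstring above):
#   decoder = DecoderModel((16, 16, 256))
#   decoder.model.summary()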
|
12,113 | fd591457ce443a167a6fcad0ae99974cc685829e | import pymysql
db = pymysql.connect(host="localhost", user="root", password="Admin01", database="Empleado")
#db = pymysql.connect(host='localhost', port=3306, user='admin', passwd='Admin01', db='employees')
cursor = db.cursor()
# MySQL installation test
#cursor.execute("select version()")
#data = cursor.fetchone()
#print("version de MySQL: %s" % data)
#db.close()
#-------------
# Dropping / creating tables
#cursor.execute("DROP TABLE IF EXISTS empleado")
#sql = """CREATE TABLE EMPLEADO (NOMBRE VARCHAR(20) NOT NULL, APELLIDO VARCHAR(20), EDAD INT, SEXO CHAR(1), SALARIO FLOAT);"""
#db.close()
#---------------
# Insert data into the table
#sql = """INSERT INTO EMPLEADO(NOMBRE,APELLIDO,EDAD,SEXO,SALARIO)
#VALUES('Petra', 'Petrov', 32, 'F',7000)"""
#try:
# cursor.execute(sql)
# db.commit()
#except:
# db.rollback()
#db.close()
#----------------
# Read data from the employee table
e = int(input("Petra's age> "))
salarios = []
sql = "SELECT * FROM empleado WHERE edad > %s"  # parameterized query avoids manual quoting
try:
    cursor.execute(sql, (e,))
resultados = cursor.fetchall()
for registro in resultados:
salario = registro[4]
salarios.append(salario)
except Exception:
    print("Error fetching data!")
db.close()
if len(salarios) > 0:
    print("Petra's highest salary was $" + str(max(salarios)))
else:
    print("No salary for Petra in that age range")
#----------------
# Update data
#sql = "UPDATE EMPLEADO SET EDAD = EDAD + 1 WHERE SEXO = 'F'"
#try:
# cursor .execute(sql)
# db.commit()
#except:
# db.rollback()
#db.close()
# Delete data
#sql = "DELETE FROM EMPLEADO WHERE EDAD < 18"
#try:
# cursor .execute(sql)
# db.commit()
#except:
# db.rollback()
#db.close() |
12,114 | 1cbf44ed9075d83427d97ec50b1661aeefdb4c1a | # Generated by Django 2.2.2 on 2019-06-19 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='stock',
name='title',
field=models.CharField(db_index=True, max_length=200),
),
]
|
12,115 | df21edc96a6b4570ea06736177b946c486d1b333 | #key는 unique해야 하며, 불변 이다 , value 는 가변(변경 가능)
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
print ("dict['Name']: ", dict['Name'])
print ("dict['Age']: ", dict['Age'])
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
print ("dict['Alice']: ", dict['Alice']) #존재하지 않는 키로 요소에 접근할 경우?
dict['Age'] = 8 # change an element's value
dict['School'] = "DPS School" # add a new element
print ("dict['Age']: ", dict['Age'])
print ("dict['School']: ", dict['School'])
dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
del dict['Name'] # delete a specific element only
dict.clear() # delete all elements; the object remains as an empty dict instance
del dict # delete the name 'dict' (it then refers to the built-in type again)
print(dict) #error? (no: this now prints the built-in dict type)
print ("dict['Age']: ", dict['Age']) # this, however, raises a TypeError
print ("dict['School']: ", dict['School'])
dict = {'Name': 'Zara', 'Age': 7, 'Name': 'Manni'} # duplicate keys are overwritten; the last value wins
print ("dict['Name']: ", dict['Name'])
dict = {['Name']: 'Zara', 'Age': 7} # a mutable object as a key raises an error; keys must be immutable
print ("dict['Name']: ", dict['Name'])
dict = {'Name': 'Zara', 'Age': 7}
print ("Value : %s" % dict.items())
print ("Value : %s" % dict.keys())
print ("Value : %s" % dict.get('Age')) #없는 값을 요청할때
print ("Value : %s" % dict.get('Sex', "NA"))
dict = {'Sex': 'female', 'Age': 7, 'Name': 'Zara'}
print ("Values : ", list(dict.values()))
dict = {'Name': 'Manni', 'Age': 7, 'Class': 'First'} # number of elements in the dictionary
print ("Length : %d" % len (dict))
#######################################################
dict1 = {'Name': 'Zara', 'Age': 7};
dict2 = {'Name': 'Mahnaz', 'Age': 27};
dict3 = {'Name': 'Abid', 'Age': 27};
dict4 = {'Name': 'Zara', 'Age': 7};
print "Return Value : %d" % cmp (dict1, dict2)
print "Return Value : %d" % cmp (dict2, dict3)
print "Return Value : %d" % cmp (dict1, dict4)
|
12,116 | 1c54813df34ab8768c31d72ea37d4234d0573ca7 | # encoding=utf8
import random
def bubble_sort(nums):
    """
    Bubble sort
    A stable sort. Best-case time complexity O(n) (already sorted, using the
    early-exit flag below), worst and average case O(n^2), space O(1).
    :param nums:
    :return:
    """
    n = len(nums)
    for i in range(n - 1):
        swapped = False
        # each pass bubbles the largest remaining element to the end
        for j in range(n - 1 - i):
            if nums[j] > nums[j+1]:
                nums[j], nums[j+1] = nums[j+1], nums[j]
                swapped = True
        if not swapped:  # no swaps means the list is sorted: O(n) best case
            break
    return nums
if __name__ == '__main__':
nums = []
for i in range(10):
nums.append(random.randint(1, 100))
print(nums)
print('---------------------')
print(bubble_sort(nums)) |
12,117 | 417933aa5a3f5cad2a86d8a9099a02b4a9c250b8 | """
Code loading and analyzing SVHN images and data
"""
import os
import numpy as np
from PIL import Image
print('All modules imported.')
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def load_svhn_images(folder_path):
"""
Load in all images from a folder
:param folder_path: Path to folder containing
:return a numpy array of all the images
"""
images = []
for file in os.listdir(folder_path):
if file.endswith(".png"):
            image = Image.open(os.path.join(folder_path, file))
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32)
images.append(feature)
return images
IMAGES = load_svhn_images('data/train/')
HEIGHTS = [image.shape[0] for image in IMAGES]
WIDTHS = [image.shape[1] for image in IMAGES]
#---
MAX_HEIGHT, MIN_HEIGHT = max(HEIGHTS), min(HEIGHTS)
MAX_WIDTH, MIN_WIDTH = max(WIDTHS), min(WIDTHS)
print()
print("Max Height:", MAX_HEIGHT, "Min Height:", MIN_HEIGHT)
print("Max Width:", MAX_WIDTH, "Min Width:", MIN_WIDTH)
#---
import matplotlib
matplotlib.use("svg")
import matplotlib.pyplot as plt
# %matplotlib inline
# setup heights histogram
fig = plt.figure()
height_plot = fig.add_subplot(211)
n, bins, patches = height_plot.hist(HEIGHTS, 50, density=True, facecolor='green', alpha=0.75)
height_plot.set_xlabel('Image Height')
height_plot.set_ylabel('Fraction')
height_plot.set_title('Height Distribution')
height_plot.set_xlim(MIN_HEIGHT, MAX_HEIGHT)
height_plot.set_ylim(0, max(n))
height_plot.grid(True)
width_plot = fig.add_subplot(212)
n, bins, patches = width_plot.hist(WIDTHS, 50, density=True, facecolor='green', alpha=0.75)
width_plot.set_xlabel('Image Width')
width_plot.set_ylabel('Fraction')
width_plot.set_title('Width Distribution')
width_plot.set_xlim(MIN_WIDTH, MAX_WIDTH)
width_plot.set_ylim(0, max(n))
width_plot.grid(True)
plt.show()
#---
from digitStruct import DigitStruct, yieldNextDigitStruct
from tqdm import tqdm
def read_labels(digitstruct_file):
"""
Read in labels from digitStruct.mat file to create a dict of image file name and
corresponding labels
"""
labels = dict()
    for dsObj in tqdm(yieldNextDigitStruct(digitstruct_file), ncols=50):
image_labels = []
for bbox in dsObj.bboxList:
image_labels.append(bbox.label)
labels[dsObj.name] = image_labels
return labels
DSFILE = 'data/train/digitStruct.mat'
LABELS = read_labels(DSFILE)
#---
# View first few labels
for index in range(1, 4):  # digitStruct image names are 1-indexed
    image_file = '{}.png'.format(index)
    print(image_file, LABELS[image_file])
|
12,118 | e7ce4122a9aeaa7bce89b547fc74998276b15194 | from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
merge_layers = ['Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum', 'Concatenate', 'Dot']
@keras_handler(*merge_layers)
def parse_merge_layer(keras_layer, input_names, input_shapes, data_reader):
assert keras_layer['class_name'] in merge_layers
layer = parse_default_keras_layer(keras_layer, input_names)
layer['op'] = layer['class_name'].lower()
output_shape = input_shapes[0][:]
if layer['class_name'] == 'Concatenate':
rank = len(input_shapes[0][1:])
if rank > 3:
raise Exception('ERROR: Concatenation of tensors with rank > 3 is not yet supported.')
layer['op'] = layer['class_name'].lower() + f'{rank}d'
layer['axis'] = keras_layer['config']['axis']
output_shape[layer['axis']] += input_shapes[1][layer['axis']]
elif layer['class_name'] == 'Dot':
rank = len(input_shapes[0][1:])
if rank > 1:
raise Exception('ERROR: Dot of tensors with rank > 1 is not yet supported.')
layer['op'] = layer['class_name'].lower() + f'{rank}d'
else:
layer['class_name'] = 'Merge'
if len(layer['inputs']) > 2:
raise Exception('ERROR: Merging more than two tensors is not yet supported.')
return layer, output_shape
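# For example (assumed shapes): concatenating (None, 4, 8) with (None, 4, 8) along
# the last axis gives output_shape [None, 4, 16] and op 'concatenate2d'.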
|
12,119 | d15c5914979c3fd65165b73606883e7f3c050a98 | name= "data.csv"
print("name.split:")
|
12,120 | c644091e62283d8ed6861abf25029a89d84bfabb | """
Displays index.html. Leaves the routing to react.
"""
from flask import render_template
from . import app
@app.route('/')
@app.route('/gameDayLineups')
@app.route('/gameDateGames')
@app.route('/gameDayAnalysis')
def show_index():
return render_template('index.html')
|
12,121 | 2e4a484adcbd6989658addc70c811bd134eb2137 | #!/usr/bin/env python
import sublime
import sublime_plugin
import plistlib
import subprocess
import webbrowser
"""
macOS customize:
/usr/bin/open default
path/to/custom/open custom
export PATH=path/to/custom:$PATH ~/.bashrc
"""
MAC = "osx" in sublime.platform()
class WeblocCommand(sublime_plugin.WindowCommand):
@property
def path(self):
return sublime.active_window().active_view().file_name()
@property
def url(self):
plist = plistlib.readPlist(self.path)
        return plist["URL"]
def browse_mac(self):
args = ["open", self.url]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutdata, stderrdata = process.communicate()
code = process.returncode
if code != 0:
raise OSError(stderrdata.decode("utf-8"))
def browse(self):
if MAC:
self.browse_mac()
else:
webbrowser.open(self.url)
def run(self):
try:
self.browse()
except Exception as e:
msg = "%s\n%s" % (type(e), str(e))
sublime.error_message(msg)
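# For reference, a .webloc file is a small XML plist whose "URL" key is read above, e.g.:
#   <plist version="1.0"><dict><key>URL</key><string>https://example.com</string></dict></plist>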
|
12,122 | c10993063dffa31d28e979ce5997effd14483e19 | """файл с классом для форматирования текста постов"""
import re
from telegraph import Telegraph
from models import Post
from emoji import emojize
class Formatter:
"""класс для форматирования текста постов"""
def __init__(self,post):
self.post = post
self.text = emojize(self.post.text.replace("Корейскаякосметика","Корейскаякосметика"))
self.post_to_telegraph()
def get_name(self) -> str:
"""Извлекает название товара"""
try:
name = re.search("[a-zA-Z][a-zA-Z+. ]*",self.text).group(0)
except AttributeError:
name =""
return name
def get_tags(self) -> str:
"""Извлекает хештэги"""
tags = self.text.split("#")
return "#"+" #".join(tags[1::]).strip()
def get_title(self) -> str:
"""извлекает первый абзац описания"""
title = self.text.split("\n")
return title[0].strip()
def get_volume(self) -> str:
"""Извлекает строчку с объемом
Делает ее жирной
"""
try:
found = re.search('(.+?)Объем(.+?)мл', self.text).group(0)
found = f"<b>{found}</b>"
except AttributeError:
found = ''
return found
def get_delivery(self) -> str:
"""Извлекает строку с доставкой. Делает ее жирной"""
try:
found = re.search('(.+?)Имеется доставка', self.text).group(0)
found = f"<b>{found}</b>"
except AttributeError:
found = ''
return found
def post_to_telegraph(self) -> str:
"""Постит информацию в telegra.ph. Возвращает ссылку на пост"""
if self.post.telegraph is None or self.post.telegraph == "":
telegraph = Telegraph()
telegraph.create_account(short_name='1337')
images = ""
for media in self.post.links:
images += f'<img src="{media}">'
response = telegraph.create_page(
title=self.get_name(),
html_content=f'{images}<p>{self.text}</p>'
)
self.post.telegraph = "https://telegra.ph/{}".format(response['path'])
self.post.save()
return '<a href="https://telegra.ph/{}">Подробнее...</a>'.format(response['path'])
else:
return '<a href="{}">Подробнее...</a>'.format(self.post.telegraph)
def format(self) -> str:
"""Форматирует текст всеми методами"""
is_available = "Есть в наличии" if self.post.is_available else "Нет в наличии"
final_text = f"{self.get_title()}\nЦена: {self.post.cost} сум\n{self.get_volume()}\n{self.get_delivery()}\n<b>{is_available}</b>\n{self.post_to_telegraph()}"
return final_text
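# Hypothetical usage sketch (assumes a saved Post instance from models):
#   text = Formatter(post).format()   # HTML-formatted message body for a bot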
|
12,123 | 08389e2cdf8912778c9cd8d796838ee15691c9d2 | __author__ = 'wsr'
__date__ = '2018/10/25 0025 4:08 PM'
from django.conf.urls import url
from .views import *
urlpatterns = [
    url(r'^$', index, name='index'),  # home page
    url(r'^login/$', LoginView.as_view(), name='login'),  # login page
    url(r'^register/$', RegisterView.as_view(), name='register'),  # registration page
] |
12,124 | 0084b5137df5a7e6e6f04e0ca2ae84d6185cadfb | from abc import abstractmethod
from .base import OperatorConverter
class ATenPackSequenceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_pack_sequence(Tensor output, Tensor batch_sizes, Tensor? sorted_indices, Tensor? unsorted_indices) -> (Tensor, Tensor, Tensor?, Tensor?)'''
pass
class ATenAsTensorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::as_tensor(Tensor(a) data, *, int? dtype=None, Device? device=None) -> (Tensor(a|b))'''
pass
class ATenUpsampleSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__upsample(Tensor input, int? size=None, int? scale_factor=None, str mode="nearest", bool? align_corners=None) -> (Tensor)
aten::__upsample.size_list(Tensor input, int[]? size=None, int? scale_factor=None, str mode="nearest", bool? align_corners=None) -> (Tensor)'''
pass
class ATenHspmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hspmm(Tensor mat1, Tensor mat2) -> (Tensor)'''
pass
class ATenValuesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::values(Tensor(a) self) -> (Tensor(a))
aten::_values(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenIndicesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::indices(Tensor(a) self) -> (Tensor(a))
aten::_indices(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenNativeNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::native_norm(Tensor self, Scalar p=2) -> (Tensor)
aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, int? dtype) -> (Tensor)'''
pass
class ATenQuantizedMaxPool1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor)'''
pass
class ATenToDenseSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::to_dense(Tensor self, int? dtype=None) -> (Tensor)'''
pass
class ATenFlattenDenseTensorsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::flatten_dense_tensors(Tensor[] tensors) -> (Tensor)'''
pass
class ATenLinalgMatrixRankSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_matrix_rank(Tensor self, float? tol=None, bool hermitian=False) -> (Tensor)
aten::linalg_matrix_rank.tol_tensor(Tensor input, Tensor tol, bool hermitian=False) -> (Tensor)'''
pass
class ATenLinalgTensorinvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_tensorinv(Tensor self, int ind=2) -> (Tensor)'''
pass
class ATenLinalgPinvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_pinv(Tensor self, float rcond=1.0000000000000001e-15, bool hermitian=False) -> (Tensor)
aten::linalg_pinv.rcond_tensor(Tensor self, Tensor rcond, bool hermitian=False) -> (Tensor)'''
pass
class ATenLinalgCondSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_cond(Tensor self, Scalar? p=None) -> (Tensor)
aten::linalg_cond.p_str(Tensor self, str p) -> (Tensor)'''
pass
class ATenLinalgSvdvalsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_svdvals(Tensor input) -> (Tensor)'''
pass
class ATenLinalgSvdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_svd.U(Tensor self, bool full_matrices=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
aten::linalg_svd(Tensor self, bool full_matrices=True) -> (Tensor U, Tensor S, Tensor Vh)'''
pass
class ATenInnerSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::inner(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenLinalgInvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_inv(Tensor self) -> (Tensor)'''
pass
class ATenLinalgEigvalshSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_eigvalsh(Tensor self, str UPLO="L") -> (Tensor)'''
pass
class ATenLinalgEigvalsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_eigvals(Tensor self) -> (Tensor)'''
pass
class ATenLinalgCholeskySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_cholesky(Tensor self) -> (Tensor)'''
pass
class ATenFftIfftshiftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> (Tensor)'''
pass
class ATenFftFftshiftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_fftshift(Tensor self, int[1]? dim=None) -> (Tensor)'''
pass
class ATenFftIrfftnSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> (Tensor)'''
pass
class ATenFftRfftnSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> (Tensor)'''
pass
class ATenFftIrfft2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_irfft2(Tensor self, int[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> (Tensor)'''
pass
class ATenFftRfft2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_rfft2(Tensor self, int[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> (Tensor)'''
pass
class ATenFftFft2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_fft2(Tensor self, int[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> (Tensor)'''
pass
class ATenFftIhfftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> (Tensor)'''
pass
class ATenFftHfftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> (Tensor)'''
pass
class ATenFftIrfftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> (Tensor)'''
pass
class ATenFftRfftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> (Tensor)'''
pass
class ATenFftIfftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> (Tensor)'''
pass
class ATenSlowConv3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=[1, 1, 1], int[3] padding=[0, 0, 0]) -> (Tensor)'''
pass
class ATenThnnConvDepthwise2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=[1, 1], int[2] padding=[0, 0], int[2] dilation=[1, 1]) -> (Tensor)'''
pass
class ATenThnnConv2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=[1, 1], int[2] padding=[0, 0]) -> (Tensor)'''
pass
class ATenLogSigmoidSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::log_sigmoid(Tensor self) -> (Tensor)'''
pass
class ATenFloatPowerSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::float_power.Tensor_Tensor(Tensor self, Tensor exponent) -> (Tensor)
aten::float_power.Scalar(Scalar self, Tensor exponent) -> (Tensor)
aten::float_power.Tensor_Scalar(Tensor self, Scalar exponent) -> (Tensor)'''
pass
class ATenArgsortSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::argsort(Tensor self, int dim=-1, bool descending=False) -> (Tensor)
aten::argsort.dimname(Tensor self, str dim, bool descending=False) -> (Tensor)'''
pass
class ATenMsortSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::msort(Tensor self) -> (Tensor)'''
pass
class ATenNanquantileSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nanquantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False) -> (Tensor)
aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False) -> (Tensor)
aten::nanquantile.new_scalar(Tensor self, float q, int? dim, bool keepdim, *, str interpolation) -> (Tensor)
aten::nanquantile.new(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation) -> (Tensor)'''
pass
class ATenQuantileSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False) -> (Tensor)
aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False) -> (Tensor)
aten::quantile.new_scalar(Tensor self, float q, int? dim, bool keepdim, *, str interpolation) -> (Tensor)
aten::quantile.new(Tensor self, Tensor q, int? dim, bool keepdim, *, str interpolation) -> (Tensor)'''
pass
class ATenQrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R)'''
pass
class ATenSvdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V)
aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V)'''
pass
class ATenCrossEntropyLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, int ignore_index=-100) -> (Tensor)'''
pass
class ATenNonzeroNumpySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nonzero_numpy(Tensor self) -> (Tensor[])'''
pass
class ATenTakeAlongDimSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> (Tensor)'''
pass
class ATenScatterSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> (Tensor)
aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> (Tensor)
aten::scatter.dimname_src(Tensor self, str dim, Tensor index, Tensor src) -> (Tensor)
aten::scatter.dimname_value(Tensor self, str dim, Tensor index, Scalar value) -> (Tensor)'''
pass
class ATenIndexAddSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::index_add(Tensor self, int dim, Tensor index, Tensor source) -> (Tensor)
aten::index_add.alpha(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha) -> (Tensor)
aten::index_add.dimname(Tensor self, str dim, Tensor index, Tensor source, *, Scalar alpha=1) -> (Tensor)'''
pass
class ATenPutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::put(Tensor self, Tensor index, Tensor source, bool accumulate=False) -> (Tensor)'''
pass
class ATenMaskedScatterSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> (Tensor)'''
pass
class ATenQuantizedRnnReluCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor)'''
pass
class ATenQuantizedGruCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor)'''
pass
class ATenQuantizedLstmCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)'''
pass
class ATenRnnReluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)'''
pass
class ATenRnnTanhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)'''
pass
class ATenGruSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)'''
pass
class ATenLstmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)
aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)'''
pass
class ATenPadPackedSequenceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)'''
pass
class ATenCombinationsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> (Tensor)'''
pass
class ATenCartesianProdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cartesian_prod(Tensor[] tensors) -> (Tensor)'''
pass
class ATenMeshgridSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::meshgrid(Tensor[] tensors) -> (Tensor[])'''
pass
class ATenMaskedScaleSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_masked_scale(Tensor self, Tensor mask, float scale) -> (Tensor)'''
pass
class ATenFakeQuantizePerChannelAffineSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor)'''
pass
class ATenFakeQuantizePerTensorAffineSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor)'''
pass
class ATenCoalesceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::coalesce(Tensor(a) self) -> (Tensor(a))
aten::_coalesce(Tensor self) -> (Tensor)'''
pass
class ATenWeightNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> (Tensor)'''
pass
class ATenNormExceptDimSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> (Tensor)'''
pass
class ATenWhereSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::where.self(Tensor condition, Tensor self, Tensor other) -> (Tensor)
aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> (Tensor)
aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> (Tensor)
aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> (Tensor)
aten::where(Tensor condition) -> (Tensor[])'''
pass
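# Illustrative sketch, not part of the converter API: the three-argument overload of
# aten::where above is the public torch.where(condition, self, other) call.
def _demo_where():
    import torch
    x = torch.tensor([1.0, -2.0, 3.0])
    y = torch.zeros(3)
    return torch.where(x > 0, x, y)  # tensor([1., 0., 3.])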
class ATenTypeAsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::type_as(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenFlipudSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::flipud(Tensor self) -> (Tensor)'''
pass
class ATenFliplrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fliplr(Tensor self) -> (Tensor)'''
pass
class ATenOneHotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::one_hot(Tensor self, int num_classes=-1) -> (Tensor)'''
pass
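# Illustrative sketch, not part of the converter API: aten::one_hot is exposed as
# torch.nn.functional.one_hot, which expects int64 class indices.
def _demo_one_hot():
    import torch
    labels = torch.tensor([0, 2, 1])  # int64 by default
    return torch.nn.functional.one_hot(labels, num_classes=3)  # shape (3, 3)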
class ATenTileSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::tile(Tensor self, int[] dims) -> (Tensor)'''
pass
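# Illustrative sketch, not part of the converter API: aten::tile matches the public
# torch.tile call, which assumes PyTorch >= 1.8.
def _demo_tile():
    import torch
    x = torch.tensor([[1, 2], [3, 4]])
    return torch.tile(x, (2, 3))  # shape (4, 6)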
class ATenSumToSizeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sum_to_size(Tensor self, int[] size) -> (Tensor)'''
pass
class ATenIstftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> (Tensor)'''
pass
class ATenStftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> (Tensor)'''
pass
class ATenDstackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::dstack(Tensor[] tensors) -> (Tensor)'''
pass
class ATenHstackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hstack(Tensor[] tensors) -> (Tensor)'''
pass
class ATenDsplitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::dsplit.int(Tensor(a) self, int sections) -> (Tensor[])
aten::dsplit.array(Tensor(a) self, int[] indices) -> (Tensor[])'''
pass
class ATenVsplitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::vsplit.int(Tensor(a) self, int sections) -> (Tensor[])
aten::vsplit.array(Tensor(a) self, int[] indices) -> (Tensor[])'''
pass
class ATenHsplitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hsplit.int(Tensor(a) self, int sections) -> (Tensor[])
aten::hsplit.array(Tensor(a) self, int[] indices) -> (Tensor[])'''
pass
class ATenSmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::smm(Tensor self, Tensor mat2) -> (Tensor)'''
pass
class ATenSeluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::selu(Tensor self) -> (Tensor)'''
pass
class ATenRreluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> (Tensor)'''
pass
class ATenRavelSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ravel(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenPinverseSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pinverse(Tensor self, float rcond=1.0000000000000001e-15) -> (Tensor)'''
pass
class ATenPinMemorySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pin_memory(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenPixelUnshuffleSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pixel_unshuffle(Tensor self, int downscale_factor) -> (Tensor)'''
pass
class ATenPixelShuffleSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pixel_shuffle(Tensor self, int upscale_factor) -> (Tensor)'''
pass
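# Illustrative sketch, not part of the converter API: aten::pixel_shuffle rearranges a
# (N, C*r**2, H, W) tensor into (N, C, H*r, W*r), as in torch.nn.functional.pixel_shuffle.
def _demo_pixel_shuffle():
    import torch
    x = torch.randn(1, 4, 2, 2)  # C=1, r=2, so channels = C * r**2 = 4
    return torch.nn.functional.pixel_shuffle(x, upscale_factor=2)  # shape (1, 1, 4, 4)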
class ATenPairwiseDistanceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pairwise_distance(Tensor x1, Tensor x2, float p=2., float eps=9.9999999999999995e-07, bool keepdim=False) -> (Tensor)'''
pass
class ATenMatrixRankSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> (Tensor)
aten::matrix_rank(Tensor self, bool symmetric=False) -> (Tensor)'''
pass
class ATenKronSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::kron(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenInstanceNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> (Tensor)'''
pass
class ATenIndexCopySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> (Tensor)
aten::index_copy.dimname(Tensor self, str dim, Tensor index, Tensor source) -> (Tensor)'''
pass
class ATenLdexpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ldexp.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenEmbeddingBagSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)
aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)
aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False, int padding_idx=-1) -> (Tensor, Tensor, Tensor, Tensor)'''
pass
class ATenEinsumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::einsum(str equation, Tensor[] tensors) -> (Tensor)'''
pass
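# Illustrative sketch, not part of the converter API: aten::einsum is the public
# torch.einsum call; the equation string here expresses a plain matrix product.
def _demo_einsum():
    import torch
    a = torch.randn(2, 3)
    b = torch.randn(3, 4)
    return torch.einsum("ij,jk->ik", a, b)  # shape (2, 4)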
class ATenDiffSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> (Tensor)'''
pass
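# Illustrative sketch, not part of the converter API: aten::diff computes n-th order
# forward differences along a dimension, as in torch.diff (assumes PyTorch >= 1.8).
def _demo_diff():
    import torch
    x = torch.tensor([1, 3, 6, 10])
    return torch.diff(x)  # tensor([2, 3, 4])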
class ATenDiagflatSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::diagflat(Tensor self, int offset=0) -> (Tensor)'''
pass
class ATenDiagEmbedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> (Tensor)'''
pass
class ATenCtcLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> (Tensor)
aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> (Tensor)
aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)'''
pass
class ATenConvolutionModeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, int[] stride, str padding, int[] dilation, int groups) -> (Tensor)'''
pass
class ATenCpuSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cpu(Tensor(a) self) -> (Tensor(a|b))'''
pass
class ATenBlockDiagSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::block_diag(Tensor[] tensors) -> (Tensor)'''
pass
class ATenBroadcastToSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::broadcast_to(Tensor(a) self, int[] size) -> (Tensor(a))'''
pass
class ATenBroadcastTensorsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::broadcast_tensors(Tensor[] tensors) -> (Tensor[])'''
pass
class ATenBatchNormImplIndexSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int)'''
pass
class ATenBatchNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor)'''
pass
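# Illustrative sketch, not part of the converter API: aten::batch_norm is reachable via
# torch.nn.functional.batch_norm; in inference mode it normalizes with the running stats.
def _demo_batch_norm():
    import torch
    x = torch.randn(4, 3, 8, 8)
    running_mean = torch.zeros(3)
    running_var = torch.ones(3)
    return torch.nn.functional.batch_norm(
        x, running_mean, running_var, training=False, eps=1e-5
    )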
class ATenAtleast3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::atleast_3d(Tensor self) -> (Tensor)
aten::atleast_3d.Sequence(Tensor[] tensors) -> (Tensor[])'''
pass
class ATenAtleast2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::atleast_2d(Tensor self) -> (Tensor)
aten::atleast_2d.Sequence(Tensor[] tensors) -> (Tensor[])'''
pass
class ATenAtleast1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::atleast_1d(Tensor self) -> (Tensor)
aten::atleast_1d.Sequence(Tensor[] tensors) -> (Tensor[])'''
pass
class ATenDimArangeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_dim_arange(Tensor like, int dim) -> (Tensor)'''
pass
class ATenBatchNormStatsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor)'''
pass
class ATenCopyFromSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> (Tensor)'''
pass
class ATenAdaptiveMaxPool1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)'''
pass
class ATenAdaptiveAvgPool1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> (Tensor)'''
pass
class ATenCrowIndicesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::crow_indices(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenAvgPool1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True) -> (Tensor)'''
pass
class ATenFeatureAlphaDropoutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::feature_alpha_dropout(Tensor input, float p, bool train) -> (Tensor)'''
pass
class ATenBatchNormElemtSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps) -> (Tensor)'''
pass
class ATenAlphaDropoutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::alpha_dropout(Tensor input, float p, bool train) -> (Tensor)'''
pass
class ATenFeatureDropoutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::feature_dropout(Tensor input, float p, bool train) -> (Tensor)'''
pass
class ATenShapeAsTensorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_shape_as_tensor(Tensor self) -> (Tensor)'''
pass
class ATenQuantizedRnnTanhCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor)'''
pass
class ATenReshapeFromTensorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_reshape_from_tensor(Tensor self, Tensor shape) -> (Tensor)'''
pass
class ATenSobolEngineDrawSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, int? dtype) -> (Tensor, Tensor)'''
pass
class ATenLinalgQrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_qr.out(Tensor self, str mode="reduced", *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)
aten::linalg_qr(Tensor self, str mode="reduced") -> (Tensor Q, Tensor R)'''
pass
class ATenLinalgInvExSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_inv_ex.inverse(Tensor self, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)
aten::linalg_inv_ex(Tensor self, *, bool check_errors=False) -> (Tensor inverse, Tensor info)'''
pass
class ATenLinalgEighSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_eigh.eigvals(Tensor self, str UPLO="L", *, Tensor(a!) eigvals, Tensor(b!) eigvecs) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
aten::linalg_eigh(Tensor self, str UPLO="L") -> (Tensor eigenvalues, Tensor eigenvectors)'''
pass
class ATenLuSolveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> (Tensor)'''
pass
class ATenSolveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU)
aten::solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU)'''
pass
class ATenCholeskySolveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> (Tensor)'''
pass
class ATenEigSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors)'''
pass
class ATenSymeigSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors)'''
pass
class ATenChooseQparamsOptimizedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)'''
pass
class ATenPackPaddedSequenceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)'''
pass
class ATenFftIfft2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_ifft2(Tensor self, int[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> (Tensor)'''
pass
class ATenUnsafeViewSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_unsafe_view(Tensor self, int[] size) -> (Tensor)'''
pass
class ATenPadSequenceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.) -> (Tensor)'''
pass
class ATenTrilinearSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> (Tensor)'''
pass
class ATenRot90Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rot90(Tensor self, int k=1, int[] dims=[0, 1]) -> (Tensor)'''
pass
class ATenSlogdetSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)'''
pass
class ATenCeluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::celu(Tensor self, Scalar alpha=1.) -> (Tensor)'''
pass
class ATenRepeatSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::repeat(Tensor self, int[] repeats) -> (Tensor)'''
pass
class ATenEuclideanDistSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_euclidean_dist(Tensor x1, Tensor x2) -> (Tensor)'''
pass
class ATenMvlgammaSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mvlgamma(Tensor self, int p) -> (Tensor)'''
pass
class ATenLogdetSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logdet(Tensor self) -> (Tensor)'''
pass
class ATenInverseSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::inverse(Tensor self) -> (Tensor)'''
pass
class ATenGridSampler2dCpuFallbackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor)'''
pass
class ATenEmbeddingSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> (Tensor)'''
pass
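# Illustrative sketch, not part of the converter API: aten::embedding is a row lookup
# into a weight table. Note the functional API takes (input, weight) while the aten
# schema above lists the weight first.
def _demo_embedding():
    import torch
    weight = torch.randn(10, 4)       # vocabulary of 10, embedding dim 4
    indices = torch.tensor([1, 5, 1])
    return torch.nn.functional.embedding(indices, weight)  # shape (3, 4)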
class ATenUnpackDualSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)'''
pass
class ATenConvolutionBackwardOverrideableSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)'''
pass
class ATenMakeDualSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> (Tensor(a))'''
pass
class ATenConvolutionOverrideableSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> (Tensor)'''
pass
class ATenConstantPadNdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> (Tensor)'''
pass
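# Illustrative sketch, not part of the converter API: aten::constant_pad_nd takes pad
# amounts in last-dimension-first pairs, the same convention as torch.nn.functional.pad.
def _demo_constant_pad():
    import torch
    x = torch.ones(2, 3)
    # (left, right, top, bottom) for a 2-D tensor
    return torch.nn.functional.pad(x, (1, 1, 2, 2), mode="constant", value=0.0)  # shape (6, 5)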
class ATenAffineGridGeneratorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> (Tensor)'''
pass
class ATenSegmentReduceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::segment_reduce(Tensor data, str reduce, *, Tensor? lengths=None, Tensor? indices=None, int axis=0, bool unsafe=False, Scalar? initial=None) -> (Tensor)'''
pass
class ATenLinalgQrHelperSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_linalg_qr_helper(Tensor self, str mode) -> (Tensor, Tensor)'''
pass
class ATenXorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__xor__.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::__xor__.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenLinalgEigSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_eig.out(Tensor self, *, Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors)
aten::linalg_eig(Tensor self) -> (Tensor eigenvalues, Tensor eigenvectors)'''
pass
class ATenOrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__or__.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::__or__.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenLinalgLstsqSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)
aten::linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)'''
pass
class ATenSpecialXlog1pySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_xlog1py(Tensor self, Tensor other) -> (Tensor)
aten::special_xlog1py.self_scalar(Scalar self, Tensor other) -> (Tensor)
aten::special_xlog1py.other_scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenSpecialEntrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_entr(Tensor self) -> (Tensor)'''
pass
class ATenSlowConvDilated3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=[1, 1, 1], int[3] padding=[0, 0, 0], int[3] dilation=[1, 1, 1]) -> (Tensor)'''
pass
class ATenSlowConvDilated2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=[1, 1], int[2] padding=[0, 0], int[2] dilation=[1, 1]) -> (Tensor)'''
pass
class ATenSlowConvTranspose3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=[1, 1, 1], int[3] padding=[0, 0, 0], int[3] output_padding=[0, 0, 0], int[3] dilation=[1, 1, 1]) -> (Tensor)'''
pass
class ATenSlowConvTranspose2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=[1, 1], int[2] padding=[0, 0], int[2] output_padding=[0, 0], int[2] dilation=[1, 1]) -> (Tensor)'''
pass
class ATenAndSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__and__.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::__and__.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenUpsampleNearest2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> (Tensor)
aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)'''
pass
class ATenUpsampleNearest1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> (Tensor)
aten::upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)'''
pass
class ATenUpsampleTrilinear3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> (Tensor)
aten::upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)'''
pass
class ATenUpsampleBicubic2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> (Tensor)
aten::upsample_bicubic2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)'''
pass
class ATenUpsampleBilinear2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> (Tensor)
aten::upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)'''
pass
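# Illustrative sketch, not part of the converter API: bilinear 2-D upsampling of this
# kind is typically written with torch.nn.functional.interpolate, which is believed to
# lower to the aten::upsample_bilinear2d variants above.
def _demo_bilinear_upsample():
    import torch
    x = torch.randn(1, 3, 4, 4)
    return torch.nn.functional.interpolate(
        x, size=(8, 8), mode="bilinear", align_corners=False
    )  # shape (1, 3, 8, 8)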
class ATenUpsampleLinear1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> (Tensor)
aten::upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> (Tensor)'''
pass
class ATenUpsampleNearest3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -> (Tensor)
aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> (Tensor)'''
pass
class ATenReplicationPad3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::replication_pad3d(Tensor self, int[6] padding) -> (Tensor)'''
pass
class ATenReplicationPad2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::replication_pad2d(Tensor self, int[4] padding) -> (Tensor)'''
pass
class ATenReplicationPad1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::replication_pad1d(Tensor self, int[2] padding) -> (Tensor)'''
pass
class ATenReflectionPad2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::reflection_pad2d(Tensor self, int[4] padding) -> (Tensor)'''
pass
class ATenReflectionPad1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::reflection_pad1d(Tensor self, int[2] padding) -> (Tensor)'''
pass
class ATenMaxUnpool3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> (Tensor)'''
pass
class ATenMaxUnpool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> (Tensor)'''
pass
class ATenFractionalMaxPool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor)'''
pass
class ATenAvgPool3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=[0, 0, 0], bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> (Tensor)'''
pass
class ATenConvDepthwise3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv_depthwise3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, int[3] dilation) -> (Tensor)'''
pass
class ATenColIndicesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::col_indices(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenAvgPool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> (Tensor)'''
pass
class ATenEmptyQuantizedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::empty_quantized(int[] size, Tensor qtensor) -> (Tensor)'''
pass
class ATenQuantizedBatchNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> (Tensor)'''
pass
class ATenAdaptiveMaxPool3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)'''
pass
class ATenAdaptiveMaxPool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)'''
pass
class ATenAdaptiveAvgPool3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> (Tensor)
aten::_adaptive_avg_pool3d(Tensor self, int[3] output_size) -> (Tensor)'''
pass
class ATenSpecialI0eSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_i0e(Tensor self) -> (Tensor)'''
pass
class ATenAdaptiveAvgPool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> (Tensor)
aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> (Tensor)'''
pass
class ATenSoftshrinkSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::softshrink(Tensor self, Scalar lambd=0.5) -> (Tensor)'''
pass
class ATenSpecialExp2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_exp2(Tensor self) -> (Tensor)'''
pass
class ATenRreluWithNoiseSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> (Tensor)'''
pass
class ATenLeakyReluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> (Tensor)'''
pass
class ATenSpecialExpm1Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_expm1(Tensor self) -> (Tensor)'''
pass
class ATenHardswishSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hardswish(Tensor self) -> (Tensor)'''
pass
class ATenHardtanhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> (Tensor)'''
pass
class ATenFractionalMaxPool3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))
aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor)'''
pass
class ATenHardsigmoidSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hardsigmoid(Tensor self) -> (Tensor)'''
pass
class ATenGluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::glu(Tensor self, int dim=-1) -> (Tensor)'''
pass
class ATenEluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> (Tensor)'''
pass
class ATenSpecialExpitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_expit(Tensor self) -> (Tensor)'''
pass
class ATenSpecialLogitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_logit(Tensor self, float? eps=None) -> (Tensor)'''
pass
class ATenBucketizeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> (Tensor)
aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> (Tensor)'''
pass
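# Illustrative sketch, not part of the converter API: aten::bucketize maps each value to
# the index of the bucket it falls into, given sorted boundaries, as in torch.bucketize.
def _demo_bucketize():
    import torch
    boundaries = torch.tensor([1.0, 3.0, 5.0])
    values = torch.tensor([0.5, 3.0, 9.0])
    return torch.bucketize(values, boundaries)  # tensor([0, 1, 3]) with right=False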
class ATenSpecialErfinvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_erfinv(Tensor self) -> (Tensor)'''
pass
class ATenSpecialErfcSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_erfc(Tensor self) -> (Tensor)'''
pass
class ATenSpecialErfSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_erf(Tensor self) -> (Tensor)'''
pass
class ATenSpecialGammalnSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::special_gammaln(Tensor self) -> (Tensor)'''
pass
class ATenMoveaxisSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> (Tensor(a))
aten::moveaxis.int(Tensor(a) self, int source, int destination) -> (Tensor(a))'''
pass
class ATenSwapdimsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::swapdims(Tensor(a) self, int dim0, int dim1) -> (Tensor(a))'''
pass
class ATenSwapaxesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> (Tensor(a))'''
pass
class ATenRowStackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::row_stack(Tensor[] tensors) -> (Tensor)'''
pass
class ATenVstackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::vstack(Tensor[] tensors) -> (Tensor)'''
pass
class ATenNegativeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::negative(Tensor self) -> (Tensor)'''
pass
class ATenTruncSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::trunc(Tensor self) -> (Tensor)'''
pass
class ATenKeysSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::keys.Tensor(Dict(Tensor, t) self) -> (Tensor[](*))'''
pass
class ATenSubtractSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> (Tensor)
aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> (Tensor)'''
pass
class ATenSubSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> (Tensor)
aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> (Tensor)'''
pass
class ATenTransposeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> (Tensor(a))
aten::transpose.Dimname(Tensor(a) self, str dim0, str dim1) -> (Tensor(a))'''
pass
class ATenLinalgHouseholderProductSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_householder_product(Tensor input, Tensor tau) -> (Tensor)'''
pass
class ATenOrgqrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::orgqr(Tensor self, Tensor input2) -> (Tensor)'''
pass
class ATenRowwisePruneSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_rowwise_prune(Tensor weight, Tensor mask, int compressed_indices_dtype) -> (Tensor, Tensor)'''
pass
class ATenNewFullSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::new_full(Tensor self, int[] size, Scalar fill_value, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)'''
pass
class ATenNotEqualSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::not_equal.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::not_equal.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMinimumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::minimum(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenFmaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fmax(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenFminSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fmin(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenHistcSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> (Tensor)'''
pass
class ATenLuWithInfoSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor)'''
pass
class ATenGeqrfSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)
aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)'''
pass
class ATenCholeskyInverseSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cholesky_inverse(Tensor self, bool upper=False) -> (Tensor)'''
pass
class ATenSolveHelperSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor)'''
pass
class ATenCholeskySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cholesky(Tensor self, bool upper=False) -> (Tensor)'''
pass
class ATenGatherSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> (Tensor)
aten::gather.dimname(Tensor self, str dim, Tensor index, *, bool sparse_grad=False) -> (Tensor)'''
pass
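# Illustrative sketch, not part of the converter API: aten::gather picks elements along
# a dimension using an index tensor of the same rank, as in torch.gather.
def _demo_gather():
    import torch
    x = torch.tensor([[0, 1, 2], [3, 4, 5]])
    index = torch.tensor([[0, 2], [1, 0]])
    return torch.gather(x, 1, index)  # tensor([[0, 2], [4, 3]])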
class ATenDiagSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::diag(Tensor self, int diagonal=0) -> (Tensor)'''
pass
class ATenTriangularSolveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient)
aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient)'''
pass
class ATenFmodSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fmod.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::fmod.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMultiplySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::multiply.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::multiply.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenCholeskySolveHelperSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> (Tensor)'''
pass
class ATenTriuSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::triu(Tensor self, int diagonal=0) -> (Tensor)'''
pass
class ATenMulSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mul.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::mul.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenSymeigHelperSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor)'''
pass
class ATenTrilSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::tril(Tensor self, int diagonal=0) -> (Tensor)'''
pass
class ATenIm2colSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> (Tensor)'''
pass
class ATenLinalgSlogdetSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_slogdet.out(Tensor self, *, Tensor(a!) sign, Tensor(b!) logabsdet) -> (Tensor(a!) sign, Tensor(b!) logabsdet)
aten::linalg_slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet)'''
pass
class ATenRshiftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__rshift__.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::__rshift__.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenCol2imSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> (Tensor)'''
pass
class ATenLinalgCholeskyExSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_cholesky_ex.L(Tensor self, *, bool check_errors=False, Tensor(a!) L, Tensor(b!) info) -> (Tensor(a!) L, Tensor(b!) info)
aten::linalg_cholesky_ex(Tensor self, *, bool check_errors=False) -> (Tensor L, Tensor info)'''
pass
class ATenLshiftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__lshift__.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::__lshift__.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenLeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::le.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::le.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenFakeQuantizeLearnablePerChannelAffineSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max, float grad_factor=1.) -> (Tensor)'''
pass
class ATenFakeQuantizePerChannelAffineCachemaskSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fake_quantize_per_channel_affine_cachemask(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor output, Tensor mask)'''
pass
class ATenGreaterSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::greater.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::greater.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenFakeQuantizeLearnablePerTensorAffineSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> (Tensor)'''
pass
class ATenGtSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::gt.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::gt.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenMakePerChannelQuantizedTensorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> (Tensor)'''
pass
class ATenMakePerTensorQuantizedTensorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> (Tensor)'''
pass
class ATenGreaterEqualSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::greater_equal.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::greater_equal.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenDequantizeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::dequantize.self(Tensor self) -> (Tensor)
aten::dequantize.tensors(Tensor[] tensors) -> (Tensor[])
aten::dequantize.tensor(Tensor qtensor) -> (Tensor)
aten::dequantize.list(Tensor[] qtensors) -> (Tensor[])'''
pass
class ATenGeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ge.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::ge.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenQuantizePerTensorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantize_per_tensor(Tensor self, float scale, int zero_point, int dtype) -> (Tensor)
aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, int dtype) -> (Tensor[])'''
pass
class ATenLtSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lt.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::lt.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenHeavisideSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::heaviside(Tensor self, Tensor values) -> (Tensor)'''
pass
class ATenFrexpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::frexp.Tensor(Tensor self) -> (Tensor mantissa, Tensor exponent)'''
pass
class ATenBinomialSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> (Tensor)'''
pass
class ATenFftFftnSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> (Tensor)'''
pass
class ATenStandardGammaSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_standard_gamma(Tensor self, Generator? generator=None) -> (Tensor)'''
pass
class ATenFftIfftnSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> (Tensor)'''
pass
class ATenSWhereSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_s_where(Tensor condition, Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenUniqueDimConsecutiveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)'''
pass
class ATenUnflattenDenseTensorsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unflatten_dense_tensors(Tensor flat, Tensor[] tensors) -> (Tensor[])'''
pass
class ATenUniqueSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)'''
pass
class ATenFlipSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::flip(Tensor self, int[] dims) -> (Tensor)'''
pass
class ATenNansumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nansum(Tensor self, *, int? dtype=None) -> (Tensor)
aten::nansum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenUniqueConsecutiveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)'''
pass
class ATenTrueDivideSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::true_divide.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::true_divide.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenLogitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logit(Tensor self, float? eps=None) -> (Tensor)'''
pass
class ATenDivSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::div.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::div.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> (Tensor)
aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> (Tensor)'''
pass
class ATenHardshrinkSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hardshrink(Tensor self, Scalar lambd=0.5) -> (Tensor)'''
pass
class ATenAminSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> (Tensor)'''
pass
class ATenTakeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::take(Tensor self, Tensor index) -> (Tensor)'''
pass
class ATenAmaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> (Tensor)'''
pass
class ATenLinalgNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, int? dtype=None) -> (Tensor)
aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenLinalgMultiDotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_multi_dot(Tensor[] tensors) -> (Tensor)'''
pass
class ATenIndexSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::index.Tensor(Tensor self, Tensor?[] indices) -> (Tensor)
aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> (Tensor)'''
pass
class ATenDetSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::det(Tensor self) -> (Tensor)'''
pass
class ATenFftC2rSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fft_c2r(Tensor self, int[] dim, int normalization, int last_dim_size) -> (Tensor)'''
pass
class ATenFftR2cSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fft_r2c(Tensor self, int[] dim, int normalization, bool onesided) -> (Tensor)'''
pass
class ATenGridSampler3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor)'''
pass
class ATenGridSampler2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor)'''
pass
class ATenSspaddmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> (Tensor)'''
pass
class ATenClampSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> (Tensor)
aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> (Tensor)'''
pass
class ATenGcdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::gcd(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenExp2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::exp2(Tensor self) -> (Tensor)'''
pass
class ATenAtanSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::atan(Tensor self) -> (Tensor)'''
pass
class ATenCountNonzeroSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> (Tensor)
aten::count_nonzero(Tensor self, int? dim=None) -> (Tensor)'''
pass
class ATenPolarSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::polar(Tensor abs, Tensor angle) -> (Tensor)'''
pass
class ATenComplexSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::complex(Tensor real, Tensor imag) -> (Tensor)'''
pass
class ATenCopysignSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::copysign.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::copysign.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenBincountSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> (Tensor)'''
pass
class ATenUnique2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)'''
pass
class ATenBatchNormBackwardElemtSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, Tensor mean_dy, Tensor mean_dy_xmu, Tensor count) -> (Tensor)'''
pass
class ATenArgminSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> (Tensor)'''
pass
class ATenBatchNormBackwardReduceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor)'''
pass
class ATenArgmaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> (Tensor)'''
pass
class ATenAsinhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::asinh(Tensor self) -> (Tensor)'''
pass
class ATenColumnStackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::column_stack(Tensor[] tensors) -> (Tensor)'''
pass
class ATenNllLossNdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, int ignore_index=-100) -> (Tensor)'''
pass
class ATenFftFftSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> (Tensor)'''
pass
class ATenFixSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fix(Tensor self) -> (Tensor)'''
pass
class ATenAsinSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::asin(Tensor self) -> (Tensor)'''
pass
class ATenUpsampleBilinearSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__upsample_bilinear(Tensor input, int? size=None, int? scale_factor=None) -> (Tensor)
aten::__upsample_bilinear.size_list(Tensor input, int[]? size=None, int? scale_factor=None) -> (Tensor)
aten::__upsample_bilinear.scale_list(Tensor input, int? size=None, int[]? scale_factor=None) -> (Tensor)
aten::__upsample_bilinear.size_list_scale_list(Tensor input, int[]? size=None, int[]? scale_factor=None) -> (Tensor)'''
pass
class ATenBatchNormGatherStatsWithCountsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor)'''
pass
class ATenAcosSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::acos(Tensor self) -> (Tensor)'''
pass
class ATenSincSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sinc(Tensor self) -> (Tensor)'''
pass
class ATenSgnSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sgn(Tensor self) -> (Tensor)'''
pass
class ATenSiluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::silu(Tensor self) -> (Tensor)'''
pass
class ATenRemainderSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::remainder.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::remainder.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenOrmqrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> (Tensor)'''
pass
class ATenNonzeroSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nonzero(Tensor self) -> (Tensor)'''
pass
class ATenBitwiseXorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenBitwiseOrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bitwise_or.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::bitwise_or.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenBitwiseAndSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bitwise_and.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::bitwise_and.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenNativeBatchNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)
aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!))'''
pass
class ATenNarrowCopySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::narrow_copy(Tensor self, int dim, int start, int length) -> (Tensor)'''
pass
class ATenNanToNumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> (Tensor)'''
pass
class ATenDataSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::data(Tensor self) -> (Tensor)'''
pass
class ATenNegSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::neg(Tensor self) -> (Tensor)'''
pass
class ATenZerosLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::zeros_like(Tensor self, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenVarSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::var(Tensor self, bool unbiased=True) -> (Tensor)
aten::var.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor)
aten::var.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor)
aten::var.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor)
aten::var.correction_names(Tensor self, str[1] dim, *, int? correction, bool keepdim=False) -> (Tensor)'''
pass
class ATenGerSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ger(Tensor self, Tensor vec2) -> (Tensor)'''
pass
class ATenUnsafeSplitWithSizesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> (Tensor[])'''
pass
class ATenUnsafeSplitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> (Tensor[])'''
pass
class ATenUnflattenSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unflatten.Dimname(Tensor(a) self, str dim, int[] sizes, str[] names) -> (Tensor(a))
aten::unflatten.int(Tensor(a) self, int dim, int[] sizes, str[]? names=None) -> (Tensor(a))'''
pass
class ATenVanderSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::vander(Tensor x, int? N=None, bool increasing=False) -> (Tensor)'''
pass
class ATenViewAsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::view_as(Tensor(a) self, Tensor other) -> (Tensor(a))'''
pass
class ATenDivideSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::divide.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::divide.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> (Tensor)
aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> (Tensor)'''
pass
class ATenRollSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> (Tensor)'''
pass
class ATenLinalgDetSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_det(Tensor self) -> (Tensor)'''
pass
class ATenFftC2cSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fft_c2c(Tensor self, int[] dim, int normalization, bool forward) -> (Tensor)'''
pass
class ATenChainMatmulSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::chain_matmul(Tensor[] matrices) -> (Tensor)'''
pass
class ATenArctanhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::arctanh(Tensor self) -> (Tensor)'''
pass
class ATenNativeGroupNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::native_group_norm(Tensor input, Tensor? weight, Tensor? bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor)'''
pass
class ATenSquareSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::square(Tensor self) -> (Tensor)'''
pass
class ATenMinSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::min(Tensor self) -> (Tensor)
aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::min.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::min.names_dim_min(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::min.other(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenNanmedianSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nanmedian(Tensor self) -> (Tensor)
aten::nanmedian.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::nanmedian.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::nanmedian.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::nanmedian.names_dim_values(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)'''
pass
class ATenMeanSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mean(Tensor self, *, int? dtype=None) -> (Tensor)
aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)
aten::mean.names_dim(Tensor self, str[1] dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenPowSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> (Tensor)
aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> (Tensor)
aten::pow.Scalar(Scalar self, Tensor exponent) -> (Tensor)'''
pass
class ATenPolygammaSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::polygamma(int n, Tensor self) -> (Tensor)'''
pass
class ATenOnesLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ones_like(Tensor self, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenNextafterSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nextafter(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenRenameSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rename(Tensor(a) self, str[]? names) -> (Tensor(a))'''
pass
class ATenRefineNamesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::refine_names(Tensor(a) self, str[] names) -> (Tensor(a))'''
pass
class ATenMedianSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::median(Tensor self) -> (Tensor)
aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::median.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::median.names_dim_values(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)'''
pass
class ATenMaxPool3dWithIndicesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=[0, 0, 0], int[3] dilation=[1, 1, 1], bool ceil_mode=False) -> (Tensor, Tensor)
aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=[0, 0, 0], int[3] dilation=[1, 1, 1], bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))'''
pass
class ATenLogicalXorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logical_xor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMaxPool3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=[0, 0, 0], int[3] dilation=[1, 1, 1], bool ceil_mode=False) -> (Tensor)'''
pass
class ATenLogicalOrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logical_or(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMaxPool2dWithIndicesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False) -> (Tensor, Tensor)
aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))'''
pass
class ATenLogicalNotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logical_not(Tensor self) -> (Tensor)'''
pass
class ATenMaxPool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False) -> (Tensor)'''
pass
class ATenMaxPool1dWithIndicesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor, Tensor)'''
pass
class ATenLogicalAndSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logical_and(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMaxPool1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor)'''
pass
class ATenLogaddexp2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logaddexp2(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::max(Tensor self) -> (Tensor)
aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
aten::max.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::max.names_dim_max(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)
aten::max.other(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenLogaddexpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logaddexp(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMatrixExpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::matrix_exp(Tensor self) -> (Tensor)'''
pass
class ATenMatmulSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::matmul(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenMaskedSelectSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::masked_select(Tensor self, Tensor mask) -> (Tensor)'''
pass
class ATenMarginRankingLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0., int reduction=1) -> (Tensor)'''
pass
class ATenPoissonSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::poisson(Tensor self, Generator? generator=None) -> (Tensor)'''
pass
class ATenIndexFillSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::index_fill.Dimname_Scalar(Tensor self, str dim, Tensor index, Scalar value) -> (Tensor)
aten::index_fill.Dimname_Tensor(Tensor self, str dim, Tensor index, Tensor value) -> (Tensor)
aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> (Tensor)
aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> (Tensor)'''
pass
class ATenIgammacSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::igammac(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenIgammaSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::igamma(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenI0Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::i0(Tensor self) -> (Tensor)'''
pass
class ATenMaskedFillSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> (Tensor)
aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> (Tensor)'''
pass
class ATenLstsqSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR)
aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR)'''
pass
class ATenHypotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hypot(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenFullLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::full_like(Tensor self, Scalar fill_value, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenFloorDivideSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::floor_divide(Tensor self, Tensor other) -> (Tensor)
aten::floor_divide.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenFlattenSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::flatten.DimnameList(Tensor(a) self, str[] dims, str out_dim) -> (Tensor(a))
aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, str out_dim) -> (Tensor(a))
aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> (Tensor(a))
aten::flatten.using_names(Tensor(a) self, str start_dim, str end_dim, str out_dim) -> (Tensor(a))'''
pass
class ATenLogsumexpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> (Tensor)
aten::logsumexp.names(Tensor self, str[1] dim, bool keepdim=False) -> (Tensor)'''
pass
class ATenLogcumsumexpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::logcumsumexp(Tensor self, int dim) -> (Tensor)
aten::logcumsumexp.dimname(Tensor self, str dim) -> (Tensor)
aten::_logcumsumexp(Tensor self, int dim) -> (Tensor)'''
pass
class ATenLogSoftmaxBackwardDataSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> (Tensor)'''
pass
class ATenLessEqualSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::less_equal.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::less_equal.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenThresholdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::threshold(Tensor self, Scalar threshold, Scalar value) -> (Tensor)'''
pass
class ATenEmptyLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::empty_like(Tensor self, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenProdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::prod(Tensor self, *, int? dtype=None) -> (Tensor)
aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)
aten::prod.dim_Dimname(Tensor self, str dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenDropoutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::dropout(Tensor input, float p, bool train) -> (Tensor)'''
pass
class ATenDetachSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::detach(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenChannelShuffleSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::channel_shuffle(Tensor self, int groups) -> (Tensor)'''
pass
class ATenTensorSplitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::tensor_split.sections(Tensor(a) self, int sections, int dim=0) -> (Tensor[])
aten::tensor_split.indices(Tensor(a) self, int[] indices, int dim=0) -> (Tensor[])
aten::tensor_split.tensor_indices_or_sections(Tensor(a) self, Tensor tensor_indices_or_sections, int dim=0) -> (Tensor[])'''
pass
class ATenDeg2radSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::deg2rad(Tensor self) -> (Tensor)'''
pass
class ATenCumminSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cummin(Tensor self, int dim) -> (Tensor values, Tensor indices)
aten::cummin.dimname(Tensor self, str dim) -> (Tensor values, Tensor indices)
aten::cummin.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)'''
pass
class ATenLog2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::log2(Tensor self) -> (Tensor)'''
pass
class ATenLog1pSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::log1p(Tensor self) -> (Tensor)'''
pass
class ATenLog10Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::log10(Tensor self) -> (Tensor)'''
pass
class ATenBitwiseNotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bitwise_not(Tensor self) -> (Tensor)'''
pass
class ATenVarMeanSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
aten::var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
aten::var_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
aten::var_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
aten::var_mean.correction_names(Tensor self, str[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)'''
pass
class ATenArctanSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::arctan(Tensor self) -> (Tensor)'''
pass
class ATenVdotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::vdot(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenStdMeanSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)
aten::std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
aten::std_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)
aten::std_mean.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)
aten::std_mean.correction_names(Tensor self, str[1] dim, *, int? correction, bool keepdim=False) -> (Tensor, Tensor)'''
pass
class ATenAtanhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::atanh(Tensor self) -> (Tensor)'''
pass
class ATenBatchNormGatherStatsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? running_var, float momentum, float eps, int count) -> (Tensor, Tensor)'''
pass
class ATenAnySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::any(Tensor self) -> (Tensor)
aten::any.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor)
aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> (Tensor)'''
pass
class ATenAlignAsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::align_as(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenAliasSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::alias(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenUniqueDimSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)'''
pass
class ATenLcmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lcm(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenAddReluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> (Tensor)'''
pass
class ATenLayerNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enable=True) -> (Tensor)'''
pass
class ATenTrapzSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> (Tensor)
aten::trapz.dx(Tensor y, *, float dx=1., int dim=-1) -> (Tensor)'''
pass
class ATenFusedDropoutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fused_dropout(Tensor self, float p, Generator? generator=None) -> (Tensor, Tensor)'''
pass
class ATenSortedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sorted.Tensor(Tensor[](a) input) -> (Tensor[])'''
pass
class ATenBinaryCrossEntropySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> (Tensor)'''
pass
class ATenScatterAddSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> (Tensor)
aten::scatter_add.dimname(Tensor self, str dim, Tensor index, Tensor src) -> (Tensor)'''
pass
class ATenTensordotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> (Tensor)'''
pass
class ATenTensorToListSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_tensor_to_list(Tensor self) -> (int[])'''
pass
class ATenCrossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cross(Tensor self, Tensor other, int? dim=None) -> (Tensor)'''
pass
class ATenBilinearSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> (Tensor)'''
pass
class ATenCumprodSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cumprod(Tensor self, int dim, *, int? dtype=None) -> (Tensor)
aten::cumprod.dimname(Tensor self, str dim, *, int? dtype=None) -> (Tensor)
aten::_cumprod(Tensor self, int dim) -> (Tensor)'''
pass
class ATenLogSoftmaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::log_softmax.int(Tensor self, int dim, int? dtype=None) -> (Tensor)
aten::log_softmax.Dimname(Tensor self, str dim, *, int? dtype=None) -> (Tensor)
aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> (Tensor)'''
pass
class ATenAcoshSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::acosh(Tensor self) -> (Tensor)'''
pass
class ATenSoftmaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::softmax.int(Tensor self, int dim, int? dtype=None) -> (Tensor)
aten::softmax.Dimname(Tensor self, str dim, *, int? dtype=None) -> (Tensor)
aten::_softmax(Tensor self, int dim, bool half_to_float) -> (Tensor)'''
pass
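# Note that internal `_`-prefixed variants (aten::_softmax above, and
# aten::_log_softmax, aten::_cat, aten::_stack, aten::_bmm elsewhere in
# this file) are recorded in the same docstring as their public
# counterparts, so a single stub class covers both the public op and its
# private implementation overloads.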
class ATenAtan2Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::atan2(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenRenormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> (Tensor)'''
pass
class ATenCdistSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cdist(Tensor x1, Tensor x2, float p=2., int? compute_mode=None) -> (Tensor)'''
pass
class ATenPdistSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::pdist(Tensor self, float p=2.) -> (Tensor)'''
pass
class ATenDistSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::dist(Tensor self, Tensor other, Scalar p=2) -> (Tensor)'''
pass
class ATenMultiMarginLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=1) -> (Tensor)'''
pass
class ATenConv2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=[1, 1], int[2] padding=[0, 0], int[2] dilation=[1, 1], int groups=1) -> (Tensor)
aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=[1, 1], str padding="valid", int[2] dilation=[1, 1], int groups=1) -> (Tensor)'''
pass
class ATenSoftMarginLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::soft_margin_loss(Tensor self, Tensor target, int reduction=1) -> (Tensor)'''
pass
class ATenMultilabelMarginLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=1) -> (Tensor)'''
pass
class ATenKthvalueSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::kthvalue.dimname(Tensor self, int k, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)'''
pass
class ATenHuberLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::huber_loss(Tensor self, Tensor target, int reduction=1, float delta=1.) -> (Tensor)'''
pass
class ATenNllLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, int ignore_index=-100) -> (Tensor)'''
pass
class ATenPoissonNllLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> (Tensor)'''
pass
class ATenSortSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)
aten::sort.dimname_values(Tensor self, str dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::sort.dimname(Tensor self, str dim, bool descending=False) -> (Tensor values, Tensor indices)
aten::sort.dimname_values_stable(Tensor self, *, bool? stable, str dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::sort.dimname_stable(Tensor self, *, bool? stable, str dim, bool descending=False) -> (Tensor values, Tensor indices)'''
pass
class ATenArcsinhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::arcsinh(Tensor self) -> (Tensor)'''
pass
class ATenCosineSimilaritySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> (Tensor)'''
pass
class ATenGroupNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enabled=True) -> (Tensor)'''
pass
class ATenGeluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::gelu(Tensor self) -> (Tensor)'''
pass
class ATenCosineEmbeddingLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cosine_embedding_loss(Tensor input1, Tensor input2, Tensor target, float margin=0., int reduction=1) -> (Tensor)'''
pass
class ATenArcsinSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::arcsin(Tensor self) -> (Tensor)'''
pass
class ATenSoftplusSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> (Tensor)'''
pass
class ATenIndexSelectSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::index_select(Tensor self, int dim, Tensor index) -> (Tensor)
aten::index_select.dimname(Tensor self, str dim, Tensor index) -> (Tensor)'''
pass
class ATenErfinvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::erfinv(Tensor self) -> (Tensor)'''
pass
class ATenLinalgTensorsolveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> (Tensor)'''
pass
class ATenThnnFusedGruCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)'''
pass
class ATenNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::norm.Scalar(Tensor self, Scalar p=2) -> (Tensor)
aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> (Tensor)
aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, str[1] dim, bool keepdim=False) -> (Tensor)
aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, int dtype) -> (Tensor)
aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, int dtype) -> (Tensor)
aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, int dtype) -> (Tensor)'''
pass
class ATenThnnFusedLstmCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)'''
pass
class ATenRandLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rand_like(Tensor self, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenAddbmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> (Tensor)'''
pass
class ATenAlignToSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::align_to(Tensor(a) self, str[] names) -> (Tensor(a))
aten::align_to.ellipsis_idx(Tensor(a) self, str[] order, int ellipsis_idx) -> (Tensor(a))'''
pass
class ATenLinearSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> (Tensor)'''
pass
class ATenSqrtSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sqrt(Tensor self) -> (Tensor)'''
pass
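# A minimal sketch of how a concrete converter might build on one of these
# stubs, assuming the surrounding framework instantiates such subclasses
# and calls `parse` once per matching graph node. The body is illustrative
# only: the real lowering logic, and whatever graph_converter helpers it
# would use, are not part of this generated file.
class ATenSqrtOperatorExample(ATenSqrtSchema):
    def parse(self, node, attrs, args, graph_converter):
        # Hypothetical lowering for aten::sqrt(Tensor self) -> (Tensor); a
        # real implementation would emit the target-graph op here.
        raise NotImplementedError("illustrative stub only")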
class ATenConvolutionSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> (Tensor)
aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> (Tensor)
aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> (Tensor)'''
pass
class ATenConvTranspose3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=[1, 1, 1], int[3] padding=[0, 0, 0], int[3] output_padding=[0, 0, 0], int groups=1, int[3] dilation=[1, 1, 1]) -> (Tensor)'''
pass
class ATenXlogySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::xlogy.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::xlogy.Scalar_Self(Scalar self, Tensor other) -> (Tensor)
aten::xlogy.Scalar_Other(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenLstmCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)'''
pass
class ATenConvTranspose1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=[1], int[1] padding=[0], int[1] output_padding=[0], int groups=1, int[1] dilation=[1]) -> (Tensor)'''
pass
class ATenSoftmaxBackwardDataSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> (Tensor)'''
pass
class ATenArccoshSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::arccosh(Tensor self) -> (Tensor)'''
pass
class ATenEmptyPerChannelAffineQuantizedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=0) -> (Tensor)'''
pass
class ATenConvTbcSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> (Tensor)'''
pass
class ATenConv3dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=[1, 1, 1], int[3] padding=[0, 0, 0], int[3] dilation=[1, 1, 1], int groups=1) -> (Tensor)
aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[3] stride=[1, 1, 1], str padding="valid", int[3] dilation=[1, 1, 1], int groups=1) -> (Tensor)'''
pass
class ATenSmoothL1LossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=1, float beta=1.) -> (Tensor)'''
pass
class ATenConv1dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=[1], int[1] padding=[0], int[1] dilation=[1], int groups=1) -> (Tensor)
aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=[1], str padding="valid", int[1] dilation=[1], int groups=1) -> (Tensor)'''
pass
class ATenL1LossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::l1_loss(Tensor self, Tensor target, int reduction=1) -> (Tensor)'''
pass
class ATenNativeLayerNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::native_layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)'''
pass
class ATenKlDivSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::kl_div(Tensor self, Tensor target, int reduction=1, *, bool log_target=False) -> (Tensor)'''
pass
class ATenAddrSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> (Tensor)'''
pass
class ATenQPerChannelZeroPointsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::q_per_channel_zero_points(Tensor self) -> (Tensor)'''
pass
class ATenAddcmulSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> (Tensor)'''
pass
class ATenRandintLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::randint_like(Tensor self, int high, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)
aten::randint_like.low_dtype(Tensor self, int low, int high, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenAddmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> (Tensor)'''
pass
class ATenNormalSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::normal.Tensor_float(Tensor mean, float std=1., *, Generator? generator=None) -> (Tensor)
aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> (Tensor)
aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> (Tensor)'''
pass
class ATenRnnTanhCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor)'''
pass
class ATenBinaryCrossEntropyWithLogitsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1) -> (Tensor)'''
pass
class ATenRnnReluCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor)'''
pass
class ATenMseLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mse_loss(Tensor self, Tensor target, int reduction=1) -> (Tensor)'''
pass
class ATenQuantizePerChannelSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, int dtype) -> (Tensor)'''
pass
class ATenInterpolateSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__interpolate.scale_list(Tensor input, int? size=None, float[]? scale_factor=None, str mode="nearest", bool? align_corners=None, bool? recompute_scale_factor=None) -> (Tensor)
aten::__interpolate.size_list_scale_list(Tensor input, int[]? size=None, float[]? scale_factor=None, str mode="nearest", bool? align_corners=None, bool? recompute_scale_factor=None) -> (Tensor)
aten::__interpolate(Tensor input, int? size=None, float? scale_factor=None, str mode="nearest", bool? align_corners=None, bool? recompute_scale_factor=None) -> (Tensor)
aten::__interpolate.size_list(Tensor input, int[]? size=None, float? scale_factor=None, str mode="nearest", bool? align_corners=None, bool? recompute_scale_factor=None) -> (Tensor)'''
pass
class ATenExpandSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> (Tensor(a))'''
pass
class ATenSvdHelperSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor U, Tensor S, Tensor V)'''
pass
class ATenTraceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::trace(Tensor self) -> (Tensor)'''
pass
class ATenTripletMarginLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1., float p=2., float eps=9.9999999999999995e-07, bool swap=False, int reduction=1) -> (Tensor)'''
pass
class ATenNeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ne.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::ne.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenEqSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::eq.Tensor(Tensor self, Tensor other) -> (Tensor)
aten::eq.Scalar(Tensor self, Scalar other) -> (Tensor)'''
pass
class ATenNewZerosSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::new_zeros(Tensor self, int[] size, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)'''
pass
class ATenNewEmptyStridedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::new_empty_strided(Tensor self, int[] size, int[] stride, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)'''
pass
class ATenNewEmptySchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::new_empty(Tensor self, int[] size, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None) -> (Tensor)'''
pass
class ATenStackSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::stack(Tensor[] tensors, int dim=0) -> (Tensor)
aten::_stack(Tensor[] tensors, int dim=0) -> (Tensor)'''
pass
class ATenConvTranspose2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=[1, 1], int[2] padding=[0, 0], int[2] output_padding=[0, 0], int groups=1, int[2] dilation=[1, 1]) -> (Tensor)'''
pass
class ATenCatSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cat(Tensor[] tensors, int dim=0) -> (Tensor)
aten::cat.names(Tensor[] tensors, str dim) -> (Tensor)
aten::_cat(Tensor[] tensors, int dim=0) -> (Tensor)'''
pass
class ATenMmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mm(Tensor self, Tensor mat2) -> (Tensor)'''
pass
class ATenBmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bmm(Tensor self, Tensor mat2) -> (Tensor)
aten::_bmm(Tensor self, Tensor mat2, *, bool deterministic=False) -> (Tensor)'''
pass
class ATenSampleDirichletSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> (Tensor)'''
pass
class ATenDotSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::dot(Tensor self, Tensor tensor) -> (Tensor)'''
pass
class ATenViewAsComplexSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::view_as_complex(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenRelu6Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::relu6(Tensor self) -> (Tensor)'''
pass
class ATenPreluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::prelu(Tensor self, Tensor weight) -> (Tensor)'''
pass
class ATenViewAsRealSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::view_as_real(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenPositiveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::positive(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenImagSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::imag(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenMultinomialSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> (Tensor)'''
pass
class ATenExpm1Schema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::expm1(Tensor self) -> (Tensor)'''
pass
class ATenToSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::to.device(Tensor self, Device device, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor)
aten::to.dtype(Tensor self, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor)
aten::to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor)
aten::to.dtype_layout(Tensor self, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, int? memory_format=None) -> (Tensor)
aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> (Tensor(a|b))
aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> (Tensor(a|b))
aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> (Tensor(a|b))'''
pass
class ATenTopkSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)
aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)'''
pass
class ATenLessSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::less.Scalar(Tensor self, Scalar other) -> (Tensor)
aten::less.Tensor(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenNuclearNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nuclear_norm(Tensor self, bool keepdim=False) -> (Tensor)
aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> (Tensor)'''
pass
class ATenGridSamplerSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor)'''
pass
class ATenViewSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::view(Tensor(a) self, int[] size) -> (Tensor(a))
aten::view.dtype(Tensor(a) self, int dtype) -> (Tensor(a))'''
pass
class ATenMvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mv(Tensor self, Tensor vec) -> (Tensor)'''
pass
class ATenExpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::exp(Tensor self) -> (Tensor)'''
pass
class ATenRoundSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::round(Tensor self) -> (Tensor)'''
pass
class ATenClampMaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::clamp_max(Tensor self, Scalar max) -> (Tensor)
aten::clamp_max.Tensor(Tensor self, Tensor max) -> (Tensor)'''
pass
class ATenAngleSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::angle(Tensor self) -> (Tensor)'''
pass
class ATenClampMinSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::clamp_min(Tensor self, Scalar min) -> (Tensor)
aten::clamp_min.Tensor(Tensor self, Tensor min) -> (Tensor)'''
pass
class ATenSignSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sign(Tensor self) -> (Tensor)'''
pass
class ATenFracSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::frac(Tensor self) -> (Tensor)'''
pass
class ATenLogSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::log(Tensor self) -> (Tensor)'''
pass
class ATenSinSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sin(Tensor self) -> (Tensor)'''
pass
class ATenCloneSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::clone(Tensor self, *, int? memory_format=None) -> (Tensor)'''
pass
class ATenSignbitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::signbit(Tensor self) -> (Tensor)'''
pass
class ATenChunkSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::chunk(Tensor(a) self, int chunks, int dim=0) -> (Tensor[])'''
pass
class ATenLerpSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> (Tensor)
aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> (Tensor)'''
pass
class ATenAlignTensorsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::align_tensors(Tensor[] tensors) -> (Tensor[])'''
pass
class ATenOuterSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::outer(Tensor self, Tensor vec2) -> (Tensor)'''
pass
class ATenUnsqueezeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unsqueeze(Tensor(a) self, int dim) -> (Tensor(a))'''
pass
class ATenFloorSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::floor(Tensor self) -> (Tensor)'''
pass
class ATenRepeatInterleaveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::repeat_interleave.Tensor(Tensor repeats) -> (Tensor)
aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None) -> (Tensor)
aten::repeat_interleave.self_int(Tensor self, int repeats, int? dim=None) -> (Tensor)'''
pass
class ATenCumsumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cumsum(Tensor self, int dim, *, int? dtype=None) -> (Tensor)
aten::cumsum.dimname(Tensor self, str dim, *, int? dtype=None) -> (Tensor)
aten::_cumsum(Tensor self, int dim) -> (Tensor)'''
pass
class ATenBatchNormUpdateStatsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? running_var, float momentum) -> (Tensor, Tensor)'''
pass
class ATenTanSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::tan(Tensor self) -> (Tensor)'''
pass
class ATenAllSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::all(Tensor self) -> (Tensor)
aten::all.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor)
aten::all.dimname(Tensor self, str dim, bool keepdim=False) -> (Tensor)'''
pass
class ATenReciprocalSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::reciprocal(Tensor self) -> (Tensor)'''
pass
class ATenNcfViewSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_ncf_view(Tensor(a) self, int[] input_shape, int normalized_ndim) -> (Tensor(a))'''
pass
class ATenCosSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cos(Tensor self) -> (Tensor)'''
pass
class ATenRsqrtSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rsqrt(Tensor self) -> (Tensor)'''
pass
class ATenCeilSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::ceil(Tensor self) -> (Tensor)'''
pass
class ATenLinalgSolveSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_solve(Tensor input, Tensor other) -> (Tensor)'''
pass
class ATenSliceSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -> (Tensor(a))'''
pass
class ATenAbsoluteSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::absolute(Tensor self) -> (Tensor)'''
pass
class ATenSinhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sinh(Tensor self) -> (Tensor)'''
pass
class ATenConjSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::conj(Tensor(a) self) -> (Tensor(a))
aten::_conj(Tensor self) -> (Tensor)'''
pass
class ATenAbsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::abs(Tensor self) -> (Tensor)'''
pass
class ATenCoshSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cosh(Tensor self) -> (Tensor)'''
pass
class ATenRealSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::real(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenGruCellSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor)'''
pass
class ATenSizeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::size(Tensor self) -> (int[])'''
pass
class ATenNllLoss2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, int ignore_index=-100) -> (Tensor)'''
pass
class ATenFrobeniusNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::frobenius_norm(Tensor self) -> (Tensor)
aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> (Tensor)'''
pass
class ATenConvolutionNogroupSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_convolution_nogroup(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> (Tensor)'''
pass
class ATenArccosSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::arccos(Tensor self) -> (Tensor)'''
pass
class ATenContiguousSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::contiguous(Tensor(a) self, *, int memory_format=0) -> (Tensor(a))'''
pass
class ATenUnbindSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unbind.int(Tensor(a) self, int dim=0) -> (Tensor[])
aten::unbind.Dimname(Tensor(a) self, str dim) -> (Tensor[])'''
pass
class ATenCummaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
aten::cummax.dimname(Tensor self, str dim) -> (Tensor values, Tensor indices)
aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)'''
pass
class ATenLinalgMatrixNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_matrix_norm(Tensor self, Scalar ord, int[] dim=[-2, -1], bool keepdim=False, *, int? dtype=None) -> (Tensor)
aten::linalg_matrix_norm.str_ord(Tensor self, str ord="fro", int[] dim=[-2, -1], bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenComputeLinearCombinationSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> (Tensor)'''
pass
class ATenTSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::t(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenClipSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> (Tensor)
aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> (Tensor)'''
pass
class ATenStdSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::std(Tensor self, bool unbiased=True) -> (Tensor)
aten::std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor)
aten::std.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor)
aten::std.correction(Tensor self, int[1]? dim, *, int? correction, bool keepdim=False) -> (Tensor)
aten::std.correction_names(Tensor self, str[1] dim, *, int? correction, bool keepdim=False) -> (Tensor)'''
pass
class ATenSqueezeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::squeeze(Tensor(a) self) -> (Tensor(a))
aten::squeeze.dim(Tensor(a) self, int dim) -> (Tensor(a))
aten::squeeze.dimname(Tensor(a) self, str dim) -> (Tensor(a))'''
pass
class ATenReshapeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::reshape(Tensor(a) self, int[] shape) -> (Tensor(a))'''
pass
class ATenNcfUnsqueezeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_ncf_unsqueeze(Tensor(a) self, int ndim) -> (Tensor(a))'''
pass
class ATenIndexPutSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> (Tensor)
aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> (Tensor)'''
pass
class ATenBernoulliSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::bernoulli(Tensor self, *, Generator? generator=None) -> (Tensor)
aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> (Tensor)'''
pass
class ATenBaddbmmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> (Tensor)'''
pass
class ATenPermuteSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::permute(Tensor(a) self, int[] dims) -> (Tensor(a))'''
pass
class ATenNumpyTSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::numpy_T(Tensor(a) self) -> (Tensor(a))'''
pass
class ATenRad2degSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rad2deg(Tensor self) -> (Tensor)'''
pass
class ATenQuantizedMaxPool2dSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False) -> (Tensor)'''
pass
class ATenAddSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> (Tensor)
aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> (Tensor)'''
pass
class ATenRandnLikeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::randn_like(Tensor self, *, int? dtype=None, int? layout=None, Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)'''
pass
class ATenIntReprSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::int_repr(Tensor self) -> (Tensor)'''
pass
class ATenAddmvSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> (Tensor)'''
pass
class ATenQPerChannelScalesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::q_per_channel_scales(Tensor self) -> (Tensor)'''
pass
class ATenAddcdivSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> (Tensor)'''
pass
class ATenSplitSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> (Tensor[])
aten::split(Tensor self, int[] split_sizes, int dim=0) -> (Tensor[])'''
pass
class ATenNarrowSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::narrow(Tensor(a) self, int dim, int start, int length) -> (Tensor(a))
aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> (Tensor(a))'''
pass
class ATenMovedimSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> (Tensor(a))
aten::movedim.int(Tensor(a) self, int source, int destination) -> (Tensor(a))'''
pass
class ATenAsStridedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::as_strided(Tensor(a) self, int[] size, int[] stride, int? storage_offset=None) -> (Tensor(a))'''
pass
class ATenReluSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::relu(Tensor self) -> (Tensor)'''
pass
class ATenRemoveBatchDimSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> (Tensor)'''
pass
class ATenSearchsortedSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False) -> (Tensor)
aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False) -> (Tensor)'''
pass
class ATenSigmoidSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sigmoid(Tensor self) -> (Tensor)'''
pass
class ATenDiagonalSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> (Tensor(a))
aten::diagonal.Dimname(Tensor(a) self, *, str outdim, str dim1, str dim2, int offset=0) -> (Tensor(a))'''
pass
class ATenSplitWithSizesSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -> (Tensor[])'''
pass
class ATenMaximumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::maximum(Tensor self, Tensor other) -> (Tensor)'''
pass
class ATenUnfoldSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unfold(Tensor(a) self, int dimension, int size, int step) -> (Tensor(a))'''
pass
class ATenErfcSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::erfc(Tensor self) -> (Tensor)'''
pass
class ATenDigammaSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::digamma(Tensor self) -> (Tensor)'''
pass
class ATenQuantizedGruSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_gru.input(Tensor input, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
aten::quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)
aten::quantized_gru.input_legacy(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)
aten::quantized_gru.data_legacy(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)'''
pass
class ATenLinalgVectorNormSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenAminmaxSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_aminmax(Tensor self) -> (Tensor, Tensor)
aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)'''
pass
class ATenSumSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)
aten::sum(Tensor self, *, int? dtype=None) -> (Tensor)
aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, int? dtype=None) -> (Tensor)'''
pass
class ATenAddBatchDimSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> (Tensor)'''
pass
class ATenUpsampleNearestSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::__upsample_nearest(Tensor input, int? size=None, int? scale_factor=None) -> (Tensor)
aten::__upsample_nearest.size_list(Tensor input, int[]? size=None, int? scale_factor=None) -> (Tensor)'''
pass
class ATenExpandAsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::expand_as(Tensor(a) self, Tensor other) -> (Tensor(a))'''
pass
class ATenModeSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::mode.dimname(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)
aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)'''
pass
class ATenUnsafeChunkSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> (Tensor[])'''
pass
class ATenSelectSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::select.int(Tensor(a) self, int dim, int index) -> (Tensor(a))
aten::select.Dimname(Tensor(a) self, str dim, int index) -> (Tensor(a))'''
pass
class ATenLinalgMatrixPowerSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::linalg_matrix_power(Tensor self, int n) -> (Tensor)'''
pass
class ATenInverseHelperSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_inverse_helper(Tensor self) -> (Tensor)'''
pass
class ATenRsubSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> (Tensor)
aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> (Tensor)'''
pass
class ATenQuantizedLstmSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::quantized_lstm.input(Tensor input, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, int? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
aten::quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, int? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
aten::quantized_lstm.input_legacy(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, int? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)
aten::quantized_lstm.data_legacy(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, int? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)'''
pass
class ATenFwPrimalSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::_fw_primal(Tensor(a) self, int level) -> (Tensor(a))'''
pass
class ATenMishSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::mish(Tensor self) -> (Tensor)'''
pass
class ATenReshapeAsSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::reshape_as(Tensor(a) self, Tensor other) -> (Tensor(a))'''
pass
class ATenTanhSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::tanh(Tensor self) -> (Tensor)'''
pass
class ATenFakeQuantizePerTensorAffineCachemaskSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)'''
pass
class ATenLgammaSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::lgamma(Tensor self) -> (Tensor)'''
pass
class ATenHingeEmbeddingLossSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1., int reduction=1) -> (Tensor)'''
pass
class ATenMatrixPowerSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::matrix_power(Tensor self, int n) -> (Tensor)'''
pass
class ATenErfSchema(OperatorConverter):
@abstractmethod
def parse(self, node, attrs, args, graph_converter):
'''aten::erf(Tensor self) -> (Tensor)'''
pass
|
12,125 | 9e420833d8016a97809726225a3e5e002066f2c3 | import os
from engine.core import module
from engine.hardware import use_gpu, first_device, all_devices, device_description
from engine.logging import print_info, print_errors, print_debug
import torch
from engine.parameters import special_parameters
from engine.path import output_path
_checkpoint_path = 'models/{}.torch'
_model = None
_optimizer = None
_checkpoint = {}
@module
def create_model(model_class, model_params=None, model_name='model'):
"""
create and eventually load model
:param model_name:
:param model_class:
:param model_params:
:param model_name:
:return:
"""
model_params = {} if model_params is None else model_params
model = model_class(**model_params)
if special_parameters.load_model: # recover from checkpoint
_load_model(model, model_name)
# configure usage on GPU
if use_gpu():
model.to(first_device())
model = torch.nn.DataParallel(model, device_ids=all_devices())
# print info about devices
    print_info('Device(s): ' + str(device_description()))
return model
def create_optimizer(parameters, optimizer_class, optim_params, model_name='model'):
"""
create and eventually load optimizer
:param model_name:
:param parameters:
:param optimizer_class:
:param optim_params:
:return:
"""
opt = optimizer_class(parameters, **optim_params)
if special_parameters.load_model:
_load_optimizer(opt, model_name)
return opt
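# Hypothetical usage sketch (names such as `Net` are illustrative, not part of
# this module): when `special_parameters.load_model` is set, the pair below
# transparently resumes from the checkpoint written by `save_checkpoint`.
#
#   model = create_model(Net, {'num_classes': 10})
#   optimizer = create_optimizer(model.parameters(), torch.optim.SGD, {'lr': 0.01})
#   ...train...
#   save_checkpoint(model, optimizer)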
def _load_optimizer(optimizer, model_name):
"""
load checkpoint
:param optimizer:
:return:
"""
global _checkpoint
if model_name not in _checkpoint:
_load_checkpoint(model_name)
if 'optimizer_state_dict' in _checkpoint[model_name]:
optimizer.load_state_dict(_checkpoint[model_name]['optimizer_state_dict'])
def _load_model(model, model_name, path=None, reload=False):
"""
load checkpoint
:param model:
:param model_name:
:return:
"""
global _checkpoint
if model_name not in _checkpoint or reload:
_load_checkpoint(model_name, path=path)
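    # checkpoints written by save_checkpoint wrap the weights in a dict under
    # 'model_state_dict'; older files may hold a raw state dict directly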
if 'model_state_dict' in _checkpoint[model_name]:
model.load_state_dict(_checkpoint[model_name]['model_state_dict'])
else:
model.load_state_dict(_checkpoint[model_name])
def _load_checkpoint(model_name, path=None):
if path is None:
path = output_path(_checkpoint_path.format(model_name), have_validation=True)
global _checkpoint
if not os.path.isfile(path):
print_errors('{} does not exist'.format(path), do_exit=True)
print_debug('Loading checkpoint from ' + path)
_checkpoint[model_name] = torch.load(path)
def load_checkpoint(model, model_name='model', validation_id=None):
"""
change state of the model
"""
path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)
    _load_model(model.module if isinstance(model, torch.nn.DataParallel) else model, model_name, path=path, reload=True)
def save_checkpoint(model, optimizer=None, model_name='model', validation_id=None):
"""
save checkpoint (optimizer and model)
:param model_name:
:param validation_id:
:param model:
:param optimizer:
:return:
"""
path = output_path(_checkpoint_path.format(model_name), validation_id=validation_id, have_validation=True)
print_debug('Saving checkpoint: ' + path)
    model = model.module if isinstance(model, torch.nn.DataParallel) else model
checkpoint = {
'model_state_dict': model.state_dict()
}
if optimizer is not None:
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
torch.save(checkpoint, path)
|
12,126 | a1160401f1c4c2e4f8e2651681a32c837acd457d | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
## -------- General Local Endpoint Errors -------- ##
from typing import Union
from azure.ai.ml._ml_exceptions import MlException, ErrorCategory, ErrorTarget
class LocalEndpointNotFoundError(MlException):
def __init__(self, endpoint_name: str, deployment_name: str = None, error_category=ErrorCategory.USER_ERROR):
resource_name = (
f"Local deployment ({endpoint_name} / {deployment_name})"
if deployment_name
else f"Local endpoint ({endpoint_name})"
)
err = f"{resource_name} does not exist."
resource_type = "deployment" if deployment_name else "endpoint"
super().__init__(
message=err,
error_category=error_category,
target=ErrorTarget.LOCAL_ENDPOINT,
no_personal_data_message=f"Local ({resource_type}) does not exist.",
)
class LocalEndpointInFailedStateError(MlException):
def __init__(self, endpoint_name, deployment_name=None, error_category=ErrorCategory.UNKNOWN):
resource_name = (
f"Local deployment ({endpoint_name} / {deployment_name})"
if deployment_name
else f"Local endpoint ({endpoint_name})"
)
err = f"{resource_name} is in failed state. Try getting logs to debug scoring script."
resource_type = "deployment" if deployment_name else "endpoint"
super().__init__(
message=err,
error_category=error_category,
target=ErrorTarget.LOCAL_ENDPOINT,
no_personal_data_message=f"Local ({resource_type}) is in failed state. Try getting logs to debug scoring script.",
)
class DockerEngineNotAvailableError(MlException):
def __init__(self, error_category=ErrorCategory.UNKNOWN):
msg = "Please make sure Docker Engine is installed and running. https://docs.docker.com/engine/install/"
super().__init__(
message=msg, no_personal_data_message=msg, target=ErrorTarget.LOCAL_ENDPOINT, error_category=error_category
)
class MultipleLocalDeploymentsFoundError(MlException):
def __init__(self, endpoint_name: str, error_category=ErrorCategory.UNKNOWN):
super().__init__(
message=f"Multiple deployments found for local endpoint ({endpoint_name}), please specify deployment name.",
no_personal_data_message="Multiple deployments found for local endpoint, please specify deployment name.",
error_category=error_category,
target=ErrorTarget.LOCAL_ENDPOINT,
)
class InvalidLocalEndpointError(MlException):
def __init__(self, message: str, no_personal_data_message: str, error_category=ErrorCategory.USER_ERROR):
super().__init__(
message=message,
target=ErrorTarget.LOCAL_ENDPOINT,
no_personal_data_message=no_personal_data_message,
error_category=error_category,
)
class LocalEndpointImageBuildError(MlException):
def __init__(self, error: Union[str, Exception], error_category=ErrorCategory.UNKNOWN):
err = f"Building the local endpoint image failed with error: {str(error)}"
        super().__init__(
            message=err,
            target=ErrorTarget.LOCAL_ENDPOINT,
            no_personal_data_message="Building the local endpoint image failed with error.",
            error_category=error_category,
            error=error if isinstance(error, Exception) else None,
        )
class LocalEndpointImageBuildCondaError(LocalEndpointImageBuildError):
def __init__(self, error: Union[str, Exception], conda_file_path: str, conda_yaml_contents: str):
err = f"Issue creating conda environment:\n{error}"
if conda_file_path:
err += f"\nPlease check configuration of the conda yaml source: {conda_file_path}"
err += f"\n\nConda yaml contents:\n{conda_yaml_contents}\n"
super().__init__(err)
class CloudArtifactsNotSupportedError(MlException):
def __init__(
self,
endpoint_name: str,
invalid_artifact: str,
deployment_name: str = None,
error_category=ErrorCategory.USER_ERROR,
):
resource_name = (
f"local deployment ({endpoint_name} / {deployment_name})"
if deployment_name
else f"local endpoint ({endpoint_name})"
)
err = f"Local endpoints only support local artifacts. '{invalid_artifact}' in {resource_name} referenced cloud artifacts."
super().__init__(
message=err,
target=ErrorTarget.LOCAL_ENDPOINT,
no_personal_data_message="Local endpoints only support local artifacts.",
error_category=error_category,
)
class RequiredLocalArtifactsNotFoundError(MlException):
def __init__(
self,
endpoint_name: str,
required_artifact: str,
required_artifact_type: str,
deployment_name: str = None,
error_category=ErrorCategory.USER_ERROR,
):
resource_name = (
f"Local deployment ({endpoint_name} / {deployment_name})"
if deployment_name
else f"Local endpoint ({endpoint_name})"
)
err = f"Local endpoints only support local artifacts. {resource_name} did not contain required local artifact '{required_artifact}' of type '{required_artifact_type}'."
super().__init__(
message=err,
target=ErrorTarget.LOCAL_ENDPOINT,
no_personal_data_message="Resource group did not contain required local artifact.",
error_category=error_category,
)
## -------- VSCode Debugger Errors -------- ##
class InvalidVSCodeRequestError(MlException):
def __init__(self, error_category=ErrorCategory.USER_ERROR, msg=None):
super().__init__(
message=msg, target=ErrorTarget.LOCAL_ENDPOINT, no_personal_data_message=msg, error_category=error_category
)
class VSCodeCommandNotFound(MlException):
def __init__(self, output=None, error_category=ErrorCategory.USER_ERROR):
error_msg = f" due to error: [{output}]" if output else ""
super().__init__(
message=f"Could not start VSCode instance{error_msg}. Please make sure the VSCode command 'code' is installed and accessible from PATH environment variable. See https://code.visualstudio.com/docs/editor/command-line#_common-questions.\n",
target=ErrorTarget.LOCAL_ENDPOINT,
no_personal_data_message="Could not start VSCode instance.",
error_category=error_category,
)
|
12,127 | a38f6e4bd75237ccee0bcdfcfdd9d67f19d509b7 | from section import *
from symbols import *
from dispatcher import dispatcher
arg_table = {}
class simple_section(section) :
"""
Simple point-to-point section
"""
def __init__(self, name, **args) :
construct(self, arg_table, args)
section.__init__(self, name, **args)
def position_train(self, t, left, offset) :
self.my_train = t
self.direction = DIR_LEFT if left else DIR_RIGHT
self.state = SECTION_STOPPED
if offset >= 0 :
self.position = self.prev_position = offset
else :
end = self.left if left else self.right
self.position = self.prev_position = \
self.length - end.sensor_offset + t.loco.magnet_offset
t.set_head(self)
t.set_tail(self)
section.enroll_type('simple', simple_section)
|
12,128 | 07b6b4c745ae50cc7ec1ff5087b95d209db2c8c6 | from datetime import datetime, timedelta
from unittest import TestCase
from django import http
from django.conf import settings
from webpay.pin import utils
class PinRecentlyEnteredTestCase(TestCase):
def setUp(self):
self.request = http.HttpRequest()
self.request.session = {}
def test_pin_never_entered(self):
assert not utils.pin_recently_entered(self.request)
    def test_pin_recently_entered_successfully(self):
self.request.session['last_pin_success'] = datetime.now()
assert utils.pin_recently_entered(self.request)
def test_pin_entered_after_timeout(self):
self.request.session['last_pin_success'] = (
datetime.now() - timedelta(seconds=settings.PIN_UNLOCK_LENGTH + 60)
)
assert not utils.pin_recently_entered(self.request)
|
12,129 | f51ac104675541f5a596fd766ccb625f3e03b8e5 | # coding:utf-8
from rest_framework import serializers
from .models import Goods, GoodsCategory
# class GoodsSerializer(serializers.Serializer):
# click_num = serializers.IntegerField(default=0)
# name = serializers.CharField(required=True, allow_blank=True, max_length=100)
#
# def create(self, validated_data):
# """
# Create and return a new `Snippet` instance, given the validated data.
# """
# return Goods.objects.create(**validated_data)
#
# def update(self, instance, validated_data):
# """
# Update and return an existing `Snippet` instance, given the validated data.
# """
# instance.title = validated_data.get('title', instance.title)
# instance.code = validated_data.get('code', instance.code)
# instance.linenos = validated_data.get('linenos', instance.linenos)
# instance.language = validated_data.get('language', instance.language)
# instance.style = validated_data.get('style', instance.style)
# instance.save()
# return instance
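# The three category serializers below are chained so that `sub_cat` nests
# three levels deep: a top-level category embeds its children, which in turn
# embed their own children.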
class GoodsCategorySerializer3(serializers.ModelSerializer):
class Meta:
model = GoodsCategory
fields = "__all__"
class GoodsCategorySerializer2(serializers.ModelSerializer):
sub_cat = GoodsCategorySerializer3(many=True)
class Meta:
model = GoodsCategory
fields = "__all__"
class GoodsCategorySerializer(serializers.ModelSerializer):
sub_cat = GoodsCategorySerializer2(many=True)
class Meta:
model = GoodsCategory
fields = "__all__"
class GoodsSerializer(serializers.ModelSerializer):
category = GoodsCategorySerializer()
class Meta:
model = Goods
fields = "__all__"
|
12,130 | 495d59aa1596e68f9a418ea36fe478fac2c7dbaa | from app_justcook.models.tecnica import TecnicaModel
from flask_restful import Resource
class Tecnica(Resource):
def get(self):
tecnicas = TecnicaModel.find_all()
return [tecnica.json() for tecnica in tecnicas], 200
class TecnicaId(Resource):
def get(self, tecnica_id):
tecnica = TecnicaModel.find_by_id(tecnica_id)
if tecnica:
return tecnica.json(), 200
return {"message":"Tecnica '{}' não encontrada.".format(tecnica_id)}, 404
class ItemsByTecnica(Resource):
    def get(self, tecnica_id):
tecnica = TecnicaModel.find_by_id(tecnica_id)
if not tecnica:
return {"message":"Tecnica '{}' não encontrada.".format(tecnica_id)}, 404
items = tecnica.items
return [item.json() for item in items], 200
|
12,131 | 7487ec57487df548f68ef2857828306fad7d9373 | import math
a = 5
b = 8
c = 1
delta = b * b - 4 * a * c
if delta < 0:
print ("A equação não possui raizes reais")
elif delta == 0:
raiz = (-1 * b + math.sqrt(delta)) / (2 * a)
print ("A raiz da equação é: ",raiz)
else:
raiz1 = (-1 * b + math.sqrt(delta)) / (2 * a)
raiz2 = (-1 * b - math.sqrt(delta)) / (2 * a)
print(raiz1, raiz2) |
12,132 | 545881018530be36d4b6128fa1a12822b94ee356 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
print("hello world")
# In[4]:
dir()
# In[5]:
dir(_i)
# In[9]:
import string
# In[10]:
"Hello World".split()
# In[13]:
#Try-catch statement
try:
print(7/0)
except ZeroDivisionError:
print("Division by zero")
# In[14]:
# String with multiple lines
longtext = """This is a very long text.
Using multiple lines.
Hello World!"""
print(longtext)
# In[18]:
# Some special characters
specialcharacters = "\' \n \\ \""
print(specialcharacters)
# In[20]:
# Write R or r in front of a string to disable special characters
nospecialcharacters = r"\' \" \\ "
print(nospecialcharacters)
# In[79]:
# repr(), str(): Convert a number to a string
print("Convert number to string with repr: " + repr(5.5))
print("Convert number to string with repr: " + str(5.5))
# In[25]:
# duplicate with the * operator
print("A whole bunch of Tables: " + "Table "*10)
print("And some chairs: " + 20*"Chair ")
# In[28]:
# Access a particular index
print("The first character from longtext is \"" + longtext[0] + "\" and the last is \"" + longtext[-1] + "\"")
# In[31]:
#Access substrings
print(longtext[2:13])
print(longtext[:5])
print(len(longtext))
# In[61]:
# Split
print(longtext.split())
print(longtext.split("\n"))
words = [p for q in longtext.split("\n") for p in q.split(" ")]
print(words)
longtext.split(maxsplit=3)
# In[55]:
# Join
text = "!".join(list)
print(text)
# In[62]:
# Count/Index/Length
print(text.count("!"))
print(text.index("!")) # First index
print(text.rindex("!")) # Last index
print(len(text))
# In[78]:
text2 = text.center(92)
print(text2 + "...")
print(text2.lstrip() + "...")
print(text2.rstrip() + "...")
print(text2.strip() + "...")
print(text.ljust(92))
print(text.rjust(92))
print(text.capitalize()) # Capitalize only first letter
print(text.upper())
print(text.lower())
print(text.title()) # Capitalize the first letter in each word
# In[ ]:
##### USEFUL FUNCTIONS NOT COVERED IN BERKELEY LECTURE NOTES #####
# In[86]:
# Replace
text = "Hello World!!! Mundo, Mundo, Mundo"
text = text.replace("Mundo","").replace(",","").strip()
print("---" + text + "---")
# In[87]:
print(string.digits)
print(string.ascii_uppercase)
print(string.ascii_lowercase)
12,133 | 9a07a9afc06a44a63c0b37182aa9832fe7b28c7f | from helpers.utilities import *
|
12,134 | 26c1e655b79a3c677e32915058a0ad24ef65cfb0 | from pippi import dsp
from pippi import tune
def play(ctl):
midi = ctl.get('midi')
midi.setOffset(111)
pw = midi.get(1, low=0.01, high=1)
scale = [1, 2, 3, 6, 9]
scale = tune.fromdegrees(scale, octave = midi.geti(4, low=0, high=4))
freq = dsp.randchoose(scale)
length = dsp.stf(midi.get(2, low=0.1, high=8) * dsp.rand(0.5, 1.5))
wf = dsp.wavetable('sine2pi')
win = dsp.wavetable('sine')
mod = [ dsp.rand(0, 1) for m in range(1000) ]
modr = midi.get(5, low=0, high=0.3)
modf = midi.get(6, low=0.001, high=10)
amp = midi.get(3, low=0, high=1)
out = dsp.pulsar(freq, length, pw, wf, win, mod, modr, modf, amp)
out = dsp.env(out, dsp.randchoose(['sine', 'tri']))
out = dsp.pan(out, dsp.rand())
return out |
12,135 | 8b035af8a56c86e5eb03a5febd8d597de2d097b2 | # Scrapy settings for tutorial project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
ITEM_PIPELINES = [
'tutorial.pipelines.NoData',
'tutorial.pipelines.MongoDBPipeline',
'tutorial.pipelines.DuplicatesPipeline',
]
MONGODB_SERVER = "localhost"
MONGODB_PORT = 27017
MONGODB_DB = "tutorial"
MONGODB_COLLECTION = "talkbass"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
|
12,136 | 7c0eee6bfe3d91423732b429d33ceb83072387c0 | import os
import os.path
import re
import copy
re_os_sep = re.compile(r"[/\\]+")
class JSIndex(object):
formats = [
('.js', "application/javascript"),
]
def __init__(self):
self.index = []
def addpath(self, path):
if os.path.isfile(path):
self.addfile(path)
elif os.path.isdir(path):
for path, dirs, files in os.walk(path):
for file in files:
for ext, mime in self.formats:
if file.endswith(ext):
file_path = path+os.sep+file
self.addfile(file_path)
else:
print "error: cannot add path:", path
def addfile(self, file_path):
file_path = os.path.join(*re_os_sep.split(file_path))
if os.path.exists(file_path) and os.path.isfile(file_path):
if file_path not in self.index:
print "adding:", file_path
self.index.append(file_path)
else:
print "not adding:", file_path
else:
print "cannot add:", file_path
def combine(self):
combined = []
for file in self.index:
f = open(file)
combined.append("// from:" + file + "\n")
combined.append(f.read())
f.close()
return "".join(combined)
class JSCompiler(object):
compilers = {
'yui': {
'jar_file': 'yuicompressor.jar',
'js_input_param': '',
'js_output_param': '-o',
'optional_params': '--charset utf-8 -v'
},
'closure': {
'jar_file': 'compiler.jar',
'js_input_param': '--js',
'js_output_param': '--js_output_file',
'optional_params': '--compilation_level SIMPLE_OPTIMIZATIONS',
#WHITESPACE_ONLY
#SIMPLE_OPTIMIZATIONS
#ADVANCED_OPTIMIZATIONS
},
}
def __init__(self, js_index):
self.js_index = js_index
def compile(self, compiler, out_filename):
compiler = copy.deepcopy(self.compilers[compiler])
out_temp = out_filename + ".tmp.js"
compiler['input_file'] = out_temp
compiler['output_file'] = out_filename + ".js"
f = open(out_temp, 'wb')
f.write(self.js_index.combine())
f.close()
command = " ".join([
"java -jar %(jar_file)s",
"%(js_input_param)s %(input_file)s",
"%(js_output_param)s %(output_file)s",
"%(optional_params)s",
]) % compiler
print "running:", command
        os.system(command)
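# Hypothetical usage (paths and jar locations are illustrative): build an
# index over a source tree, then minify it with one of the configured jars.
#
#   index = JSIndex()
#   index.addpath("src/js")
#   JSCompiler(index).compile('closure', 'build/app.min')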
|
12,137 | 3a5253a66aa4f68d25b5bb152f34bf7096e28df0 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import itertools as it
invert = True
find_contours = True
img = cv2.imread('img/batman.png',0)
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
#ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
#ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
#ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
#ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
if find_contours:
thresh1_inv = cv2.bitwise_not(thresh1)
edged = cv2.Canny(thresh1_inv, 30, 200)
cv2.waitKey(0)
# Finding Contours
# Use a copy of the image e.g. edged.copy()
# since findContours alters the image
    # findContours returns (image, contours, hierarchy) in OpenCV 3.x but only
    # (contours, hierarchy) in 4.x; taking the last two values works for both
    contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]
if invert:
edged_inv = cv2.bitwise_not(edged)
else:
edged_inv = edged
#cv2.imshow('Contour', edged_inv)
#cv2.waitKey(0)
else:
edged_inv = thresh1
coordinates = []
for i,j in it.product(*[range(a) for a in edged_inv.shape]):
if edged_inv[i][j] == 0:
coordinates.append((i,j))
# Greedy nearest-neighbour ordering: start from the first pixel and repeatedly
# jump to the closest remaining one, so the points trace a continuous path.
print(len(coordinates))
crnt_coordinate = coordinates.pop(0)
ordered_coordinates = [crnt_coordinate]
while coordinates:
    nrst_coordinate = min(
        coordinates,
        key=lambda c: (crnt_coordinate[0] - c[0])**2 + (crnt_coordinate[1] - c[1])**2,
    )
    coordinates.remove(nrst_coordinate)
    ordered_coordinates.append(nrst_coordinate)
    crnt_coordinate = nrst_coordinate
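# Encode each ordered (row, col) pixel as the complex number x - i*y so the
# traced outline can be stored and processed as a single 1-D array.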
result = np.array([x - 1.j * y for y,x in ordered_coordinates])
np.savetxt('img/batman.txt', result)
|
12,138 | bd8edb2a832853ab3532249e75787f6e33a69660 | import networkx as nx
from aoc.util import perf
test_data = """start-A
start-b
A-c
A-b
b-d
A-end
b-end"""
from aocd import data
@perf
def solve(data, double_visit=False):
G = nx.Graph()
for line in data.splitlines():
G.add_edge(*line.split('-'))
return sum(1 for _ in find_paths(G, ['start'], double_visit=double_visit))
def find_paths(G, current_path, double_visit=False):
current_node = current_path[-1]
for node in G.neighbors(current_node):
new_path = current_path + [node]
if node == 'end':
yield new_path
elif node.isupper() or node not in current_path:
yield from find_paths(G, new_path, double_visit)
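        # with double_visit set, one small cave may be revisited once; the
        # flag is consumed on that reuse, and 'start' is never re-entered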
elif double_visit and node != 'start':
yield from find_paths(G, new_path, False)
assert solve(test_data) == 10
print('Part 1:', solve(data))
assert solve(test_data, double_visit=True) == 36
print('Part 2:', solve(data, double_visit=True))
|
12,139 | dcf5ee306e7e33cbae5a618e19804c8b97e0bd9f | # -*- coding:utf-8 -*-
"""
@author: leonardo
@created time: 2020-07-01
@last modified time:2020-07-01
""" |
12,140 | 1f771d735b22cf5f7b93157b01f42484df643b62 | # adaption of https://gist.github.com/alexalemi/2151722
import numpy as np
class Welford(object):
""" Implements Welford's algorithm for computing a running mean
and standard deviation as described at:
http://www.johndcook.com/standard_deviation.html
can take single values or iterables
Properties:
mean - returns the mean
std - returns the std
meanfull- returns the mean and std of the mean
Usage:
>>> foo = Welford()
>>> foo(range(100))
>>> foo
<Welford: 49.5 +- 29.0114919759>
>>> foo([1]*1000)
>>> foo
<Welford: 5.40909090909 +- 16.4437417146>
>>> foo.mean
5.409090909090906
>>> foo.std
16.44374171455467
>>> foo.meanfull
(5.409090909090906, 0.4957974674244838)
"""
def __init__(self,lst=None):
self.count = 0
self.M = 0
self.M2 = 0
self.__call__(lst)
    def update(self, x):
        if x is None:
            return
        # accept either a single value or an iterable of values
        if hasattr(x, '__iter__'):
            for v in x:
                self.update(v)
            return
        self.count += 1
        delta = x - self.M
        self.M += delta / self.count
        delta2 = x - self.M
        self.M2 += delta * delta2
def __call__(self,x):
self.update(x)
    @property
    def mean(self):
        return self.M
    @property
    def meanfull(self):
        # mean together with the standard error of the mean
        return self.mean, self.std / np.sqrt(self.count)
    @property
    def var(self):
        # sample variance; the n-1 normalization matches the docstring values
        if self.count < 2:
            return float('nan')
        return self.M2 / (self.count - 1)
    @property
    def std(self):
        return np.sqrt(self.var)
def __repr__(self):
return "<Welford: {} +- {}>".format(self.mean, self.std)
|
12,141 | 6543af8d371bb91026f650d3e69e740bc9db1374 | import sys
def longestConsti(arr):
#lenth=len(arr)
s=set(arr)
ans=-sys.maxsize-1
for num in arr:
if num-1 in s:
continue
else:
count=1
while num+1 in s:
count+=1
num+=1
ans=max(ans,count)
return ans
arr=[100,4,200,1,3,2]
print(longestConsti(arr))
|
12,142 | 160c2b065ca0a3955ad1a7bd6bc5b4c92efd97f6 | import mysql
from mysql import connector
db = connector.connect(host="localhost", user="root", password="", db="skatefest")
cur = db.cursor() |
12,143 | f745c40aae18a538cba6f0cca387a754fdf0f276 | from flask import Flask, request
from gaia.api.views import api
import os,logging
import logging.handlers
import gaia.demo.views
def create_app(config=None):
"""
Creates the app.
"""
# Initialize the app
app = Flask("gaia")
# config
app.config.from_envvar("GAIA_SETTINGS")
configure_blueprints(app)
configure_logging(app)
return app
def configure_blueprints(app):
app.register_blueprint(api, url_prefix="/api")
def configure_logging(app):
"""
Configures logging.
"""
logs_folder = os.path.join(app.root_path, os.pardir, "logs")
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s ')
info_log = os.path.join(logs_folder, app.config['INFO_LOG'])
info_file_handler = logging.handlers.RotatingFileHandler(
info_log,
maxBytes=100000,
backupCount=10
)
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(formatter)
app.logger.addHandler(info_file_handler)
error_log = os.path.join(logs_folder, app.config['ERROR_LOG'])
error_file_handler = logging.handlers.RotatingFileHandler(
error_log,
maxBytes=100000,
backupCount=10
)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
app.logger.addHandler(error_file_handler)
|
12,144 | 4bf74a92a43c24398d74c632102a0f25696d7f79 | from django.contrib import admin
from django.conf.urls import url, include
from apps.users_app import views
urlpatterns = [
    url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^users_app/', include('apps.users_app.urls')),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^special/', views.special, name='special'),
] |
12,145 | 1ca2638bf4dcc23d74c40011eda618612b55e800 | import time
from selenium import webdriver
# A package to have a chromedriver always up-to-date.
from webdriver_manager.chrome import ChromeDriverManager
from proxies import chrome_proxy
USERNAME = "your_username"
PASSWORD = "your_password"
HOST = "pr.oxylabs.io"
PORT = 7777
# Specify country code if you want proxies from a single country, e.g. `US`.
# Otherwise - set the variable to `None`.
COUNTRY = "US"
options = webdriver.ChromeOptions()
proxy_ext = chrome_proxy(USERNAME, PASSWORD, HOST, PORT, COUNTRY)
options.add_extension(proxy_ext)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
try:
driver.get("https://ip.oxylabs.io/")
time.sleep(5)
finally:
driver.close()
|
12,146 | 7dbfb19b89e4096346dd5f9f4eff8cfb1596ce29 | class Solution:
def removeDuplicateLetters(self, s):
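        # Greedy: scan the distinct letters in sorted order and emit the first
        # one whose suffix still contains every remaining distinct letter, then
        # recurse on that suffix with the chosen letter removed.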
char_list = sorted(set(s))
for char in char_list:
position = s.index(char)
next_string = s[position:]
if set(next_string) == set(s):
return char + self.removeDuplicateLetters(next_string.replace(char,''))
return ''
Demo = Solution()
result = Demo.removeDuplicateLetters("cbacdcbc")
print(result)
|
12,147 | 6672427dfa37d83401d534b6ede52ae52d44a520 |
# method sort_swap : swaps elements at x and y index of list A
def sort_swap(A, x, y):
    A[x], A[y] = A[y], A[x]
# compare_func : returns comparing function depending upon ascending parameter
def compare_func(ascending):
return (lambda curr, x : (curr < x)) if ascending is True else (lambda curr, x : (curr > x))
# method linear_sort : sorts given list using linear sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def linear_sort(A, ascending=True):
compare = compare_func(ascending)
for i in range(len(A) - 1):
for j in range(i+1, len(A)):
if compare(A[j], A[i]):
sort_swap(A, i, j)
return A
# method bubble_sort : sorts given list using bubble sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def bubble_sort(A, ascending=True):
compare = compare_func(ascending)
for i in range(1, len(A)):
for j in range(len(A) - i):
if compare(A[j+1], A[j]):
sort_swap(A, j, j+1)
return A
# method selection_sort : sorts given list using selection sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def selection_sort(A, ascending=True):
compare = compare_func(ascending)
for i in range(len(A)):
least = i
for j in range(i+1, len(A)):
if compare(A[j], A[least]) is True:
least = j
sort_swap(A, least, i)
return A
# method insertion_sort : sorts given list using insertion sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def insertion_sort(A, ascending=True):
compare = compare_func(ascending)
for i in range(1, len(A)):
j = i - 1
x = A[i]
        while j >= 0 and compare(x, A[j]):
A[j+1] = A[j]
j -= 1
A[j+1] = x
return A
# method merge_sort : sorts given list using merge sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def merge_sort(A, ascending=True):
compare = compare_func(ascending)
def _merge_sort(p, r):
if p < r:
q = (p + r) // 2
_merge_sort(p, q)
_merge_sort(q + 1, r)
merge(p, q, r)
    def merge(p, q, r):
        # The sentinel must sit at the "losing" end of the ordering: +inf for
        # ascending, -inf for descending; a fixed +inf would be copied into A
        # when sorting in descending order.
        sentinel = float("inf") if ascending else float("-inf")
        L = A[p:(q+1)] + [sentinel]
        R = A[q+1:r+1] + [sentinel]
        i = j = 0
        for k in range(p, r+1):
            if compare(L[i], R[j]):
                A[k] = L[i]
                i += 1
            else:
                A[k] = R[j]
                j += 1
_merge_sort(0, len(A) - 1)
return A
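# A minimal sanity check for the sorts defined above (a sketch; each sort
# mutates its argument and returns the same list):
def _demo_sorts():
    sample = [5, 2, 9, 1, 7]
    assert linear_sort(sample[:]) == sorted(sample)
    assert bubble_sort(sample[:]) == sorted(sample)
    assert insertion_sort(sample[:]) == sorted(sample)
    assert merge_sort(sample[:], ascending=False) == sorted(sample, reverse=True)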
# method quick_sort : sorts given list using quick sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def quick_sort(A, ascending=True):
compare = compare_func(ascending)
def _quick_sort(p ,r):
if p < r:
q = partition(p, r)
_quick_sort(p, q-1)
_quick_sort(q+1, r)
def partition(p, r):
pivot = A[r]
i = p
j = p
while j < r:
if compare(A[j], pivot):
sort_swap(A, i, j)
i += 1
j += 1
sort_swap(A, i, r)
return i
_quick_sort(0, len(A)-1)
return A
# method heap_sort : sorts given list using heap sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def heap_sort(A, ascending=True):
from Heap import BinaryHeap
h = BinaryHeap(A, not ascending)
for i in range(len(A)):
A[i] = h.extract_root()
return A
# method tree_sort : sorts given list using tree sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def tree_sort(A, ascending=True):
from Tree import BST
def traverse(root):
if not root:
return
traverse(get_left(root))
A[traverse.point] = root.get_data()
traverse.point += 1
traverse(get_right(root))
t = BST()
for i in range(len(A)):
t.insert(A[i])
if ascending:
get_left = lambda root: root.get_left()
get_right = lambda root: root.get_right()
else:
get_left = lambda root: root.get_right()
get_right = lambda root: root.get_left()
traverse.point = 0
traverse(t.root)
return A
# method tim_sort : sorts given list using tim sort
# parameters : A - input list
# ascending - True if sorting is ascending, else False
# returns : sorted array
def tim_sort(A, ascending=True):
run = 32
compare = compare_func(ascending)
    def _tim(p, r):
        if (r - p) > run:
            q = (p + r) // 2
            # Each half is fully sorted by the recursive calls, so only the
            # merge remains at this level.
            _tim(p, q)
            _tim(q+1, r)
            merge(p, q, r)
        else:
            A[p:r+1] = insertion_sort(A[p:r+1], ascending)
    def merge(p, q, r):
        # Same sentinel rule as in merge_sort: the sentinel must compare as
        # the "loser" for the chosen sort direction.
        sentinel = float("inf") if ascending else float("-inf")
        L = A[p:(q+1)] + [sentinel]
        R = A[q+1:r+1] + [sentinel]
        i = j = 0
        for k in range(p, r+1):
            if compare(L[i], R[j]):
                A[k] = L[i]
                i += 1
            else:
                A[k] = R[j]
                j += 1
_tim(0, len(A) - 1)
return A |
12,148 | 9b1693d9a6ed7b451f0cabdfefd8af1e6e31f413 | import socket  # import the socket module
import json
import test
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.ocr.v20181119 import ocr_client, models
import jsonpath
def tencentOCR(src, fmt):
    try:
        # API secret key: console -> Access Management -> Access Keys -> API Key Management
        cred = credential.Credential("", "")
        httpProfile = HttpProfile()
        httpProfile.endpoint = "ocr.tencentcloudapi.com"
        clientProfile = ClientProfile()
        clientProfile.httpProfile = httpProfile
        client = ocr_client.OcrClient(cred, "ap-guangzhou", clientProfile)
        req = models.GeneralBasicOCRRequest()
        params = '{"ImageBase64":"data:image/' + fmt + ';base64,' + src + '"}'
        req.from_json_string(params)
        resp = client.GeneralBasicOCR(req)
        recv = resp.to_json_string()
    except TencentCloudSDKException as err:
        print(err)
        return ""
    result = json.loads(recv)
    DetectedText = jsonpath.jsonpath(result, "$..DetectedText")
    parseDetect = ""
    for msg in DetectedText:
        print(msg)
        parseDetect += msg + '\n'
    return parseDetect
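# Example client-side payload construction (a sketch; assumes a local JPEG file):
#   import base64
#   with open("page.jpg", "rb") as f:
#       src = base64.b64encode(f.read()).decode()
#   print(tencentOCR(src, "jpeg"))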
s = socket.socket()               # create a socket object
host = socket.gethostname()       # get the local host name
print(host)
port = 7996                       # port to listen on
s.bind((host, port))              # bind to the port
s.listen(5)                       # wait for client connections
while True:
    c, addr = s.accept()          # accept a client connection
    print('Connected from:', addr)
    msg = ""
    while True:
        getmsg = c.recv(1024)
        print(len(getmsg))
        if len(getmsg) > 0:
            msg += getmsg.decode()
        else:
            break
    print(len(msg))
    words = tencentOCR(msg, 'jpeg')
    c.send(words.encode())
    c.close()                     # close the connection
|
12,149 | f591812fdfd8ee844441548ef440bbfcb2fc72de |
class naming(object):
def __init__(self):
self.dataset = ""
def features(self, dataset, screening_rule, directory):
self.dataset = dataset
self.screening_rule = screening_rule
self.directory = directory
    # NOTE: the setters below are prefixed with set_ so they are not shadowed
    # by the attributes of the same name (assigning self.dataset in __init__
    # would otherwise overwrite a dataset() method).
    def set_phenotype(self, phenotype):
        self.phenotype = phenotype
    def set_solver(self, solver):
        self.solver = solver
    def set_dataset(self, dataset):
        self.dataset = str(dataset)
#name = naming()
#functions used as a scratch version
def train_lasso_2(X, y, geomul=0.9, lower_bound=0.001, steps=65):
# import less stuff to only find weights and ??
solver = [SklearnCDSolver(), SklearnLarsSolver(),
ProximalGradientSolver(), AccelProximalGradientSolver()] # ActiveSetCDSolver, GlmnetSolver
myLasso = ScreeningLassoPath(DOME(), solver[1], path_lb=lower_bound, path_steps=steps, path_stepsize=geomul,
path_scale='geometric')
beta, nz_inds, scr_inds, path, times_solver, times_screening = myLasso.fit(X.T, y, max_iter=1000, tol=1e-4,
debug=False)
weights = beta[:, 15]
weights = NP.reshape(weights, (X.shape[1], 1))
timescreenandsolve = times_solver[15] + times_screening[15]
return weights, path
def train_lasso_3(X, y, geomul=0.9, lower_bound=0.001, steps=65):
# import less stuff to only find weights and ??
solver = [SklearnCDSolver(), SklearnLarsSolver(),
ProximalGradientSolver(), AccelProximalGradientSolver()] # ActiveSetCDSolver, GlmnetSolver
myLasso = ScreeningLassoPath(StrongRule(), solver[1], path_lb=lower_bound, path_steps=steps, path_stepsize=geomul,
path_scale='geometric')
beta, nz_inds, scr_inds, path, times_solver, times_screening = myLasso.fit(X.T, y, max_iter=1000, tol=1e-4,
debug=False)
weights = beta[:, 15]
weights = NP.reshape(weights, (X.shape[1], 1))
timescreenandsolve = times_solver[15] + times_screening[15]
return weights, path
def train_lasso_4(X, y, geomul=0.9, lower_bound=0.001, steps=65):
# import less stuff to only find weights and ??
solver = [SklearnCDSolver(), SklearnLarsSolver(),
ProximalGradientSolver(), AccelProximalGradientSolver()] # ActiveSetCDSolver, GlmnetSolver
myLasso = ScreeningLassoPath(SAFE(), solver[1], path_lb=lower_bound, path_steps=steps, path_stepsize=geomul,
path_scale='geometric')
beta, nz_inds, scr_inds, path, times_solver, times_screening = myLasso.fit(X.T, y, max_iter=1000, tol=1e-4,
debug=False)
weights = beta[:, 15]
weights = NP.reshape(weights, (X.shape[1], 1))
timescreenandsolve = times_solver[15] + times_screening[15]
return weights, path
def train_lasso_6(X, y, geomul=0.9, lower_bound=0.001, steps=65):
#import less stuff to only find weights and ??
solver = [SklearnCDSolver(), SklearnLarsSolver(),
ProximalGradientSolver(), AccelProximalGradientSolver()] #ActiveSetCDSolver, GlmnetSolver
myLasso = ScreeningLassoPath(EDPP(), solver[1], path_lb=lower_bound, path_steps=steps, path_stepsize=geomul,
path_scale='geometric')
beta, nz_inds, scr_inds, path, times_solver, times_screening = myLasso.fit(X.T, y, max_iter=1000, tol=1e-4, debug=False)
weights = beta[:, 15]
weights = NP.reshape(weights, (X.shape[1], 1))
timescreenandsolve = times_solver[15] + times_screening[15]
return weights, path
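# The four train_lasso_* variants above differ only in the screening rule; a
# consolidated sketch (assuming the same ScreeningLassoPath and solver classes
# used above are importable in this module):
def train_lasso(X, y, rule, geomul=0.9, lower_bound=0.001, steps=65):
    myLasso = ScreeningLassoPath(rule, SklearnLarsSolver(), path_lb=lower_bound,
                                 path_steps=steps, path_stepsize=geomul,
                                 path_scale='geometric')
    beta, nz_inds, scr_inds, path, times_solver, times_screening = myLasso.fit(
        X.T, y, max_iter=1000, tol=1e-4, debug=False)
    weights = NP.reshape(beta[:, 15], (X.shape[1], 1))
    return weights, path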
#sftp://aliki@172.20.24.26/mnt/30T/data/ukbiobank/original/genetics/microarray/EGAD00010001497/ukb_cal_chr22_v2.bed.gz
#sftp://aliki@172.20.24.26/mnt/30T/data/ukbiobank/original/genetics/microarray/EGAD00010001497/ukb_snp_chr22_v2.bim.gz
# after pip install cprofilev
# python -m cProfile -o output.profile test.py
# cprofilev -f output.profile
# visualize with KCacheGrind,firstly: source activate py2
# pyprof2calltree -i prof.out -k pyprof2calltree -i output.profileallSNPs -k
# ~/Documents/master-thesis/LMM-Lasso/code in output.profile, output.profileallSNPs
#loading different data sets: 1. Aradopsis Thaliana 2. synthetic data
"""
# load genotypes
geno_filename = os.path.join(data_dir,'genotypes.csv')
X = SP.genfromtxt(geno_filename)
[n_s,n_f] = X.shape
# simulate phenotype
SP.random.seed(1)
n_c = 5
idx = SP.random.randint(0,n_f,n_c)
w = 1./n_c * SP.ones((n_c,1))
ypheno = SP.dot(X[:,idx],w)
ypheno = (ypheno-ypheno.mean())/ypheno.std()
pheno_filename = os.path.join(data_dir,'poppheno.csv')
ypop = SP.genfromtxt(pheno_filename)
ypop = SP.reshape(ypop,(n_s,1))
y = 0.3*ypop + 0.5*ypheno + 0.2*SP.random.randn(n_s,1)
y = (y-y.mean())/y.std()
# init
debug = False
n_train = 150
n_test = n_s - n_train
n_reps = 100
f_subset = 0.5
mu = 10
dataset = "semi-empirical"
"""
"""
#synthetic data that is correlated
X, y, _ = lmm_lasso.load_toy_data()
y = (y-y.mean())/y.std()
# init
[n_s,n_f] = X.shape
debug = False
n_train = int(n_s * 0.7)
n_test = n_s - n_train
n_reps = 100
f_subset = 0.5
mu = 10
dataset = "synthetic"
"""
"""
corrv1 = 1./n_test * np.asarray([((yhat[i]-yhat[i].mean())*(y[test_idx]-y[test_idx].mean())).sum() / (
yhat[i].std()*y[test_idx].std()) for i in range(y_ada.shape[0])])
corr_adav1 = 1. / n_test * np.asarray([((y_ada[i]-y_ada[i].mean())*(y[test_idx]-y[test_idx].mean())).sum() / (
y_ada[i].std() * y[test_idx].std()) for i in range(y_ada.shape[0])])
corr_baselinev1 = 1. / n_test * np.asarray([((res_baseline['predictors'][i] - res_baseline['predictors'][
i].mean()) * (y[test_idx] - y[test_idx].mean())).sum() / (res_baseline['predictors'][i].std() * y[test_idx].std()) for i in range(res_baseline['predictors'].shape[0])])
# stability selection
ss = lmm_lasso.stability_selection(X,K,y,mu,n_reps,f_subset)
# create plot folder
if not os.path.exists(plots_dir):
os.makedirs(plots_dir)
# plot kernel
fig = plt.figure()
fig.add_subplot(111)
plt.imshow(K,interpolation='nearest')
plt.xlabel('samples')
plt.ylabel('samples')
plt.title('Population Kernel')
fn_out = os.path.join(plots_dir,'kernel.pdf')
plt.savefig(fn_out)
plt.close()
# plot negative log likelihood of the null model
monitor = res['monitor_nm']
fig = plt.figure()
fig.add_subplot(111)
plt.plot(monitor['ldeltagrid'],monitor['nllgrid'],'b-')
plt.plot(monitor['ldeltaopt'],monitor['nllopt'],'r*')
plt.xlabel('ldelta')
plt.ylabel('negative log likelihood')
plt.title('nLL on the null model')
fn_out = os.path.join(plots_dir, 'nLL.pdf')
plt.savefig(fn_out)
plt.close()
# plot Lasso convergence
monitor = res['monitor_lasso']
fig = plt.figure()
fig.add_subplot(311)
plt.plot(monitor['objval'])
plt.title('Lasso convergence')
plt.ylabel('objective')
fig.add_subplot(312)
plt.plot(monitor['r_norm'],'b-',label='r norm')
plt.plot(monitor['eps_pri'],'k--',label='eps pri')
plt.ylabel('r norm')
fig.add_subplot(313)
plt.plot(monitor['s_norm'],'b-',label='s norm')
plt.plot(monitor['eps_dual'],'k--',label='eps dual')
plt.ylabel('s norm')
plt.xlabel('iteration')
fn_out = os.path.join(plots_dir,'lasso_convergence.pdf')
plt.savefig(fn_out)
plt.close()
# plot weights
fig = plt.figure()
fig.add_subplot(111)
plt.title('Weight vector')
plt.plot(w,'b',alpha=0.7)
for i in range(idx.shape[0]):
plt.axvline(idx[i],linestyle='--',color='k')
fn_out = os.path.join(plots_dir,'weights.pdf')
plt.savefig(fn_out)
plt.close()
# plot stability selection
fig = plt.figure()
fig.add_subplot(111)
plt.title('Stability Selection')
plt.plot(ss,'b',alpha=0.7)
for i in range(idx.shape[0]):
plt.axvline(idx[i],linestyle='--',color='k')
plt.axhline(0.5,color='r')
fn_out = os.path.join(plots_dir,'ss_frequency.pdf')
plt.savefig(fn_out)
plt.close()
# plot predictions
fig = plt.figure()
fig.add_subplot(111)
plt.title('prediction')
plt.plot(y[test_idx],yhat, 'bx')
plt.plot(y[test_idx],y[test_idx],'k')
plt.xlabel('y(true)')
plt.ylabel('y(predicted)')
plt.xlabel('SNPs')
plt.ylabel('weights')
fn_out = os.path.join(plots_dir,'predictions.pdf')
plt.savefig(fn_out)
plt.close()
import statsmodels.api as sm
model = sm.OLS(y.flatten(), X).fit()
predictions = model.predict(X)
print_model = model.summary()
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
x1 0.0043 inf 0 nan nan nan
x2 -0.0033 inf -0 nan nan nan
x3 -0.0036 inf -0 nan nan nan
x4 -0.0002 inf -0 nan nan nan
map(lambda j: len(np.unique(a[:,j])), range(a.shape[1]) )
np.allclose(a, a1)
start1 = time.time()
x1 = example.prioritized_SNps(X, y, numberofSNPs)
x2 = example.prioritized_SNpsv2(X, y, numberofSNPs)
end1 = time.time()
cythcode = end1 - start1
"""
|
12,150 | 0199d8019130d906e3e4be64c43a9c3705fba1b3 | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
browser=webdriver.Chrome('/usr/local/bin/chromedriver')
browser.get('http://localhost:3000/')
a=0
i=20
while i<25+a:
search=browser.find_element_by_id('register_id')
search.clear()
search.send_keys('test'+str(i))
search=browser.find_element_by_id('register_pwd')
search.clear()
search.send_keys('q1w2e3r4t5y6')
time.sleep(1)
search=browser.find_element_by_id('register_idmessage')
if search.text == "이미 존재하는 아이디입니다.":
print("test{0} already exists".format(i))
a=a+1
i=i+1
continue
search=browser.find_element_by_id('register_pwdcheck')
search.clear()
search.send_keys('q1w2e3r4t5y6')
search=browser.find_element_by_id('register_email')
search.clear()
search.send_keys('test'+str(i)+'@naver.com')
search=browser.find_element_by_id('register_submit')
search.send_keys(Keys.RETURN)
time.sleep(1)
try:
search=browser.find_element_by_id('register_welcome')
search=browser.find_element_by_id('register_back')
search.send_keys(Keys.RETURN)
except NoSuchElementException:
print("error in register")
exit(1)
search=browser.find_element_by_id('login_id')
search.clear()
search.send_keys('test'+str(i))
search=browser.find_element_by_id('login_pwd')
search.clear()
search.send_keys('q1w2e3r4t5y6')
search=browser.find_element_by_id('login_submit')
search.send_keys(Keys.RETURN)
# send chat
for index in range(1,9):
time.sleep(1)
userlist =Select(browser.find_element_by_id("chat_userlist"))
time.sleep(1)
userlist.select_by_value(str(index))
search=browser.find_element_by_id('chat_box')
search.clear()
search.send_keys('hello!')
search=browser.find_element_by_id('chat_send')
search.send_keys(Keys.RETURN)
# end send chat
time.sleep(1)
try:
search=browser.find_element_by_id('login_welcome')
search=browser.find_element_by_id('logout')
search.send_keys(Keys.RETURN)
except NoSuchElementException:
print("error in login")
exit(1)
i=i+1
browser.quit() |
12,151 | 61abf153780cb4c3c7f35b2141a36243d1036bbb | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 16:21:44 2019
@author: s1995204
"""
# different way to import packages
import numpy as np
x = np.arange(11)
# from numpy import arange
# x = arange(11)
np.arange(11) # array of integers from 0 to 10
np.arange(1,11,1)
np.arange(0.1,1.1,0.1)
# import the required packages to plot with matplotlib
import matplotlib.pyplot as plt
t = np.arange(0,1e4,100) # time 0 to 9900 years in steps of 100
thalf = 5730 # half-life in years
L = np.log(2) / thalf # decay constant in 1/years
C14 = np.exp(-L*t) # fraction of 14C remaining after time t
plt.figure(figsize=(6,4))
plt.rcParams['font.size'] = 10
plt.plot(t, C14, 'k--',linewidth=2) # plot fraction against time
plt.xlim(0,10000)
plt.ylim(0,1)
plt.xlabel('time (years)')
plt.ylabel('remaining $^{14}$C')
plt.title('$^{14}$C')
# plt.xticks(xlocs)
# plt.yticks(ylocs)
plt.savefig('fig.png')
###########################
import matplotlib.pyplot as plt
from scipy import stats
data = np.loadtxt('ocean.txt', skiprows=2)
d = data[:,0]
T = data[:,1]
N = data[:,2]
P = data[:,3]
Si = data[:,4]
print(d)
# make a scatter plot and label the axes
plt.figure(figsize=(6,4))
plt.rcParams['font.size'] = 10
plt.plot(P,N, 'kx')
plt.xlim(0,4)
plt.ylim(0,50)
plt.xlabel('Phosphate ($\mu$mol kg$^{-1}$)')
plt.ylabel('Nitrate ($\mu$mol kg$^{-1}$)')
# fit a line by linear regression
m, c, r, p, se = stats.linregress(P,N) # slope, intercept, correlation coefficient,p-value, sterror of estimate
Pfit = np.arange(max(P)+2)
Nfit = c + m*Pfit
plt.plot(Pfit, Nfit,color='black')
plt.xticks([0,1,2,3,4])
# label the line
eqn = 'N = ' + str(round(c,2)) + '+' + str(round(m,2)) + 'P'
x0=1
y0=20
plt.text(x0,y0,eqn,rotation=37)
# print and check the intercept and slope
print('intercept', c, np.mean(N) - m*np.mean(P))
print('slope', m, r*np.std(N)/np.std(P))
# correlation coefficients
r0,p0 = stats.pearsonr(P, N) # print correlation coefficient and p-value
r1,p1=stats.spearmanr(P,N)
print([r0,p0],
[r1,p1])
#######################
name1 = 'Julie'
name2 = 'Jamie'
age1 = 20
age2 = 25
students = [[name1, name2],[age1, age2]]
print(students[0][1])
|
12,152 | 69893ebca275f08b2cb7c1dd7bebd03785f85d98 | from tkinter import ttk, messagebox, Button, Tk, StringVar, Label, Entry, Listbox, END
from BusinessLogic import BLProject, BLRecordType, BLTimeRecordView, BLTimeRecord, TimeRecordValidation, BLDayView, Cache, Globals
from BusinessEntities import TimeRecord, TimeRecordStatusEnum, DayView
import time
from GUI.RecordTypeEditForm import RecordTypeEditForm
class RecordTypeListForm:
def __init__(self, Cache, conn):
self.Cache = Cache
self.Connection = conn
master = Tk()
self.Master = master
master.protocol('WM_DELETE_WINDOW', self.Quit)
self.Master.title("Record Types")
self.AddButton = Button(master, text='Add', command=self.Add)
self.AddButton.grid(row=0, column=0, sticky='NSEW')
self.EditButton = Button(master, text='Edit', command=self.Edit)
self.EditButton.grid(row=0, column=1, sticky='NSEW')
self.DeleteButton = Button(master, text='Delete', command=self.Delete)
self.DeleteButton.grid(row=0, column=2, sticky='NSEW')
self.RecordTypesListBox = Listbox(master, width=80)
self.RecordTypesListBox.grid(
row=1, column=0, columnspan=10, sticky='NSEW')
self.FillRecordTypes()
self.RecordTypesListBox.bind('<Double-1>', lambda x: self.Edit())
# def CloseWindow(self):
# self.Master.quit()
def Quit(self):
self.Master.quit()
def Show(self):
self.Master.mainloop()
def FillRecordTypes(self):
self.RecordTypesListBox.delete(0, END)
recordTypes = self.Cache.RecordTypes
for item in recordTypes:
self.RecordTypesListBox.insert(END, item)
def Add(self):
pr = RecordTypeEditForm(self.Connection)
pr.Show()
self.Cache.RefreshRecordTypes()
self.FillRecordTypes()
pr.Master.destroy()
def Edit(self):
sel = self.RecordTypesListBox.curselection()[0]
recordType = self.GetRecordType(sel)
pr = RecordTypeEditForm(self.Connection, recordType)
pr.Show()
self.Cache.RefreshRecordTypes()
self.FillRecordTypes()
pr.Master.destroy()
    def Delete(self):
        sel = self.RecordTypesListBox.curselection()[0]
        recordType = self.GetRecordType(sel)
        bl = BLRecordType.BLRecordType(self.Connection)
        bl.DeleteByID(recordType.ID)
        self.Cache.RefreshRecordTypes()
        self.FillRecordTypes()
    def GetRecordType(self, index):
        # the listbox selection is a positional index into the cached list,
        # not a database ID
        return self.Cache.RecordTypes[index]
|
12,153 | a18aecbed90bab5f57160c8cb2ba8a4c508b1332 | from mycroft import MycroftSkill, intent_file_handler
class CreateInternalNetworkForGuests(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('guests.for.network.internal.create.intent')
def handle_guests_for_network_internal_create(self, message):
self.speak_dialog('guests.for.network.internal.create')
def create_skill():
return CreateInternalNetworkForGuests()
|
12,154 | e56487609aacf36e3fde31ccbcfdea8f216c61c5 | import numbers
import numpy as np
import torch.nn as nn
import brancher.distributions as distributions
import brancher.functions as BF
import brancher.geometric_ranges as geometric_ranges
from brancher.variables import var2link, Variable, DeterministicVariable, RandomVariable, PartialLink
from brancher.utilities import join_sets_list
class LinkConstructor(nn.ModuleList):
"""
Summary
Parameters
----------
"""
def __init__(self, **kwargs):
self.kwargs = kwargs
modules = [link
for partial_link in kwargs.values()
for link in var2link(partial_link).links]
super().__init__(modules) #TODO: asserts that specified links are valid pytorch modules
def __call__(self, values):
return {k: var2link(x).fn(values) for k, x in self.kwargs.items()}
class VariableConstructor(RandomVariable):
"""
Summary
Parameters
----------
"""
def __init__(self, name, learnable, ranges, is_observed=False, **kwargs): #TODO: code duplication here
self.name = name
self._evaluated = False
self._observed = is_observed
self._observed_value = None
self._current_value = None
self.construct_deterministic_parents(learnable, ranges, kwargs)
self.parents = join_sets_list([var2link(x).vars for x in kwargs.values()])
self.ancestors = join_sets_list([self.parents] + [parent.ancestors for parent in self.parents])
self.link = LinkConstructor(**kwargs)
self.samples = None
self.ranges = {}
self.dataset = None
self.has_random_dataset = False
self.has_observed_value = False
self.is_normalized = True
self.partial_links = {name: var2link(link) for name, link in kwargs.items()}
def construct_deterministic_parents(self, learnable, ranges, kwargs):
for parameter_name, value in kwargs.items():
if not isinstance(value, (Variable, PartialLink)):
if isinstance(value, np.ndarray):
dim = value.shape[0] #TODO: This is probably not general enough
elif isinstance(value, numbers.Number):
dim = 1
else:
dim = [] #TODO: You should consider the other possible cases individually
deterministic_parent = DeterministicVariable(ranges[parameter_name].inverse_transform(value, dim),
self.name + "_" + parameter_name, learnable, is_observed=self._observed)
kwargs.update({parameter_name: ranges[parameter_name].forward_transform(deterministic_parent, dim)})
class EmpiricalVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, dataset, name, learnable=False, is_observed=False, batch_size=None, indices=None, weights=None): #TODO: Ugly logic
self._type = "Empirical"
input_parameters = {"dataset": dataset, "batch_size": batch_size, "indices": indices, "weights": weights}
ranges = {par_name: geometric_ranges.UnboundedRange()
for par_name, par_value in input_parameters.items()
if par_value is not None}
kwargs = {par_name: par_value
for par_name, par_value in input_parameters.items()
if par_value is not None}
super().__init__(name, **kwargs, learnable=learnable, ranges=ranges, is_observed=is_observed)
if not batch_size:
if indices:
batch_size = len(indices)
else:
raise ValueError("Either the indices or the batch size has to be given as input")
self.batch_size = batch_size
self.distribution = distributions.EmpiricalDistribution(batch_size=batch_size, is_observed=is_observed)
class RandomIndices(EmpiricalVariable):
"""
Summary
Parameters
----------
"""
def __init__(self, dataset_size, batch_size, name, is_observed=False):
self._type = "Random Index"
super().__init__(dataset=list(range(dataset_size)),
batch_size=batch_size, is_observed=is_observed, name=name)
def __len__(self):
return self.batch_size
class NormalVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, loc, scale, name, learnable=False):
self._type = "Normal"
ranges = {"loc": geometric_ranges.UnboundedRange(),
"scale": geometric_ranges.RightHalfLine(0.)}
super().__init__(name, loc=loc, scale=scale, learnable=learnable, ranges=ranges)
self.distribution = distributions.NormalDistribution()
def __add__(self, other):
if isinstance(other, NormalVariable):
return NormalVariable(self.partial_links["loc"] + other.partial_links["loc"],
scale=BF.sqrt(self.partial_links["scale"]**2 + other.partial_links["scale"]**2),
name=self.name + " + " + other.name, learnable=False)
else:
return super().__add__(other)
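# Note on NormalVariable.__add__ above (a sketch of the intended algebra): the
# sum of two independent normal variables is normal, with locs added and
# scales combined in quadrature, e.g.
#   x = NormalVariable(0., 1., name="x")
#   y = NormalVariable(1., 2., name="y")
#   z = x + y  # loc 1., scale sqrt(5.)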
class CauchyVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, loc, scale, name, learnable=False):
self._type = "Cauchy"
ranges = {"loc": geometric_ranges.UnboundedRange(),
"scale": geometric_ranges.RightHalfLine(0.)}
super().__init__(name, loc=loc, scale=scale, learnable=learnable, ranges=ranges)
self.distribution = distributions.CauchyDistribution()
class LaplaceVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, loc, scale, name, learnable=False):
self._type = "Laplace"
ranges = {"loc": geometric_ranges.UnboundedRange(),
"scale": geometric_ranges.RightHalfLine(0.)}
super().__init__(name, loc=loc, scale=scale, learnable=learnable, ranges=ranges)
self.distribution = distributions.LaplaceDistribution()
class LogNormalVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, loc, scale, name, learnable=False):
self._type = "Log Normal"
ranges = {"loc": geometric_ranges.UnboundedRange(),
"scale": geometric_ranges.RightHalfLine(0.)}
super().__init__(name, loc=loc, scale=scale, learnable=learnable, ranges=ranges)
self.distribution = distributions.LogNormalDistribution()
class LogitNormalVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, loc, scale, name, learnable=False):
self._type = "Logit Normal"
ranges = {"loc": geometric_ranges.UnboundedRange(),
"scale": geometric_ranges.RightHalfLine(0.)}
super().__init__(name, loc=loc, scale=scale, learnable=learnable, ranges=ranges)
self.distribution = distributions.LogitNormalDistribution()
class BetaVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, alpha, beta, name, learnable=False):
        self._type = "Beta"
ranges = {"alpha": geometric_ranges.RightHalfLine(0.),
"beta": geometric_ranges.RightHalfLine(0.)}
super().__init__(name, alpha=alpha, beta=beta, learnable=learnable, ranges=ranges)
self.distribution = distributions.BetaDistribution()
class BinomialVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, n, p=None, logit_p=None, name="Binomial", learnable=False):
self._type = "Binomial"
if p is not None and logit_p is None:
ranges = {"n": geometric_ranges.UnboundedRange(),
"p": geometric_ranges.Interval(0., 1.)}
super().__init__(name, n=n, p=p, learnable=learnable, ranges=ranges)
self.distribution = distributions.BinomialDistribution()
elif logit_p is not None and p is None:
ranges = {"n": geometric_ranges.UnboundedRange(),
"logit_p": geometric_ranges.UnboundedRange()}
super().__init__(name, n=n, logit_p=logit_p, learnable=learnable, ranges=ranges)
self.distribution = distributions.BinomialDistribution()
else:
raise ValueError("Either p or " +
"logit_p needs to be provided as input")
class CategoricalVariable(VariableConstructor): #TODO: Work in progress
"""
Summary
Parameters
----------
"""
def __init__(self, p=None, softmax_p=None, name="Categorical", learnable=False):
self._type = "Categorical"
if p is not None and softmax_p is None:
ranges = {"p": geometric_ranges.Simplex()}
super().__init__(name, p=p, learnable=learnable, ranges=ranges)
self.distribution = distributions.CategoricalDistribution()
elif softmax_p is not None and p is None:
ranges = {"softmax_p": geometric_ranges.UnboundedRange()}
super().__init__(name, softmax_p=softmax_p, learnable=learnable, ranges=ranges)
self.distribution = distributions.CategoricalDistribution()
else:
raise ValueError("Either p or " +
"softmax_p needs to be provided as input")
class ConcreteVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, tau, p, name, learnable=False):
self._type = "Concrete"
ranges = {"tau": geometric_ranges.RightHalfLine(0.),
"p": geometric_ranges.Simplex()}
super().__init__(name, tau=tau, p=p, learnable=learnable, ranges=ranges)
self.distribution = distributions.ConcreteDistribution()
class MultivariateNormalVariable(VariableConstructor):
"""
Summary
Parameters
----------
"""
def __init__(self, loc, covariance_matrix=None, precision_matrix=None, cholesky_factor=None, name="Multivariate Normal", learnable=False):
self._type = "Multivariate Normal"
if cholesky_factor is not None and covariance_matrix is None and precision_matrix is None:
ranges = {"loc": geometric_ranges.UnboundedRange(),
"cholesky_factor": geometric_ranges.UnboundedRange()}
super().__init__(name, loc=loc, cholesky_factor=cholesky_factor, learnable=learnable, ranges=ranges)
self.distribution = distributions.MultivariateNormalDistribution()
elif cholesky_factor is None and covariance_matrix is not None and precision_matrix is None:
ranges = {"loc": geometric_ranges.UnboundedRange(),
"covariance_matrix": geometric_ranges.PositiveDefiniteMatrix()}
super().__init__(name, loc=loc, covariance_matrix=covariance_matrix, learnable=learnable, ranges=ranges)
self.distribution = distributions.MultivariateNormalDistribution()
elif cholesky_factor is None and covariance_matrix is None and precision_matrix is not None:
ranges = {"loc": geometric_ranges.UnboundedRange(),
"precision_matrix": geometric_ranges.UnboundedRange()}
super().__init__(name, loc=loc, precision_matrix=precision_matrix, learnable=learnable, ranges=ranges)
self.distribution = distributions.MultivariateNormalDistribution()
else:
raise ValueError("Either covariance_matrix or precision_matrix or"+
"cholesky_factor needs to be provided as input") |
12,155 | 87e43f3384abfa9763559d03e002ce07c385a9f9 | import numpy as np
from numba import cuda, float64, void
from numba.cuda.testing import unittest, CUDATestCase
from numba.core import config
# NOTE: CUDA kernel does not return any value
if config.ENABLE_CUDASIM:
tpb = 4
else:
tpb = 16
SM_SIZE = tpb, tpb
class TestCudaLaplace(CUDATestCase):
def test_laplace_small(self):
@cuda.jit(float64(float64, float64), device=True, inline=True)
def get_max(a, b):
if a > b:
return a
else:
return b
@cuda.jit(void(float64[:, :], float64[:, :], float64[:, :]))
        def jacobi_relax_core(A, Anew, error):
err_sm = cuda.shared.array(SM_SIZE, dtype=float64)
ty = cuda.threadIdx.x
tx = cuda.threadIdx.y
bx = cuda.blockIdx.x
by = cuda.blockIdx.y
n = A.shape[0]
m = A.shape[1]
i, j = cuda.grid(2)
err_sm[ty, tx] = 0
if j >= 1 and j < n - 1 and i >= 1 and i < m - 1:
Anew[j, i] = 0.25 * ( A[j, i + 1] + A[j, i - 1]
+ A[j - 1, i] + A[j + 1, i])
err_sm[ty, tx] = Anew[j, i] - A[j, i]
cuda.syncthreads()
# max-reduce err_sm vertically
t = tpb // 2
while t > 0:
if ty < t:
err_sm[ty, tx] = get_max(err_sm[ty, tx], err_sm[ty + t, tx])
t //= 2
cuda.syncthreads()
# max-reduce err_sm horizontally
t = tpb // 2
while t > 0:
if tx < t and ty == 0:
err_sm[ty, tx] = get_max(err_sm[ty, tx], err_sm[ty, tx + t])
t //= 2
cuda.syncthreads()
if tx == 0 and ty == 0:
error[by, bx] = err_sm[0, 0]
if config.ENABLE_CUDASIM:
NN, NM = 4, 4
iter_max = 20
else:
NN, NM = 256, 256
iter_max = 1000
A = np.zeros((NN, NM), dtype=np.float64)
Anew = np.zeros((NN, NM), dtype=np.float64)
n = NN
tol = 1.0e-6
error = 1.0
for j in range(n):
A[j, 0] = 1.0
Anew[j, 0] = 1.0
iter = 0
blockdim = (tpb, tpb)
griddim = (NN // blockdim[0], NM // blockdim[1])
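        # griddim assumes NN and NM are exact multiples of tpb (true for both
        # the 4x4 simulator case and the 256x256 device case above)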
error_grid = np.zeros(griddim)
stream = cuda.stream()
dA = cuda.to_device(A, stream) # to device and don't come back
dAnew = cuda.to_device(Anew, stream) # to device and don't come back
derror_grid = cuda.to_device(error_grid, stream)
while error > tol and iter < iter_max:
self.assertTrue(error_grid.dtype == np.float64)
            jacobi_relax_core[griddim, blockdim, stream](dA, dAnew, derror_grid)
derror_grid.copy_to_host(error_grid, stream=stream)
# error_grid is available on host
stream.synchronize()
error = np.abs(error_grid).max()
# swap dA and dAnew
tmp = dA
dA = dAnew
dAnew = tmp
iter += 1
if __name__ == '__main__':
unittest.main()
|
12,156 | 44f4ed092a04a9904792581716a82bce58190f61 | from rest_framework import serializers
from adplayer.models import Playlist,Player,Video,Impression
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class CreateUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(validated_data['username'],
None,
validated_data['password'])
return user
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username')
class LoginUserSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Unable to log in with provided credentials.")
class PlaylistSerializer(serializers.ModelSerializer):
class Meta:
model = Playlist
fields = ('name', 'id')
class VideoSerializer(serializers.ModelSerializer):
class Meta:
model = Video
fields = ('name', 'url', 'playlist', 'id')
class PlayerSerializer(serializers.ModelSerializer):
class Meta:
model = Player
fields = ('name', 'id')
class ImpressionViewSerializer(serializers.ModelSerializer):
player = PlayerSerializer(read_only=True)
video = VideoSerializer(read_only=True)
playlist = PlaylistSerializer(read_only=True)
class Meta:
model = Impression
fields = ('timestamp','player','video','playlist', 'id')
class ImpressionAddSerializer(serializers.ModelSerializer):
class Meta:
model = Impression
fields = ('timestamp','player','video','playlist', 'id')
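# Example (a sketch): ImpressionViewSerializer nests the related objects as
# read-only detail, while ImpressionAddSerializer accepts plain primary keys:
#   ImpressionViewSerializer(impression).data
#   ImpressionAddSerializer(data=request.data)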
|
12,157 | a0c19e99cb03a5edbd680895eba3a015775c3868 | # -*- coding: utf-8 -*-
import os
import random
import string
from sqlalchemy.dialects import registry
registry.register("awsathena.jdbc", "pyathenajdbc.sqlalchemy_athena", "AthenaDialect")
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
S3_PREFIX = "test_pyathena_jdbc"
WORK_GROUP = "test-pyathena-jdbc"
SCHEMA = "test_pyathena_jdbc_" + "".join(
[random.choice(string.ascii_lowercase + string.digits) for i in range(10)]
)
class Env(object):
def __init__(self):
self.region_name = os.getenv("AWS_DEFAULT_REGION", None)
assert (
self.region_name
), "Required environment variable `AWS_DEFAULT_REGION` not found."
self.s3_staging_dir = os.getenv("AWS_ATHENA_S3_STAGING_DIR", None)
assert (
self.s3_staging_dir
), "Required environment variable `AWS_ATHENA_S3_STAGING_DIR` not found."
ENV = Env()
class WithConnect(object):
def connect(self, **opts):
from pyathenajdbc import connect
return connect(Schema=SCHEMA, **opts)
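# Example (a sketch): a test case can mix in WithConnect to get a connection
# bound to the per-run test schema:
#   class TestAthena(unittest.TestCase, WithConnect):
#       def test_select(self):
#           conn = self.connect()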
|
12,158 | 41a9026474620d1f9d33ea76233cf7effac361e1 | a = []
b = int(input("value of N"))
for count in range(0, b):
    i = int(input("Enter the number:"))
    a.append(i)
# drop the smallest value; the new minimum is the second smallest
a.remove(min(a))
print(min(a))
|
12,159 | 2361f53dd12066be8e1a06267def865f15bde2ba | #!/usr/bin/env python
'''
def quick_sort(arr):
arr_len = len(arr)
great = []
less = []
if arr_len <= 1:
return arr
else:
pivot = arr[0]
for element in arr[1:]:
if element > pivot:
great.append(element)
else:
less.append(element)
#recursively sort the smaller sorts
return quick_sort(less) + [pivot] + quick_sort(great)
print(quick_sort([3,4,2,1,5,35,0,55,2, 5, 1, 3, 7, 4, 2, 3, 9, 8, 6, 3]))
'''
#using list comprehensions
def quick_sort(arr):
arr_len = len(arr)
if arr_len <= 1:
return arr
else:
pivot = arr[0]
        return (quick_sort([el for el in arr[1:] if el <= pivot])
                + [pivot]
                + quick_sort([el for el in arr[1:] if el > pivot]))
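# Note: with arr[0] as the pivot, already-sorted input degrades to O(n^2);
# the average case is O(n log n). Each call builds new sublists, so this
# version is not in-place.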
unsorted = [3,4,2,1,5,35,0,55,2, 5, 1, 3, 7, 4, 2, 3, 9, 8, 6, 3]
print(quick_sort(unsorted))
|
12,160 | 73a243d40cbdcc7111eca3bcf85e7313b911891b | import operator
import json
from text_preprocessing import preprocess
from collections import Counter
from nltk.corpus import stopwords
from nltk import bigrams,ngrams
from collections import defaultdict
import string
import sys
punctuation = list(string.punctuation)
stop = stopwords.words('english') + punctuation + ['rt', 'via']
search_word = sys.argv[1] # pass a term as a command-line argument
fname = 'python.json'
com = defaultdict(lambda : defaultdict(int))
with open(fname, 'r') as f:
count_all = Counter()
count_single = Counter()
count_hash = Counter()
count_terms = Counter()
count_bigram = Counter()
count_search = Counter()
for line in f:
tweet = json.loads(line)
        # Create a list with all the terms
        terms_stop = [term for term in preprocess(tweet['text']) if term not in stop]
        # Count terms only once, equivalent to Document Frequency
        terms_single = set(terms_stop)
        # Count hashtags only
        terms_hash = [term for term in preprocess(tweet['text'], lowercase=True) if term.startswith('#')]
        # Count terms only (no hashtags, no mentions)
        terms_only = [term for term in preprocess(tweet['text']) if term not in stop and not term.startswith(('#', '@'))]
terms_bigram = bigrams(terms_only)
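        # Build a symmetric co-occurrence count: every unordered pair of
        # distinct terms in the same tweet increments com[w1][w2]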
for i in range(len(terms_only)-1):
for j in range(i+1, len(terms_only)):
w1, w2 = sorted([terms_only[i], terms_only[j]])
if w1 != w2:
com[w1][w2] += 1
count_all.update(terms_stop)
count_terms.update(terms_only)
count_hash.update(terms_hash)
count_single.update(terms_single)
count_bigram.update(terms_bigram)
if search_word in terms_only:
count_search.update(terms_only)
com_max = []
# For each term, look for the most common co-occurrent terms
for t1 in com:
t1_max_terms = sorted(com[t1].items(), key=operator.itemgetter(1), reverse=True)[:5]
for t2, t2_count in t1_max_terms:
com_max.append(((t1, t2), t2_count))
# Get the most frequent co-occurrences
terms_max = sorted(com_max, key=operator.itemgetter(1), reverse=True)
# print(terms_max[:5])
#Print the first 5 most frequent words
print("ALL words:")
print(count_all.most_common(5))
print ("_______________________________")
print("single words:")
print(count_single.most_common(5))
print ("_______________________________")
print("Hash words:")
print(count_hash.most_common(5))
print ("_______________________________")
print("Terms only:")
print(count_terms.most_common(5))
print ("_______________________________")
print("bigrams:")
print(count_bigram.most_common(5))
print ("_______________________________")
print("Co-occurrence for %s:" % search_word)
print(count_search.most_common(20))
|
12,161 | 74417d1085587d9eb7c01fd411966e2fc312c1e0 | '''
leadership_tasks_admin - leadership task administrative handling
===========================================
'''
# standard
from datetime import date
from re import match
# pypi
from flask import g, url_for, request
from flask_security import current_user
from slugify import slugify
from dominate.tags import input_, button
# homegrown
from . import bp
from ...model import db
from ...model import LocalInterest, LocalUser, Task, TaskField, TaskGroup, TaskTaskField, TaskCompletion
from ...model import Position
from ...model import input_type_all, localinterest_query_params, localinterest_viafilter, gen_fieldname
from ...model import FIELDNAME_ARG, INPUT_TYPE_UPLOAD, INPUT_TYPE_DISPLAY
from ...model import date_unit_all, DATE_UNIT_WEEKS, DATE_UNIT_MONTHS, DATE_UNIT_YEARS
from ...version import __docversion__
from ...helpers import positions_active
from .viewhelpers import lastcompleted, get_status, get_order, get_expires, localinterest
from .viewhelpers import get_position_taskgroups, get_taskgroup_taskgroups
from .viewhelpers import create_taskcompletion, get_task_completion, user2localuser, localuser2user
from .viewhelpers import get_fieldoptions, get_taskfields
from .viewhelpers import get_member_tasks
from .viewhelpers import dtrender, dttimerender
from .viewhelpers import EXPIRES_SOON, PERIOD_WINDOW_DISPLAY, STATUS_DISPLAYORDER
from .viewhelpers import PositionTaskgroupCacheMixin, TASK_CHECKLIST_ROLES_ACCEPTED, localuser2user
from .viewhelpers import has_oneof_roles
from .viewhelpers import profile, profiler
# this is just to pick up list() function
from .leadership_tasks_member import fieldupload
from loutilities.user.model import User
from loutilities.user.roles import ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN
from loutilities.user.tables import DbCrudApiInterestsRolePermissions, AssociationSelect, AssociationCrudApi
from loutilities.tables import DteDbRelationship, get_request_action, get_request_data
from loutilities.tables import SEPARATOR, REGEX_ISODATE
from loutilities.filters import filtercontainerdiv, filterdiv, yadcfoption
class ParameterError(Exception): pass
debug = False
adminguide = 'https://members.readthedocs.io/en/{docversion}/leadership-task-admin-guide.html'.format(docversion=__docversion__)
##########################################################################################
# tasks endpoint
###########################################################################################
class TaskView(AssociationCrudApi):
def editor_method_posthook(self, form):
'''
do validation after editor method because we want all processing to have taken place before
we try to read thistask.fields
'''
action = get_request_action(form)
# we only have to worry about create and edit functions
if action == 'create':
thisid = self.created_id
elif action in ['edit', 'editRefresh']:
# kludge to get task.id
# NOTE: this is only called from 'edit' / put function, and there will be only one id
thisid = list(get_request_data(form).keys())[0]
else:
return
thistask = Task.query.filter_by(id=thisid).one()
# build sets of duplicated fields
duplicated = set()
found = set()
for tasktaskfield in thistask.fields:
taskfield = tasktaskfield.taskfield
if taskfield.fieldname in found:
duplicated.add(taskfield.fieldname)
found.add(taskfield.fieldname)
# indicate error for any fields which were duplicated
if duplicated:
dupnames = [TaskField.query.filter_by(fieldname=fn).one().taskfield for fn in list(duplicated)]
            self._fielderrors = [{'name': 'fields.id', 'status': '{} fields were found in more than one category'.format(dupnames)}]
raise ParameterError
# disable position if not isbyposition
if not thistask.isbyposition:
thistask.position = None
self._responsedata[0]['position']['id'] = None
self._responsedata[0]['position']['position'] = None
# update any affected taskcompletions
# this allows the isbyposition or position to change with the completed tasks updated accordingly
taskcompletions = TaskCompletion.query.filter_by(task=thistask).filter(TaskCompletion.position != thistask.position).all()
for taskcompletion in taskcompletions:
taskcompletion.position = thistask.position
def task_validate(action, formdata):
results = []
# TODO: remove this when #51 fixed
from re import compile
# datepattern = compile('^(19|20)\d\d[-](0[1-9]|1[012])[-](0[1-9]|[12][0-9]|3[01])$')
datepattern = compile('^(0[1-9]|1[012])[-](0[1-9]|[12][0-9]|3[01])$')
if formdata['dateofyear'] and not datepattern.match(formdata['dateofyear']):
results.append({'name': 'dateofyear', 'status': 'must be formatted as MM-DD'})
# if both of these are set, they will conflict with each other
if formdata['period'] and formdata['dateofyear']:
results.append({'name': 'period', 'status': 'only one of these should be supplied'})
results.append({'name': 'dateofyear', 'status': 'only one of these should be supplied'})
# expirysoon is needed for nonoptional tasks which have a period or dateofyear
if formdata['isoptional'] != 'yes' and (formdata['period'] or formdata['dateofyear']):
if not formdata['expirysoon']:
results.append({'name': 'expirysoon', 'status': 'please supply'})
# for task completion by position, a position needs to be supplied
if formdata['isbyposition'] == 'yes' and not formdata['position']['id']:
results.append({'name': 'position.id', 'status': 'please supply'})
return results
task_dbattrs = 'id,interest_id,task,description,isbyposition,position,priority,expirysoon,expirysoon_units,period,period_units,dateofyear,expirystarts,expirystarts_units,isoptional,taskgroups,fields'.split(',')
task_formfields = 'rowid,interest_id,task,description,isbyposition,position,priority,expirysoon,expirysoon_units,period,period_units,dateofyear,expirystarts,expirystarts_units,isoptional,taskgroups,fields'.split(',')
task_dbmapping = dict(zip(task_dbattrs, task_formfields))
task_formmapping = dict(zip(task_formfields, task_dbattrs))
# only take mm-dd portion of date into database
# TODO: uncomment these when #51 fixed
# task_dbmapping['dateofyear'] = lambda formrow: formrow['dateofyear'][-5:] if formrow['dateofyear'] else None
# task_formmapping['dateofyear'] = lambda dbrow: '{}-{}'.format(date.today().year, dbrow.dateofyear) if dbrow.dateofyear else None
task_view = TaskView(
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN],
local_interest_model = LocalInterest,
app = bp, # use blueprint instead of app
db = db,
model = Task,
assnmodelfield='task',
assnlistfield='fields',
version_id_col = 'version_id', # optimistic concurrency control
template = 'tasks.view.jinja2',
templateargs={'adminguide': adminguide},
pagename = 'Tasks',
endpoint = 'admin.tasks',
endpointvalues={'interest': '<interest>'},
rule = '/<interest>/tasks',
dbmapping = task_dbmapping,
formmapping = task_formmapping,
checkrequired = True,
validate = task_validate,
clientcolumns = [
{'data': 'task', 'name': 'task', 'label': 'Task',
'className': 'field_req',
},
{'data': 'priority', 'name': 'priority', 'label': 'Priority',
'className': 'field_req',
'class': 'TextCenter',
},
{'data': 'description', 'name': 'description', 'label': 'Display', 'type': 'textarea',
'className': 'field_req',
'render': {'eval': '$.fn.dataTable.render.ellipsis( 80 )'},
'fieldInfo': '<a href=https://daringfireball.net/projects/markdown/syntax target=_blank>Markdown</a>' +
' can be used. Click link for syntax'
},
{'data': 'taskgroups', 'name': 'taskgroups', 'label': 'Task Groups',
'fieldInfo': 'task groups this task should be associated with',
'_treatment': {
'relationship': {'fieldmodel': TaskGroup, 'labelfield': 'taskgroup',
'formfield': 'taskgroups',
'dbfield': 'taskgroups', 'uselist': True,
'searchbox': True,
'queryparams': localinterest_query_params,
}}
},
{'data': 'isbyposition', 'name': 'isbyposition', 'label': 'Position Based',
'class': 'TextCenter',
'_treatment': {'boolean': {'formfield': 'isbyposition', 'dbfield': 'isbyposition'}},
'ed': {'def': 'no'},
'fieldInfo': 'if yes, task completion occurs when anyone in the indicated Position completes; if no, all individuals assigned must complete',
},
{'data': 'position', 'name': 'position', 'label': 'Position',
'_treatment': {
'relationship':
{
'fieldmodel': Position,
'labelfield': 'position',
'formfield': 'position',
'dbfield': 'position',
'uselist': False,
'searchbox': True,
'queryparams': localinterest_query_params,
}},
'fieldInfo': 'required if Position Based = yes, otherwise ignored',
},
{'data': 'expirysoon', 'name': 'expirysoon', 'label': 'Expires Soon',
'class': 'TextCenter',
'fieldInfo': 'time before task expires to start indicating "expires soon"',
'ed': {'def': EXPIRES_SOON / PERIOD_WINDOW_DISPLAY}
},
{'data': 'expirysoon_units', 'name': 'expirysoon_units', 'label': '',
'type': 'select2',
'className': 'inhibitlabel',
'options': date_unit_all,
'ed' :{
'def': DATE_UNIT_WEEKS
},
},
{'data': 'fields', 'name': 'fields', 'label': 'Fields',
'_treatment': {
'relationship': {
'optionspicker':
AssociationSelect(
tablemodel=Task,
associationtablemodelfield='task',
associationmodel=TaskTaskField,
associationfields=['need', 'taskfield'],
selectattrs=[TaskTaskField.need, TaskField.taskfield],
labelfield='fields',
formfield='fields',
dbfield='fields', uselist=True,
queryparams=localinterest_query_params,
)
}}
},
{'data': 'period', 'name': 'period', 'label': 'Period',
'fieldInfo': 'Period or Date of Year may be specified. Leave blank if this task doesn\'t need to be done periodically',
'class': 'TextCenter',
},
{'data': 'period_units', 'name': 'period_units', 'label': '',
'type': 'select2',
'className': 'inhibitlabel',
'options': date_unit_all,
'ed' :{
'def': DATE_UNIT_YEARS
},
},
{'data': 'dateofyear', 'name': 'dateofyear', 'label': 'Date of Year',
# TODO: uncomment these when #51 fixed
# 'type': 'datetime',
# 'render': {'eval': 'render_month_date'},
# 'ed': {'displayFormat': 'MM-DD', 'wireFormat':'YYYY-MM-DD', 'def': None},
'fieldInfo': 'Period or Date of Year may be specified. Leave blank if this task doesn\'t need to be done by a particular date',
# TODO: remove this when #51 fixed
'ed': {'label': 'Date of Year (mm-dd)'},
},
{'data': 'expirystarts', 'name': 'expirystarts', 'label': 'Overdue Starts',
'fieldInfo': 'only used if Date of Year specified. time after task expires to start indicating "overdue"',
'class': 'TextCenter',
},
{'data': 'expirystarts_units', 'name': 'expirystarts_units', 'label': '',
'type': 'select2',
'className': 'inhibitlabel',
'options': date_unit_all,
'ed' :{
'def': DATE_UNIT_MONTHS
},
},
{'data': 'isoptional', 'name': 'isoptional', 'label': 'Optional Task',
'class': 'TextCenter',
'_treatment': {'boolean': {'formfield': 'isoptional', 'dbfield': 'isoptional'}},
'ed': {'def': 'no'},
'fieldInfo': 'indicates if task completion is optional',
},
],
servercolumns = None, # not server side
idSrc = 'rowid',
buttons = ['create', 'editRefresh', 'remove', 'csv'],
dtoptions = {
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
},
edoptions = {
'template': '#customForm',
}
)
task_view.register()
##########################################################################################
# taskfields endpoint
###########################################################################################
taskfield_dbattrs = 'id,interest_id,taskfield,fieldname,displaylabel,displayvalue,fieldinfo,fieldoptions,inputtype,priority,uploadurl,override_completion'.split(',')
taskfield_formfields = 'rowid,interest_id,taskfield,fieldname,displaylabel,displayvalue,fieldinfo,fieldoptions,inputtype,priority,uploadurl,override_completion'.split(',')
taskfield_dbmapping = dict(zip(taskfield_dbattrs, taskfield_formfields))
taskfield_formmapping = dict(zip(taskfield_formfields, taskfield_dbattrs))
from ...model import INPUT_TYPE_CHECKBOX, INPUT_TYPE_RADIO, INPUT_TYPE_SELECT2
INPUT_TYPE_HASOPTIONS = [INPUT_TYPE_CHECKBOX, INPUT_TYPE_RADIO, INPUT_TYPE_SELECT2]
taskfield_formmapping['fieldoptions'] = get_fieldoptions
class TaskFieldCrud(DbCrudApiInterestsRolePermissions):
def createrow(self, formdata):
taskfieldrow = super().createrow(formdata)
taskfield = TaskField.query.filter_by(id=self.created_id).one()
taskfield.fieldname = gen_fieldname()
if taskfield.inputtype == INPUT_TYPE_UPLOAD:
taskfield.uploadurl = (url_for('admin.fieldupload', interest=g.interest)
+ '?{}={}'.format(FIELDNAME_ARG, taskfield.fieldname))
return self.dte.get_response_data(taskfield)
taskfield_view = TaskFieldCrud(
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN],
local_interest_model = LocalInterest,
app = bp, # use blueprint instead of app
db = db,
model = TaskField,
version_id_col = 'version_id', # optimistic concurrency control
template = 'datatables.jinja2',
templateargs={'adminguide': adminguide},
pagename = 'Task Fields',
endpoint = 'admin.taskfields',
endpointvalues={'interest': '<interest>'},
rule = '/<interest>/taskfields',
dbmapping = taskfield_dbmapping,
formmapping = taskfield_formmapping,
checkrequired = True,
clientcolumns = [
{'data': 'taskfield', 'name': 'taskfield', 'label': 'Field',
'className': 'field_req',
'_unique': True,
},
{'data': 'priority', 'name': 'priority', 'label': 'Priority',
'className': 'field_req',
},
{'data': 'displaylabel', 'name': 'displaylabel', 'label': 'Field Label',
'className': 'field_req',
},
{'data': 'inputtype', 'name': 'inputtype', 'label': 'Input Type',
'fieldInfo' : 'if you want the field to collect input, select the input type',
'type': 'select2',
'options': sorted(input_type_all),
'ed' :{
'opts' : {
'placeholder' : 'Select input type',
'allowClear' : True
}
},
},
# see taskfield_formmapping and afterdatatables.js editor.on('initEdit', ...
{'data': 'fieldoptions', 'name': 'fieldoptions', 'label': 'Options',
'type': 'select2', 'separator':SEPARATOR,
'options': [],
'opts': {
'multiple': 'multiple',
'tags': True
}
},
{'data': 'fieldinfo', 'name': 'fieldinfo', 'label': 'Field Hint',
'fieldInfo': 'this gets displayed under the field to help the user fill in the form'
},
{'data': 'displayvalue', 'name': 'displayvalue', 'label': 'Field Value', 'type': 'textarea',
'render': {'eval': '$.fn.dataTable.render.ellipsis( 80 )'},
'fieldInfo': 'text to display for {} Input Type (display-only)'.format(INPUT_TYPE_DISPLAY)},
{'data': 'fieldname', 'name': 'fieldname', 'label': 'Field Name', 'type': 'readonly'
},
{'data': 'uploadurl', 'name': 'uploadurl', 'label': 'Upload URL', 'type': 'readonly'
},
{'data': 'override_completion', 'name': 'override_completion', 'label': 'Override Completion',
'_treatment': {'boolean': {'formfield': 'override_completion', 'dbfield': 'override_completion'}},
'fieldInfo': 'if \'yes\' this field overrides date when member marks task completed',
'ed': {'def': 'no'},
},
],
servercolumns = None, # not server side
idSrc = 'rowid',
buttons = ['create', 'editRefresh', 'remove', 'csv'],
dtoptions = {
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
},
)
taskfield_view.register()
##########################################################################################
# taskgroups endpoint
###########################################################################################
def _validate_branch(taskgroup, branchlist):
'''
recursively check if this taskgroup in branchlist -- if it is, there's an error
:param taskgroup: task group to check
:param branchlist: list of task groups so far in this branch
:return: results error list
'''
results = []
if taskgroup.id in branchlist:
branchnames = ', '.join(["'{}'".format(TaskGroup.query.filter_by(id=id).one().taskgroup) for id in branchlist])
results = [{'name': 'tgtaskgroups.id', 'status': 'task group loop found: \'{}\' repeated following {}'.format(taskgroup.taskgroup, branchnames)}]
else:
thisbranch = branchlist + [taskgroup.id]
for tg in taskgroup.taskgroups:
results = _validate_branch(tg, thisbranch)
if results: break
return results
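# Example (a sketch): if group A contains B and B contains A, an edit of A
# walks A -> B -> A and _validate_branch reports the repeated group as a loop.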
def _validate_taskgroup(action, formdata):
results = []
# NOTE: only using from 'create', 'edit' functions, so assuming there will be only one id
if action == 'create':
initialbranch = []
elif action == 'edit':
# kludge to get referenced taskgroup.id
thisid = int(list(get_request_data(request.form).keys())[0])
initialbranch = [thisid]
else:
return results
# recursively look through all task groups this task group refers to
# if the any task group is referenced more than once on a branch then we have a loop
# stop at first problem
for tgid in formdata['tgtaskgroups']['id'].split(SEPARATOR):
# if empty string, no ids were supplied
if tgid == '': break
taskgroup = TaskGroup.query.filter_by(id=tgid).one()
results = _validate_branch(taskgroup, initialbranch)
if results: break
return results
taskgroup_dbattrs = 'id,interest_id,taskgroup,description,tasks,positions,users,taskgroups'.split(',')
taskgroup_formfields = 'rowid,interest_id,taskgroup,description,tasks,positions,users,tgtaskgroups'.split(',')
taskgroup_dbmapping = dict(zip(taskgroup_dbattrs, taskgroup_formfields))
taskgroup_formmapping = dict(zip(taskgroup_formfields, taskgroup_dbattrs))
taskgroup_view = DbCrudApiInterestsRolePermissions(
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN],
local_interest_model = LocalInterest,
app = bp, # use blueprint instead of app
db = db,
model = TaskGroup,
version_id_col = 'version_id', # optimistic concurrency control
template = 'datatables.jinja2',
templateargs={'adminguide': adminguide},
pagename = 'Task Groups',
endpoint = 'admin.taskgroups',
endpointvalues={'interest': '<interest>'},
rule = '/<interest>/taskgroups',
dbmapping = taskgroup_dbmapping,
formmapping = taskgroup_formmapping,
checkrequired = True,
validate = _validate_taskgroup,
clientcolumns = [
{'data': 'taskgroup', 'name': 'taskgroup', 'label': 'Task Group',
'className': 'field_req',
# TODO: is this unique in the table or within an interest? Needs to be within an interest
'_unique': True,
},
{'data': 'description', 'name': 'description', 'label': 'Description',
'className': 'field_req',
},
# note name tgtaskgroups rather than taskgroups to avoid conflict with name in tasks subform
# see also #55
{'data': 'tgtaskgroups', 'name': 'tgtaskgroups', 'label': 'Task Groups',
'_treatment': {
'relationship': {'fieldmodel': TaskGroup, 'labelfield': 'taskgroup', 'formfield': 'tgtaskgroups',
'dbfield': 'taskgroups', 'uselist': True,
'queryparams': localinterest_query_params,
}}
},
{'data': 'tasks', 'name': 'tasks', 'label': 'Tasks',
'_treatment': {
'relationship': {'fieldmodel': Task, 'labelfield': 'task', 'formfield': 'tasks',
'dbfield': 'tasks', 'uselist': True,
'queryparams': localinterest_query_params,
'editable' : { 'api' : task_view },
}}
},
{'data': 'positions', 'name': 'positions', 'label': 'Positions',
'_treatment': {
'relationship': {'fieldmodel': Position, 'labelfield': 'position', 'formfield': 'positions',
'dbfield': 'positions', 'uselist': True,
'queryparams': localinterest_query_params,
}}
},
{'data': 'users', 'name': 'users', 'label': 'Members',
'_treatment': {
# viadbattr stores the LocalUser id which has user_id=user.id for each of these
# and pulls the correct users out of User based on LocalUser table
'relationship': {'fieldmodel': User, 'labelfield': 'name',
'formfield': 'users', 'dbfield': 'users',
'viadbattr': LocalUser.user_id,
'viafilter': localinterest_viafilter,
'queryparams': {'active': True},
'uselist': True}}
},
],
servercolumns = None, # not server side
idSrc = 'rowid',
buttons = ['create', 'editRefresh', 'remove', 'csv'],
dtoptions = {
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
},
)
taskgroup_view.register()
##########################################################################################
# taskdetails endpoint
###########################################################################################
def taskdetails_addlfields(task, member):
tc = get_task_completion(task, member)
return get_taskfields(tc, task)
# map id to rowid, retrieve all other required fields
# no dbmapping because this table is read-only
taskdetails_dbattrs = 'id,member,task,lastcompleted,status,order,expires,fields,task_taskgroups,member_taskgroups,member_positions'.split(',')
taskdetails_formfields = 'rowid,member,task,lastcompleted,status,order,expires,fields,task_taskgroups,member_taskgroups,member_positions'.split(',')
taskdetails_dbmapping = dict(zip(taskdetails_dbattrs, taskdetails_formfields))
class TaskMember():
'''
    allows creation of "taskmember" object to simulate database behavior
'''
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
class TaskDetails(DbCrudApiInterestsRolePermissions, PositionTaskgroupCacheMixin):
def __init__(self, formmapping={}, **kwargs):
self.kwargs = kwargs
# update formmapping here,
# a) because super().__init__ makes a copy of formmapping, so must be done before __init__ called
# b) because self.open() stores some information used by some of these functions
# NOTE: the lambda functions are not called until after self.open() is called
formmapping['rowid'] = 'id'
formmapping['task_taskgroups'] = 'task_taskgroups'
formmapping['member_taskgroups'] = 'member_taskgroups'
formmapping['member_positions'] = 'member_positions'
formmapping['member'] = lambda tu: tu.member.name
formmapping['task'] = lambda tu: tu.task.task
formmapping['lastcompleted'] = lambda tu: lastcompleted(tu.task, tu.member)
formmapping['status'] = lambda tu: get_status(self, tu.member, tu.task)
formmapping['order'] = lambda tu: get_order(self, tu.member, tu.task)
formmapping['expires'] = lambda tu: get_expires(self, tu.member, tu.task)
formmapping['fields'] = lambda tu: 'yes' if tu.task.fields else ''
formmapping['addlfields'] = lambda tu: taskdetails_addlfields(tu.task, tu.member)
super().__init__(formmapping=formmapping, **kwargs)
def getids(self, id):
'''
return split of id into local user id, task id
:param id: id for each TaskMember entry
:return: (localuserid, taskid)
'''
return tuple([int(usertask) for usertask in id.split(';')])
def setid(self, userid, taskid):
'''
return combined userid, taskid
:param userid: id for each LocalUser entry
:param taskid: id for each Task entry
:return: id
'''
return ';'.join([str(userid), str(taskid)])
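    # Illustrative round trip of the composite row id (hypothetical values):
    #   setid(12, 34)   -> '12;34'
    #   getids('12;34') -> (12, 34)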
# @profile
def open(self):
locinterest = localinterest()
localusersdb = LocalUser.query.filter_by(interest=locinterest).all()
        # collect all the members who should be in task details view (i.e., they are allowed to do the task checklist)
localusers = [lu for lu in localusersdb if has_oneof_roles(localuser2user(lu), TASK_CHECKLIST_ROLES_ACCEPTED)]
# initialize cache
ondate = request.args.get('ondate', date.today())
self.init_position_taskgroup_cache(localusersdb, ondate)
# retrieve member data from localusers
members = []
for localuser in localusers:
# None can be returned, but it seems like this should happen only if the User table was manipulated
# manually without adjusting the LocalUser table accordingly.
# This should only happen in development testing of member management
user = User.query.filter_by(id=localuser.user_id).one_or_none()
if user:
members.append({'localuser':localuser, 'member': user})
tasksmembers = []
for member in members:
# collect all the tasks which are referenced by positions and taskgroups for this member
tasks = get_member_tasks(member['localuser'], ondate)
# create/add taskmember to list for all tasks
active_positions = self.get_activepositions(member['localuser'])
for task in iter(tasks):
membertaskid = self.setid(member['localuser'].id, task.id)
taskmember = TaskMember(
id=membertaskid,
task=task, task_taskgroups=task.taskgroups,
member=member['member'],
member_positions=active_positions,
)
# drill down to get all the taskgroups
member_taskgroups = set()
for position in active_positions:
member_taskgroups |= self.get_position_taskgroups(position)
member_taskgroups |= self.get_localuser_taskgroups(member['localuser'])
taskmember.member_taskgroups = member_taskgroups
tasksmembers.append(taskmember)
self.rows = iter(tasksmembers)
def close(self):
# profiler.print_stats()
return super().close()
def updaterow(self, thisid, formdata):
'''
just update TaskCompletion.completion
:param thisid:
:param formdata:
:return:
'''
memberid, taskid = self.getids(thisid)
luser = LocalUser.query.filter_by(id=memberid).one()
task = Task.query.filter_by(id=taskid).one()
# create new TaskCompletion record, update the task completion time and user who made the update
tc = create_taskcompletion(task, luser, self.localinterest, formdata)
tc.completion = dtrender.asc2dt(formdata['lastcompleted'])
tc.updated_by = user2localuser(current_user).id
member = {'localuser': luser, 'member': User.query.filter_by(id=luser.user_id).one()}
ondate = request.args.get('ondate', date.today())
taskmember = TaskMember(
id=thisid,
task=task, task_taskgroups=task.taskgroups,
member=member['member'],
member_positions=positions_active(member['localuser'], ondate),
)
# drill down to get all the taskgroups
member_taskgroups = set()
for position in positions_active(member['localuser'], ondate):
get_position_taskgroups(position, member_taskgroups)
for taskgroup in member['localuser'].taskgroups:
get_taskgroup_taskgroups(taskgroup, member_taskgroups)
taskmember.member_taskgroups = member_taskgroups
return self.dte.get_response_data(taskmember)
def refreshrows(self, ids):
'''
refresh row(s) from database
:param ids: comma separated ids of row to be refreshed
:rtype: list of returned rows for rendering, e.g., from DataTablesEditor.get_response_data()
'''
theseids = ids.split(',')
responsedata = []
ondate = request.args.get('ondate', date.today())
for thisid in theseids:
# id is made up of localuser.id, task.id
localuserid, taskid = self.getids(thisid)
localuser = LocalUser.query.filter_by(id=localuserid).one()
task = Task.query.filter_by(id=taskid).one()
member = {'localuser': localuser, 'member': User.query.filter_by(id=localuser.user_id).one()}
taskuser = TaskMember(
id=thisid,
task=task, task_taskgroups=task.taskgroups,
member=member['member'],
member_positions=positions_active(member['localuser'], ondate),
member_taskgroups=member['localuser'].taskgroups,
)
responsedata.append(self.dte.get_response_data(taskuser))
return responsedata
class ReadOnlySelect2(DteDbRelationship):
def col_options(self):
col = super().col_options()
# readonly select2
col['opts']['disabled'] = True
return col
def taskdetails_validate(action, formdata):
results = []
# kludge to get task.id
# NOTE: this is only called from 'edit' / put function, and there will be only one id
thisid = list(get_request_data(request.form).keys())[0]
# id is made up of localuser.id, task.id
localuserid, taskid = taskdetails_view.getids(thisid)
task = Task.query.filter_by(id=taskid).one()
# build list of fields which could override completion date (should only be one)
override_completion = []
for tasktaskfield in task.fields:
taskfield = tasktaskfield.taskfield
if taskfield.override_completion:
override_completion.append(taskfield.fieldname)
for field in override_completion:
if not match(REGEX_ISODATE, formdata[field]):
results.append({'name': field, 'status': 'please specify date in yyyy-mm-dd format'})
elif formdata[field] > date.today().isoformat():
results.append({'name':field, 'status': 'cannot specify date later than today'})
if not match(REGEX_ISODATE, formdata['lastcompleted']):
results.append({'name':'lastcompleted', 'status': 'please specify date in yyyy-mm-dd format'})
elif formdata['lastcompleted'] > date.today().isoformat():
results.append({'name':'lastcompleted', 'status': 'cannot specify date later than today'})
return results
taskdetails_filters = filtercontainerdiv()
with taskdetails_filters:
filterdiv('members-external-filter-members', 'Member')
filterdiv('members-external-filter-positions-by-member', 'Members in Positions')
filterdiv('members-external-filter-taskgroups-by-member', 'Members in Task Groups')
filterdiv('members-external-filter-tasks', 'Task')
filterdiv('members-external-filter-taskgroups-by-task', 'Tasks in Task Groups')
filterdiv('members-external-filter-statuses', 'Status')
filterdiv('members-external-filter-completed', 'Last Completed')
filterdiv('members-external-filter-expires', 'Expiration Date')
datefilter = filterdiv('positiondate-external-filter-startdate', 'In Position On')
with datefilter:
input_(type='text', id='effective-date', name='effective-date', _class='like-select2-sizing')
button('Today', id='todays-date-button')
taskdetails_yadcf_options = [
yadcfoption('member:name', 'members-external-filter-members', 'multi_select', placeholder='Select members', width='200px'),
yadcfoption('task:name', 'members-external-filter-tasks', 'multi_select', placeholder='Select tasks', width='200px'),
yadcfoption('task_taskgroups.taskgroup:name', 'members-external-filter-taskgroups-by-task', 'multi_select', placeholder='Select task groups', width='200px'),
    yadcfoption('member_positions.position:name', 'members-external-filter-positions-by-member', 'multi_select', placeholder='Select positions', width='200px'),
yadcfoption('member_taskgroups.taskgroup:name', 'members-external-filter-taskgroups-by-member', 'multi_select', placeholder='Select task groups', width='200px'),
yadcfoption('status:name', 'members-external-filter-statuses', 'multi_select', placeholder='Select statuses', width='200px'),
yadcfoption('lastcompleted:name', 'members-external-filter-completed', 'range_date'),
yadcfoption('expires:name', 'members-external-filter-expires', 'range_date'),
]
taskdetails_view = TaskDetails(
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN],
local_interest_model = LocalInterest,
app = bp, # use blueprint instead of app
db = db,
model = Task,
template = 'datatables.jinja2',
templateargs = {
'tablefiles': lambda: fieldupload.list(),
'adminguide': adminguide,
},
pretablehtml = taskdetails_filters.render(),
yadcfoptions = taskdetails_yadcf_options,
pagename = 'Task Details',
endpoint = 'admin.taskdetails',
endpointvalues={'interest': '<interest>'},
rule = '/<interest>/taskdetails',
dbmapping = taskdetails_dbmapping,
# formmapping = taskdetails_formmapping,
checkrequired = True,
validate = taskdetails_validate,
clientcolumns = [
{'data': 'member', 'name': 'member', 'label': 'Member',
'type': 'readonly',
},
{'data': 'order', 'name': 'order', 'label': 'Display Order',
'type': 'hidden',
'className': 'Hidden',
},
{'data': 'status', 'name': 'status', 'label': 'Status',
'type': 'readonly',
'className': 'status-field',
},
{'data': 'task', 'name': 'task', 'label': 'Task',
'type': 'readonly',
},
{'data': 'lastcompleted', 'name': 'lastcompleted', 'label': 'Last Completed',
'type': 'datetime',
# 'ed': {'opts':{'maxDate':date.today().isoformat()}}
},
{'data': 'expires', 'name': 'expires', 'label': 'Expiration Date',
'type': 'readonly',
'className': 'status-field',
},
{'data': 'member_positions', 'name': 'member_positions', 'label': 'Member Positions',
# 'type': 'readonly',
'_treatment': {
'relationship': {
'optionspicker' : ReadOnlySelect2(
fieldmodel = Position, labelfield = 'position',
formfield = 'member_positions',
dbfield = 'member_positions', uselist = True,
queryparams = localinterest_query_params,
)
}}
},
{'data': 'member_taskgroups', 'name': 'member_taskgroups', 'label': 'Member Task Groups',
# 'type': 'readonly',
'_treatment': {
'relationship': {
'optionspicker' : ReadOnlySelect2(
fieldmodel = TaskGroup, labelfield = 'taskgroup',
formfield = 'member_taskgroups',
dbfield = 'member_taskgroups', uselist = True,
queryparams = localinterest_query_params,
)
}}
},
{'data': 'task_taskgroups', 'name': 'task_taskgroups', 'label': 'Task Task Groups',
'type': 'readonly',
'_treatment': {
'relationship': {
'optionspicker' : ReadOnlySelect2(
fieldmodel = TaskGroup, labelfield = 'taskgroup', formfield = 'task_taskgroups',
dbfield = 'task_taskgroups', uselist = True,
queryparams = localinterest_query_params,
)
}}
},
{'data': 'fields', 'name': 'fields', 'label': 'Add\'l Fields',
'type': 'readonly',
'dtonly': True,
},
],
servercolumns = None, # not server side
idSrc = 'rowid',
buttons = [
{
'extend':'editRefresh',
'text':'View',
'editor': {'eval':'editor'},
'formButtons': [
{'text': 'Update', 'action': {'eval': 'submit_button'}},
{'text': 'Dismiss', 'action': {'eval':'dismiss_button'}}
]
},
'csv'
],
dtoptions = {
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
'rowCallback': {'eval': 'set_cell_status_class'},
# note id is column 0 to datatables, col 2 (display order) hidden
'order': [['member:name', 'asc'], ['order:name', 'asc'], ['expires:name', 'asc']],
},
edoptions={
'i18n':
# "edit" window shows "Task" in title
{'edit':
{
'title': 'Task',
}
}
},
)
taskdetails_view.register()
##########################################################################################
# membersummary endpoint
###########################################################################################
status_slugs = [slugify(s) for s in STATUS_DISPLAYORDER]
slug2status = dict(zip(status_slugs, STATUS_DISPLAYORDER))
status2slug = dict(zip(STATUS_DISPLAYORDER, status_slugs))
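# e.g., slugify would map a hypothetical status 'In Progress' to the slug 'in-progress';
# the two dicts above convert between display names and slugs in both directions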
membersummary_dbattrs = 'id,interest_id,member,member_positions,member_taskgroups'.split(',') + status_slugs
membersummary_formfields = 'rowid,interest_id,member,member_positions,member_taskgroups'.split(',') + status_slugs
membersummary_dbmapping = dict(zip(membersummary_dbattrs, membersummary_formfields))
membersummary_formmapping = dict(zip(membersummary_formfields, membersummary_dbattrs))
class MemberMember():
'''
allows creation of "membermember" object to simulate database behavior
'''
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
class MemberSummary(DbCrudApiInterestsRolePermissions):
def open(self):
# create another instance of TaskDetails
taskdetails = TaskDetails(
app=bp, # use blueprint instead of app
db=db,
model=Task,
local_interest_model=LocalInterest,
dbmapping=taskdetails_dbmapping,
# formmapping=taskdetails_formmapping,
rule='unused',
clientcolumns=[
{'data': 'member', 'name': 'member', 'label': 'Member',
'type': 'readonly',
},
{'data': 'status', 'name': 'status', 'label': 'Status',
'type': 'readonly',
'className': 'status-field',
},
],
)
members = {}
taskdetails.open()
linterest = localinterest()
for row in taskdetails.rows:
thistask = taskdetails.dte.get_response_data(row)
# add user record
localuserid, taskid = taskdetails.getids(row.id)
thistask['User'] = localuser2user(localuserid)
name = thistask['User'].name
# add member name to members if not already there
if name not in members:
# note taskgroups should be the same for all task records, so ok to set with first for this member
members[name] = MemberMember(
id = localuserid,
member = name,
member_positions = thistask['member_positions'],
member_taskgroups = thistask['member_taskgroups'],
interest_id = linterest.id,
)
for slug in status_slugs:
setattr(members[name], slug, 0)
# update status for this record
thisslug = status2slug[thistask['status']]
count = getattr(members[name], thisslug)
setattr(members[name], thisslug, count+1)
# set rows for response
therows = []
for name in members:
for slug in status_slugs:
if (getattr(members[name],slug) == 0):
setattr(members[name],slug,None)
therows.append(members[name])
self.rows = iter(therows)
membersummary_filters = filtercontainerdiv()
membersummary_filters += filterdiv('members-external-filter-members', 'Member')
membersummary_filters += filterdiv('members-external-filter-positions-by-member', 'Members in Positions')
membersummary_filters += filterdiv('members-external-filter-taskgroups-by-member', 'Members in Task Groups')
membersummary_yadcf_options = [
yadcfoption('member:name', 'members-external-filter-members', 'multi_select', placeholder='Select members', width='200px'),
    yadcfoption('member_positions.position:name', 'members-external-filter-positions-by-member', 'multi_select', placeholder='Select positions', width='200px'),
yadcfoption('member_taskgroups.taskgroup:name', 'members-external-filter-taskgroups-by-member', 'multi_select', placeholder='Select task groups', width='200px'),
]
membersummary = MemberSummary(
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN],
local_interest_model = LocalInterest,
app = bp, # use blueprint instead of app
db = db,
model = LocalUser,
template = 'datatables.jinja2',
templateargs={'adminguide': adminguide},
pretablehtml = membersummary_filters.render(),
yadcfoptions = membersummary_yadcf_options,
pagename = 'Member Summary',
endpoint = 'admin.membersummary',
endpointvalues={'interest': '<interest>'},
rule = '/<interest>/membersummary',
dbmapping = membersummary_dbmapping,
formmapping = membersummary_formmapping,
checkrequired = True,
clientcolumns = [
{'data': 'member', 'name': 'member', 'label': 'Member',
'type':'readonly',
},
] + [
{'data':slug, 'name':slug,
'type':'readonly',
'class': 'TextCenter',
'label':slug2status[slug]
} for slug in status_slugs
] + [
{'data': 'member_positions', 'name': 'member_positions', 'label': 'Member Positions',
'type': 'readonly',
'_treatment': {
'relationship': {
'optionspicker' : ReadOnlySelect2(
fieldmodel = Position, labelfield = 'position',
formfield = 'member_positions',
dbfield = 'member_positions', uselist = True,
queryparams = localinterest_query_params,
)
}}
},
{'data': 'member_taskgroups', 'name': 'member_taskgroups', 'label': 'Member Task Groups',
'type': 'readonly',
'_treatment': {
'relationship': {
'optionspicker' : ReadOnlySelect2(
fieldmodel = TaskGroup, labelfield = 'taskgroup',
formfield = 'member_taskgroups',
dbfield = 'member_taskgroups', uselist = True,
queryparams = localinterest_query_params,
)
}}
},
],
servercolumns = None, # not server side
idSrc = 'rowid',
buttons=[
{
'extend': 'edit',
'text': 'View Member',
'action': {'eval': 'member_details'}
},
'csv'
],
dtoptions = {
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
},
)
membersummary.register()
##########################################################################################
# history endpoint
###########################################################################################
def history_addlfields(tc, task):
return get_taskfields(tc, task)
history_dbattrs = 'id,interest_id,member,position,task,completion,update_time,updated_by'.split(',')
history_formfields = 'rowid,interest_id,member,position,task,completion,update_time,updated_by'.split(',')
history_dbmapping = dict(zip(history_dbattrs, history_formfields))
history_formmapping = dict(zip(history_formfields, history_dbattrs))
history_formmapping['member'] = lambda tc: localuser2user(tc.user_id).name
history_formmapping['position'] = lambda tc: tc.position.position if tc.position else ""
history_formmapping['task'] = lambda tc: tc.task.task
history_formmapping['completion'] = lambda tc: dtrender.dt2asc(tc.completion)
history_formmapping['update_time'] = lambda tc: dttimerender.dt2asc(tc.update_time)
history_formmapping['updated_by'] = lambda tc: localuser2user(tc.updated_by).name
history_formmapping['addlfields'] = lambda tc: history_addlfields(tc, tc.task)
history_filters = filtercontainerdiv()
history_filters += filterdiv('members-external-filter-update-time', 'Update Time')
history_filters += filterdiv('members-external-filter-updated-by', 'Updated By')
history_filters += filterdiv('members-external-filter-members', 'Member')
history_filters += filterdiv('members-external-filter-tasks', 'Task')
history_filters += filterdiv('members-external-filter-completed', 'Completed')
history_yadcf_options = [
yadcfoption('update_time:name', 'members-external-filter-update-time', 'range_date'),
yadcfoption('updated_by:name', 'members-external-filter-updated-by', 'multi_select', placeholder='Select who updated', width='200px'),
yadcfoption('member:name', 'members-external-filter-members', 'multi_select', placeholder='Select members', width='200px'),
yadcfoption('task:name', 'members-external-filter-tasks', 'multi_select', placeholder='Select tasks', width='200px'),
yadcfoption('completion:name', 'members-external-filter-completed', 'range_date'),
]
history = DbCrudApiInterestsRolePermissions(
roles_accepted = [ROLE_SUPER_ADMIN, ROLE_LEADERSHIP_ADMIN],
local_interest_model = LocalInterest,
app = bp, # use blueprint instead of app
db = db,
model = TaskCompletion,
template = 'datatables.jinja2',
templateargs={'adminguide': adminguide},
pretablehtml = history_filters.render(),
yadcfoptions=history_yadcf_options,
pagename = 'History',
endpoint = 'admin.history',
endpointvalues={'interest': '<interest>'},
rule = '/<interest>/history',
dbmapping = history_dbmapping,
formmapping = history_formmapping,
checkrequired = True,
clientcolumns = [
{'data': 'update_time', 'name': 'update_time', 'label': 'Update Time',
'type': 'readonly',
},
{'data': 'updated_by', 'name': 'updated_by', 'label': 'Updated By',
'type': 'readonly',
},
{'data': 'member', 'name': 'member', 'label': 'Member',
'type': 'readonly',
},
{'data': 'position', 'name': 'position', 'label': 'Position',
'type': 'readonly',
},
{'data': 'task', 'name': 'task', 'label': 'Task',
'type': 'readonly',
},
{'data': 'completion', 'name': 'completion', 'label': 'Date Completed',
'type': 'readonly',
},
],
servercolumns = None, # not server side
idSrc = 'rowid',
buttons = [
{
'extend':'editRefresh',
'text':'View',
'editor': {'eval':'editor'},
'formButtons': [
{'text': 'Dismiss', 'action': {'eval':'dismiss_button'}}
]
},
'csv'
],
dtoptions = {
'scrollCollapse': True,
'scrollX': True,
'scrollXInner': "100%",
'scrollY': True,
'order': [['update_time:name', 'desc']],
},
edoptions={
'i18n':
# "edit" window shows "Task" in title
{'edit':
{
'title': 'Task Completion',
}
}
},
)
history.register()
|
12,162 | d0ca63029566550a2c9e3888fba9c405ea1b37ea | import tensorflow.compat.v1 as tf
with tf.compat.v1.Session() as sess:
tf.set_random_seed(777)
    filename_queue = tf.train.string_input_producer(['/Users/dong-wongim/Documents/playgroud/tensorflow/data-03-diabetes.csv'], shuffle=False, name='filename_queue')
    # specify the format for reading the text file
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
    record_default = [[0.]] * 9  # 8 feature columns + 1 label column, matching X shape [None, 8]
xy = tf.decode_csv(value, record_defaults = record_default)
# collect batches of csv in
train_x_batch, train_y_batch = \
tf.train.batch([xy[0:-1], xy[-1:]], batch_size=10)
X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([8,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
hypothesis = tf.sigmoid(tf.matmul(X,W)+b)
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis))
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype = tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for step in range(10001):
x_batch, y_batch = sess.run([train_x_batch, train_y_batch])
feed = {X: x_batch, Y: y_batch}
# sess.run(train, feed_dict=feed)
cost_val, _ = sess.run([cost, train], feed_dict=feed)
if step % 200 == 0:
print(step, cost_val)
# print(step, sess.run(cost, feed_dict=feed))
coord.request_stop()
coord.join(threads)
print("Your first data-decode_csv", sess.run(hypothesis, feed_dict={X:[[0,0,0,0,0,0,0,0]]}))
|
12,163 | a283f4357c878988dec07b5905652ead6c0dff9c | import codecs
from antlr4 import *
from antlr4.InputStream import InputStream
from prompto.parser.OParser import OParser
from prompto.parser.ONamingLexer import ONamingLexer
from prompto.parser.OPromptoBuilder import OPromptoBuilder
class OCleverParser(OParser):
def __init__(self, path=None, stream=None, text=None):
self.path = path
chars = None
if stream is not None:
            raw = stream.read()  # avoid shadowing the builtin 'bytes'
            data = codecs.decode(raw)
chars = InputStream(data)
stream.close()
elif text is not None:
chars = InputStream(text)
if chars is not None:
lexer = ONamingLexer(chars)
tokens = CommonTokenStream(lexer)
super().__init__(tokens)
def parse(self):
return self.doParse(self.declaration_list)
def equalToken(self):
return OParser.EQ
def doParse(self, rule):
tree = rule()
builder = OPromptoBuilder(self)
walker = ParseTreeWalker()
walker.walk(builder, tree)
return builder.getNodeValue(tree)
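# Illustrative usage (a sketch; 'source_code' is a placeholder for a prompto O-dialect string):
#   parser = OCleverParser(text=source_code)
#   declarations = parser.parse()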
|
12,164 | b2ae26f5989b1867d0df4e9b592866d689ce41bc | #Daniel Lee
##CSCI 1101 Section 1
def match(text, matchText):
    ##searches for a string inside of a text and checks whether
    ##the string occurs within that larger text
if matchText == text:
return True
for i in range(len(text) - len(matchText) + 1):
if text[i:i+len(matchText)] == matchText:
return True
return False
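## e.g. match("concatenate", "cat") -> True, match("concatenate", "dog") -> False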
def main():
text = str(input("Please enter a string that you want to input here: "))
matchText = str(input("Please enter a string that you want to find inside of text: "))
function = match(text, matchText)
    ##check whether the match function returned True
if function:
print('"%s" was found in "%s".' % (matchText, text))
else:
print('"%s" was not found in "%s".' % (matchText, text))
|
12,165 | 9712a0aec7eea8c6bd6e3dba665a7168b874a2fa | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
from pyglet import app
from pyglet.app.base import PlatformEventLoop
from pyglet.libs.darwin import *
EventLoopTimerProc = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
class CarbonEventLoop(PlatformEventLoop):
def __init__(self):
self._event_loop = carbon.GetMainEventLoop()
self._timer = ctypes.c_void_p()
self._timer_func = None
self._timer_func_proc = EventLoopTimerProc(self._timer_proc)
super(CarbonEventLoop, self).__init__()
def notify(self):
carbon.SetEventLoopTimerNextFireTime(
self._timer, ctypes.c_double(0.0))
def start(self):
# Create timer
timer = self._timer
carbon.InstallEventLoopTimer(self._event_loop,
ctypes.c_double(0.1), #?
ctypes.c_double(kEventDurationForever),
self._timer_func_proc,
None,
ctypes.byref(timer))
def stop(self):
carbon.RemoveEventLoopTimer(self._timer)
def step(self, timeout=None):
self.dispatch_posted_events()
event_dispatcher = carbon.GetEventDispatcherTarget()
e = ctypes.c_void_p()
if timeout is None:
timeout = kEventDurationForever
self._is_running.set()
# XXX should spin on multiple events after first timeout
if carbon.ReceiveNextEvent(0, None, ctypes.c_double(timeout),
True, ctypes.byref(e)) == 0:
carbon.SendEventToEventTarget(e, event_dispatcher)
carbon.ReleaseEvent(e)
timed_out = False
else:
timed_out = True
self._is_running.clear()
return not timed_out
def set_timer(self, func, interval):
if interval is None or func is None:
interval = kEventDurationForever
self._timer_func = func
carbon.SetEventLoopTimerNextFireTime(self._timer,
ctypes.c_double(interval))
def _timer_proc(self, timer, data):
if self._timer_func:
self._timer_func()
'''
self.dispatch_posted_events()
allow_polling = True
for window in app.windows:
# Check for live resizing
if window._resizing is not None:
allow_polling = False
old_width, old_height = window._resizing
rect = Rect()
carbon.GetWindowBounds(window._window,
kWindowContentRgn,
ctypes.byref(rect))
width = rect.right - rect.left
height = rect.bottom - rect.top
if width != old_width or height != old_height:
window._resizing = width, height
window.switch_to()
window.dispatch_event('on_resize', width, height)
# Check for live dragging
if window._dragging:
allow_polling = False
# Check for deferred recreate
if window._recreate_deferred:
# Break out of ReceiveNextEvent so it can be processed
# in next iteration.
carbon.QuitEventLoop(self._event_loop)
self._force_idle = True
sleep_time = self.idle()
if sleep_time is None:
sleep_time = kEventDurationForever
elif sleep_time < 0.01 and allow_polling and self._allow_polling:
# Switch event loop to polling.
carbon.QuitEventLoop(self._event_loop)
self._force_idle = True
sleep_time = kEventDurationForever
carbon.SetEventLoopTimerNextFireTime(timer, ctypes.c_double(sleep_time))
'''
|
12,166 | a13a75f56d830b8dfeec06a007be5c416fe96145 | import pygame
import person
import constants
import random
import fireball
class Donkey(person.Person):
""" Class which defines the Donkey object """
    # Class variable which is a sprite group containing all
    # the donkeys in the game
all_donkeys = pygame.sprite.Group()
def __init__(self,left,bottom,left_boundary,right_boundary):
"""
        Constructor for the Donkey object. Places the donkey at the
        given left, bottom position and ensures that the donkey moves
        within the specified boundary.
"""
super(Donkey,self).__init__(0,0,69,71,'p1_duck.png')
self.rect.left = left
self.rect.bottom = bottom
self.left_boundary = left_boundary
self.right_boundary = right_boundary
self.move_right()
        # Tracks when to change the donkey's direction
self.__steps = 0
self.__threshold_steps = random.randint(25,50)
self.direction = 'RIGHT' # Current direction of Donkey
        # Tracks when to emit fireballs
self.__loop_count = 0
# Variable which determines minimum iterations of main game loop
# after which to emit a fireball
self.__threshold_time = 50
Donkey.all_donkeys.add(self)
def move_left(self):
""" Moves the Donkey to the left """
self.set_x_vector(-1 * constants.DONKEY_SPEED)
def move_right(self):
""" Moves the Donkey to the right """
self.set_x_vector(constants.DONKEY_SPEED)
def __random_movement(self):
""" Responsible for random motion of the Donkey """
self.__steps += 1 # Increment after every frame
# When __steps greater than threshold reverse the direction
# and set threshold to a new random value
if self.__steps >= self.__threshold_steps:
if self.direction == 'RIGHT':
self.move_left()
self.direction = 'LEFT'
else:
self.move_right()
self.direction = 'RIGHT'
self.__threshold_steps = random.randint(25,50)
self.__steps = 0
        # Confines the donkey's movement to within the boundary
self.__check_boundary()
def __check_boundary(self):
""" Confines Donkeys movement to within the boundary """
if self.rect.left <= self.left_boundary:
self.move_right()
if self.rect.right >= self.right_boundary:
self.move_left()
def update(self):
""" Updates Donkey's motion and determines whether to emit fireball or not """
self.__loop_count += 1
if self.__loop_count >= self.__threshold_time:
fireball.Fireball.all_fireballs.add(fireball.Fireball(self.rect.left,self.rect.bottom))
self.__loop_count = 0
self.__threshold_time = 300
self.__random_movement()
super(Donkey,self).update()
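# Illustrative usage from a game loop (a sketch; coordinates are assumptions):
#   donkey = Donkey(left=100, bottom=400, left_boundary=50, right_boundary=600)
#   Donkey.all_donkeys.update()  # once per frame; moves donkeys and emits fireballs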
|
12,167 | 49df9a396a6730416a739981cd6b71598c1d38b3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
********************************************
Created on Thu May 10 12:46:34 2018
by
Chamara Rajapakshe
(cpn.here@umbc.edu)
********************************************
Comparing new DYCOMS2 MSCART field file with the old one
"""
import numpy as np
import cpnLES_MSCARTlib as MSCARTlib
import cpnCommonlib as cpn
def check_PMs():
figdyn,ttldyn=dyn.plot_PM('PM_new',show=False)
figdy ,ttldy =dy.plot_PM('PM_old',show=False)
for i in np.arange(0,25,1):
cpn.savefig(figdyn[i],ttldyn[i],'figures/DYCOMS2_comp/')
cpn.savefig(figdy[i],ttldy[i],'figures/DYCOMS2_comp/')
dy=MSCARTlib.LES_field('OP_dharma_008036_full_3_26.nc',dpath='/umbc/xfs1/zzbatmos/users/charaj1/LES_MSCART/')#DYCOM field file for MSCART
dy.readLES_field()
dyn=MSCARTlib.LES_field('DYCOMS2_dharma_008036_b0p860.nc')#DYCOM field file for MSCART
dyn.readLES_field()
|
12,168 | 92cbb3fa7008784728e29cba362162d912829569 | # PMSP Torch
# Ian Dennis Miller, Brian Lam, Blair Armstrong
__version__ = '0.2'
__project__ = 'pmsp-torch'
__author__ = 'Ian Dennis Miller, Brian Lam, Blair Armstrong'
__email__ = 'CAP Lab'
__url__ = 'https://projects.sisrlab.com/cap-lab/pmsp-torch'
__repo__ = 'https://projects.sisrlab.com/cap-lab/pmsp-torch'
__copyright__ = '2020'
|
12,169 | e75e6e34bba9303a88901a3130debae072472683 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 17:33:40 2017
@author: Mike
Axial marginal ray data for double gauss lens, wl=587.6nm
[x y z], [tanX, tanY], dist
"""
rayf1r2 = [[[0.000000, 0.000000, 0.000000], [0.000000, 0.000000], 5.866433],
[[25.000000, 0.000000, 5.866433], [-0.185302, 0.000000], 4.885983],
[[24.109772, 0.000000, 1.920632], [-0.200953, 0.000000], 6.419725],
[[22.844992, 0.000000, 7.714534], [-0.394267, 0.000000], 5.143978],
[[20.958242, 0.000000, 0.000000], [-0.395393, 0.000000], 11.350044],
[[16.784894, 0.000000, 6.754936], [-0.112583, 0.000000], 9.675248],
[[15.702464, 0.000000, 0.000000], [-0.112583, 0.000000], 9.758844],
[[14.610682, 0.000000, -4.050377], [0.153430, 0.000000], 7.942241],
[[15.815163, 0.000000, 0.000000], [0.151784, 0.000000], 7.115484],
[[16.882946, 0.000000, -3.965091], [-0.057039, 0.000000], 5.250422],
[[16.583956, 0.000000, 0.776812], [-0.071127, 0.000000], 4.551955],
[[16.261006, 0.000000, -1.682704], [-0.258832, 0.000000], 64.838492],
[[0.014138, 0.000000, 0.000000], [-0.258832, 0.000000], 0]]
|
12,170 | 6d13b0d9cf38ab15bb27081fa954f78ef25c3f24 | from django.http import HttpResponse, HttpResponseNotFound, JsonResponse
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from django.shortcuts import render, redirect
from django.core import serializers
from typing import List
from django.views.decorators.csrf import csrf_exempt
from juridico.serializers import QuestionSerializer, ReponseSerializer, DocumentationSerializer, OrganisationSerializer
from .models import *
import juridico.methodes as met
from .forms import *
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
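# e.g. HTTP_X_FORWARDED_FOR = 'client, proxy1, proxy2' -> returns 'client'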
def index(request):
return redirect("requete/client1")
def questions(request):
questions = serializers.serialize('json', Question.objects.all())
return HttpResponse(questions)
def question0(request):
reqcontent = getattr(request, request.method)
# print('question0------------')
requete = Requete.objects.create(
description_cas = reqcontent["description_cas"],
client = Client.objects.get(cid=int(reqcontent["cid"])),
ip = get_client_ip(request)
)
requete.save()
prochaine_question = met.desc2domaine(reqcontent["description_cas"])
print("test question0")
return redirect("/juridico/antique/question" % prochaine_question)
def question(request, question_id):
if question_id == 0:
return question0(request)
o_question = Question.objects.filter(qid=question_id)[0]
if request.method == 'POST':
default_user = Client.objects.get(cid=1)
default_request = Requete.objects.get(reqid=1)
if o_question.reponse_type == "t":
form = QuestionFormText(request.POST)
elif o_question.reponse_type == "e":
form = QuestionFormInt(request.POST)
elif o_question.reponse_type == "f":
form = QuestionFormFloat(request.POST)
elif o_question.reponse_type == "b":
form = QuestionFormBool(request.POST)
elif o_question.reponse_type == "d":
form = QuestionFormDate(request.POST)
elif o_question.reponse_type == "l":
form = QuestionFormList(request.POST)
else:
raise ValueError('Type de réponse non pris en compte : {o_reponse_type}'.format(
o_reponse_type=o_question.reponse_type
))
if form.is_valid():
reponse = Reponse.objects.create(
question = o_question,
client = default_user,
requete = default_request,
reponse = form.cleaned_data['reponse']
)
reponse.save()
next_question_id = next_question(question_id, reponse.reponse)
return redirect('/juridico/question{next_question_id}'.format(
next_question_id=next_question_id
))
else:
raise ValueError('Form not valid')
else:
if o_question.reponse_type == "t":
form = QuestionFormText()
elif o_question.reponse_type == "e":
form = QuestionFormInt()
elif o_question.reponse_type == "f":
form = QuestionFormFloat()
elif o_question.reponse_type == "b":
form = QuestionFormBool()
elif o_question.reponse_type == "d":
form = QuestionFormDate()
elif o_question.reponse_type == "l":
elements = o_question.contenu_liste.split('\r\n')
form = QuestionFormList()
# form = QuestionFormList(choice_list=__list_to_tuple(elements))
# form.response = forms.ChoiceField(choices=d)
else:
raise ValueError('Type de reponse non pris en compte : %s' % o_question.reponse_type)
return render(
request,
'question.html',
{
'question_id': question_id,
'question_label': o_question.question,
'form': form
}
)
def next_question(question_id: int, answer: str) -> int:
print('next question()')
# pass
# if question_id == 0:
if 'Yes' in answer:
return 6
else:
return 7
def erreur404(request):
return HttpResponseNotFound("""
<h1>Erreur 404</h1>
<p>Bah non, elle est pas là, la page...</p>
""")
def requete(request, cid):
return render(
request,
'requete.html',
{
'cid': cid
}
)
@api_view(['GET', 'POST'])
def api_questions(request):
if request.method == 'GET':
questions = Question.objects.all()
serializer = QuestionSerializer(questions, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = QuestionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@csrf_exempt
def api_question(request, question_id: int):
"""
Retrieve, update or delete a question
"""
try:
question = Question.objects.get(qid=question_id)
except Question.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
serializer = QuestionSerializer(question)
return JsonResponse(serializer.data)
elif request.method == 'PUT':
data = JSONParser().parse(request)
serializer = QuestionSerializer(question, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=400)
elif request.method == 'DELETE':
question.delete()
return HttpResponse(status=204)
@api_view(['GET', 'POST'])
def api_reponses(request):
"""GET lists all responses (models.Reponse serialized)
POST creates one"""
if request.method == 'GET':
reponses = Reponse.objects.all()
serializer = ReponseSerializer(reponses, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = ReponseSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def api_next_question(request):
"""Gets the next question after a specific answer
:return: a serialized **models.Question** object.
:return: A JSON object with the question id as the value of 'question_id' key if *id_only* has been passed as HTTP argument
"""
# if request.method == 'GET':
try:
request_id: int = int(request.GET["reqid"])
reponse_id: int = int(request.GET['repid'])
        if reponse_id == -1:
            # convention for the request
return JsonResponse({
# Temporary cheat, always first question
'question_id': 1
})
id_only: bool = bool(int(request.GET.get('id_only', '0')))
o_reponse = Reponse.objects.get(repid=reponse_id)
o_request = Requete.objects.get(reqid=request_id)
method = getattr(met, f'question{o_reponse.question_id}')
next_question_id = method(requete=o_request, reponse=o_reponse)
if not next_question_id:
return JsonResponse({})
if next_question_id == -1:
return JsonResponse({
'question_id': next_question_id
})
o_question = Question.objects.get(qid=next_question_id)
if id_only:
return JsonResponse({
'question_id': o_question.qid
})
serializer = QuestionSerializer(o_question)
return JsonResponse(serializer.data)
except AttributeError:
raise NotImplementedError('')
pass
@api_view(['GET', 'POST'])
def api_resultats(request):
"""Retourne les résultats. Trois types de résultats: des org"""
try:
request_id: int = int(request.GET["reqid"])
req = Requete.objects.get(reqid=request_id)
        # Populate the results
n_orgs = RessourceDeRequete.objects.filter(
type_classe="Organisation",
requete=req
).count()
n_docs =RessourceDeRequete.objects.filter(
type_classe="Documentation",
requete=req
).count()
compte_desire_docu = int(request.GET.get("compte_desire_docu",10))
compte_desire_orgs = int(request.GET.get("compte_desire_orgs",10))
if n_orgs < compte_desire_orgs:
met.add_orgs(req, conditions=None, topn=compte_desire_orgs-n_orgs, poids=0.3)
if n_docs < compte_desire_docu:
v = req.get_desc_vector()
for d, o in met.get_top_educaloi(v,topn=compte_desire_docu-n_docs):
met.add_documentation(req, o.resid, poids=0.3)
        # Convert them to JSON to send to Angular
docu_objs = DocumentationSerializer(
[
Documentation.objects.get(resid=rr.resid)
for rr in RessourceDeRequete.objects.filter(
type_classe="Documentation",
requete=req
).order_by("-poids")
],
many=True
).data
orgs = [
Organisation.objects.get(resid=rr.resid)
for rr in RessourceDeRequete.objects.filter(
type_classe="Organisation",
requete=req
).order_by("-poids")
]
orgs_avocat = [ i for i in orgs if i.tags.filter(pk=11).count() > 0 ]
orgs_autres = [ i for i in orgs if i.tags.filter(pk=11).count() == 0 ]
org_objs = OrganisationSerializer(
orgs_avocat + orgs_autres,
many=True
).data
dir_objs = [
{
"resid": o.resid,
"description": Direction.objects.get(resid=o.resid).formatted_description(req)
}
for o in RessourceDeRequete.objects.filter(
type_classe="Direction",
requete=req
)
]
return JsonResponse({
"directions": dir_objs,
"documentation": docu_objs,
"organisations": org_objs
})
except AttributeError:
raise NotImplementedError('')
pass
def api_nouv_requete(request, cid=None):
"""
    Creates a new request.
    :return: a JSON object with a "requete_id" entry holding the id of the
    created request.
"""
dat = getattr(request, request.method)
pcid = cid if cid != None else dat.get("cid")
pcid = 1 if pcid == None or pcid == '' else pcid
client = Client.objects.get(cid=int(pcid))
req = Requete.objects.create(
description_cas = dat.get("description_cas"),
client = client,
ip = get_client_ip(request)
)
req.save()
return JsonResponse({
"requete_id": req.reqid
})
def antique_question(request, cid=None):
dat = getattr(request, request.method)
pcid = cid if cid != None else dat.get("cid")
fvars = {"q0class": "q0done", "q0actif": " disabled"}
if dat.get("reqid") != None:
        # after the first question has been answered
req = Requete.objects.get(reqid=int(dat.get("reqid")))
qnum = int(dat.get("qnum"))
question = Question.objects.get(qid=qnum)
fvars["requete"] = req
reponse = Reponse.objects.create(
requete = req,
question = question,
reponse = dat.get("reponse").strip()
)
reponse.save()
qfn = getattr(met,"question%d" % qnum)
next_qnum = qfn(req, reponse)
if next_qnum == -1:
return antique_resultats(request)
fvars["qnum"] = next_qnum
fvars["qactive"] = Question.objects.get(qid=next_qnum)
fvars["reponses"] = Reponse.objects.filter(requete=req)
return render(request, "question_ant.html", fvars)
else:
pcid = 1 if pcid == None or pcid == '' else pcid
client = Client.objects.get(cid=int(pcid))
fvars["client"] = client
fvars["reponses"] = []
if "description_cas" in dat:
            # after the description has been entered
fvars["q0class"] = "q0done"
req = Requete.objects.create(
description_cas = dat.get("description_cas"),
client = client,
ip = get_client_ip(request)
)
req.save()
fvars["requete"] = req
next_qnum = met.desc2domaine(req.description_cas)
fvars["qnum"] = next_qnum
fvars["qactive"] = Question.objects.get(qid=next_qnum)
fvars["reponses"] = []
return render(request, "question_ant.html", fvars)
else:
            # before the description has been entered
client= Client.objects.get(cid=int(1))
fvars["client"] = client
fvars["reponses"] = []
fvars["qnum"] = 0
fvars["reponses"] = []
fvars["q0actif"] = ""
return render(request, "question_ant.html", fvars)
def antique_resultats(request, requeteid=None):
dat = getattr(request, request.method)
# reqid = requeteid if requeteid != None else int(dat.get("reqid"))
reqid = int(dat.get("reqid"))
req = Requete.objects.get(reqid=reqid)
fvars = {"requete": req}
    # Populate the results
n_orgs = RessourceDeRequete.objects.filter(
type_classe="Organisation",
requete=req
).count()
n_docs =RessourceDeRequete.objects.filter(
type_classe="Documentation",
requete=req
).count()
compte_desire = 10
from juridico.methodes import get_top_educaloi, add_orgs, add_documentation
if n_orgs < compte_desire:
add_orgs(req, conditions=None, topn=compte_desire-n_orgs)
if n_docs < compte_desire:
v = req.get_desc_vector()
for d, o in get_top_educaloi(v,topn=compte_desire-n_docs):
add_documentation(req, o.resid)
    # Convert them to JSON to send to Angular
fvars["documentation"] = [Documentation.objects.get(resid=rr.resid)
for rr in RessourceDeRequete.objects.filter(
type_classe="Documentation",
requete=req
)]
fvars["organisations"] = [Organisation.objects.get(resid=rr.resid)
for rr in RessourceDeRequete.objects.filter(
type_classe="Organisation",
requete=req
)]
fvars["directions"] = [ Direction.objects.get(resid=rr.resid).formatted_description(req)
for rr in RessourceDeRequete.objects.filter(
type_classe="Direction",
requete=req
)]
return render(request,"resultats_ant.html", fvars)
|
12,171 | f17b9c57a8c4f322805ef12240cbea560080e0d9 | import hashlib
class md5HashSum():
    def valueToHash(self, value):
        # hashlib requires bytes in Python 3; encode str input first
        if isinstance(value, str):
            value = value.encode('utf-8')
        return hashlib.md5(value).hexdigest() |
12,172 | a57ef4ce37543e5021fc37a5a84c14a849f9193e |
import numpy as np
from splunklib import results
from sklearn.linear_model import LogisticRegression
# NOTE: the import path below is an assumption; adjust to wherever
# SplunkClassifierBase is defined in this project.
from splunkml.base import SplunkClassifierBase
class SplunkLogisticRegression(SplunkClassifierBase):
def __init__(self, host, port, username, password, training='batch', regularization=False):
super(SplunkLogisticRegression, self).__init__(host, port, username, password)
self.training = training
self.mapping = {1:'1', 0:'0'} #change
def initialization(self, feature_count):
#1: initialize size of features
self.feature_count = feature_count
#1: set theta to be 0s
self.theta = np.zeros(feature_count + 1) # +1 for the theta_0 term (see andrew ng's notes)
def make_batch_gradient_descent_search(self, search_string, feature_fields, class_field):
'''
'''
# 1: make the different strings to compute sigmoid
z_string = 'eval z=(-1)*'
eval_string = ''
stats_sum_string = 'stats sum(sum_*)'
for i in range(len(feature_fields)):
z_string += '(%s*%s)+' %(feature_fields[i],self.theta[i])
eval_string += 'eval sum_%s=result*%s | ' % (i, feature_fields[i])
z_string += '%s' % self.theta[-1]
eval_string += 'eval sum_end=result'
# 2: turn into a splunk search
splunk_search = 'search %s | %s | eval sigmoiddenom = 1 + exp(z) | eval sigmoid = if(sigmoiddenom=="inf",0,1/sigmoiddenom) | eval result = %s - sigmoid | %s | %s' % (search_string, z_string, class_field, eval_string, stats_sum_string)
# 3: return
        print(splunk_search)
return splunk_search
def splunk_batch_gradient_descent(self, search_string, feature_fields, class_field, alphas=[2.0,1.5,1.0,.5,.3,.2,.1,.01,.001,.00001,.0000001], maxIter=1000, convergence=.01):
'''
'''
# current_diff = np.ones((1,self.feature_count+1))*100 #initialize
for iternum in range(maxIter):
            print('iter: %s' % iternum)
#1: make the new splunk search
splunk_search = self.make_batch_gradient_descent_search(search_string, feature_fields, class_field)
search_kwargs = {'timeout':1000, 'exec_mode':'blocking'}
job = self.jobs.create(splunk_search, **search_kwargs)
search_results = job.results()
old_theta = np.copy(self.theta)
#2: iterate and update theta
for result in results.ResultsReader(search_results):
for i in range(self.feature_count):
# update theta_i
self.theta[i] += alphas[iternum]*float(result['sum(sum_%s)' % i])
self.theta[-1] += alphas[iternum]*float(result['sum(sum_end)'])
#3: check convergence
diff = np.linalg.norm(old_theta - self.theta)
            print(diff)
            print(old_theta)
            print(self.theta)
if diff < convergence:
break
else:
print "difference: %f" % diff
def sigmoid_function(self, z):
return (1 / (1 + np.exp(z)))
def find_h_x(self, feature_fields, event_to_predict):
'''
'''
#1: find z = theta . x
z = 0
for i in range(len(feature_fields)):
z += float(event_to_predict[feature_fields[i]])*self.theta[i]
z += self.theta[-1] # add intercept
z *= -1 #make negative
#2: do sigmoid function
sigmoid = self.sigmoid_function(z)
#3: return
return sigmoid
def evaluate_accuracy(self, search_string, feature_fields, class_field):
self.train(search_string, feature_fields, class_field)
corr = 0
total = 0
job = self.predict(search_string, feature_fields, class_field, False)
offset = 0
count = 1000
result_count = int(job["resultCount"])
while (offset < result_count):
            print(offset)
kwargs_paginate = {'count': count, 'offset':offset}
search_results = job.results(**kwargs_paginate)
for result in results.ResultsReader(search_results):
if result[class_field] == result['predicted_splunkML']:
corr += 1
total += 1
else:
total += 1
offset += count
print "acc: "
print float(corr)/(total)
def predict(self,search_string, feature_fields,class_field,event_to_predict, return_numpy_rep=False):
'''
predict(*):
takes in a string representing a search; returns a splunk job where each event in the search has a new field,
'predicted_splunkML', which is the predicted value for that event.
'''
#1: make z string (z = theta transpose x)
z_string = 'eval z=(-1)*'
for i in range(len(feature_fields)):
z_string += '(%s*%s)+' %(feature_fields[i],self.theta[i])
z_string += '%s' % self.theta[-1]
# 2: add logic to turn into sigmoid
splunk_search = 'search %s | %s | eval sigmoiddenom = 1 + exp(z) | eval sigmoid = if(sigmoiddenom=="inf",0,1/sigmoiddenom) | eval predicted_splunkML=if(sigmoid<.5, %s, %s) | table %s, predicted_splunkML' % (search_string,z_string, self.mapping[0], self.mapping[1], class_field)
        print(splunk_search)
# 3: search and return
search_kwargs = {'timeout':1000, 'exec_mode':'blocking'}
job = self.jobs.create(splunk_search, **search_kwargs)
return job
# #1: find h(x) for this event x
# h_of_x = self.find_h_x(feature_fields, event_to_predict)
# #2: return the closer value
# if h_of_x > .5:
# if return_numpy_rep:
# return 1, False, False
# else:
# return '1'
# else:
# if return_numpy_rep:
# return 0,False,False
# else:
# return '0'
def train_classifier_batch(self, search_string, feature_fields, class_field):
'''
'''
#1: initalize theta parameter
self.initialization(len(feature_fields))
#2: train theta using batch gradient descent
self.splunk_batch_gradient_descent(search_string, feature_fields, class_field)
def train_classifier(self, search_string, feature_fields, class_field):
'''
train_classifier
trains the classifier given the feature fields and class field
feature_fields: list of strings corresponding to features
class_field: string corresponding to class field
'''
if self.training=='batch':
self.train_classifier_batch(search_string, feature_fields, class_field)
else:
pass
def compare_sklearn(self):
'''
compares our implementation to sklearn's implementation.
assumes that evaluate_accuracy has been called.
'''
if not self.accuracy_tested:
            raise RuntimeError('you must test the accuracy of the classifier before comparing to sklearn')
        print("--> Checking sklearn's accuracy...")
X = np.array(self.np_reps)
        LR = LogisticRegression()  # sklearn's LogisticRegression has no alpha parameter; C controls regularization
y = np.array(self.gold)
LR.fit(X,y)
print "...done."
print "sklearn accuracy is %f. Our accuracy was %f. " % (LR.score(X,y), self.accuracy)
|
12,173 | ac668e3cb4a8de706c6a595028b94187570cbf0f | import json
from datetime import datetime
def add2protocol(password, ip):
dtts=datetime.now().timestamp()
protocol=loadProtocol()
id=len(protocol)
protocolentry={'ID':id,'DTTS':dtts,'PW':password,'IP':ip}
protocol.append(protocolentry)
saveProtocol(protocol)
def saveProtocol(protocol):
jsondict = json.dumps(protocol,ensure_ascii=False,)
with open("protocol.json","w", encoding='utf-8') as fw:
fw.write(jsondict)
def loadProtocol():
with open('protocol.json','r',encoding='utf-8') as fr:
jsonstring=fr.read()
protocol=json.loads(jsonstring)
return protocol
def checkIPwithProtocol(ip):
protocol=loadProtocol()
ipBool=False
for protocolentry in protocol:
if ip == protocolentry.get("IP"):
ipBool=True
return ipBool
def getIDfromIP(ip):
protocol=loadProtocol()
for entry in protocol:
if ip == entry.get("IP"):
id=entry.get("ID")
return id
def getDTTSFromID(id):
protocol=loadProtocol()
entry=protocol[id]
dtts=entry.get("DTTS")
return dtts
def getDTTSFromIP(ip):
id=getIDfromIP(ip)
dtts=getDTTSFromID(id)
return dtts
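# Illustrative round trip (a sketch; reads/writes protocol.json in the working directory):
#   add2protocol('secret', '203.0.113.7')
#   checkIPwithProtocol('203.0.113.7')       # -> True
#   getTimeDiffBetweenLogins('203.0.113.7')  # -> seconds since that login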
def getTimeDiffBetweenLogins(ip):
id=getIDfromIP(ip)
dtts=getDTTSFromID(id)
dttsnow=datetime.now().timestamp()
dttsdiff=dttsnow-dtts
return dttsdiff |
12,174 | 05784dfb284c59c2ea2967e8f5aaff12c5100477 | class MinNumberOfCoins:
def run(self, denominations_array, change_to_give):
coins_used = []
for i in range(len(denominations_array) - 1, -1, -1): # we want to start from the highest denomination possible
            while change_to_give >= denominations_array[i] and change_to_give > 0: # take this denomination while enough change remains
change_to_give -= denominations_array[i] # update the amount of change we still need to give
coins_used.append(denominations_array[i]) # append the coins used to the purse
for coin in coins_used: # we are just gonna print out the result here after we are done iterating
print(coin)
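# Quick illustrative check (greedy change-making; assumes the denominations list is
# sorted ascending and forms a canonical coin system such as US coins, where greedy
# is optimal; greedy is not optimal for arbitrary denominations):
if __name__ == "__main__":
    MinNumberOfCoins().run([1, 5, 10, 25], 67)  # prints 25, 25, 10, 5, 1, 1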
|
12,175 | 132c612dc296181e6bde6e3c82317b8babdd5528 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 BuildGroup Data Services Inc.
from davinci_crawling.proxy.proxy import ProxyManager
from davinci_crawling.throttle.throttle import Throttle
from django.apps import AppConfig
class DaVinciCrawlingConfig(AppConfig):
name = "davinci_crawling"
verbose_name = "Django DaVinci Crawling Framework"
def ready(self):
from davinci_crawling.proxy import proxy_quality_checker
ProxyManager.get_proxy_manager()
Throttle.get_manager_clazz()
# Add System checks
# from .checks import pagination_system_check # NOQA
|
12,176 | d7e2a9c33bc7b4f1705fbbd5194992afd68f6127 | from django.conf.urls import url
from .views import Login, TimeInTimeOutHandler
from .views import Register
from .views import Logout
from .views import API
urlpatterns = [
url(r'^$', Login.as_view(), name='index'),
url(r'^register', Register.as_view(), name='register'),
url(r'^register-member', Register.as_view(), name='register-member'),
url(r'^login', Login.as_view(), name='login'),
    url(r'^logout', Logout.as_view(), name='logout'),
url(r'^timer-start-end', TimeInTimeOutHandler.as_view(), name="timer-start-end"),
url(r'^api/v1/timeintimeout', API.as_view(), name="API")
# url(r'^$',
# ListView.as_view(queryset=User_accounts.objects.all().order_by("-created_date")[:25],
# template_name="timelogger/home.html"
# )),
]
|
12,177 | b3976d56bf0e363d3ed8a4933554a3b0a1412f15 | from django.db import models
# Create your models here.
class StoreOTPVerificationLinks(models.Model):
OTP=models.TextField()
mobileNo=models.CharField(max_length=10)
uid=models.TextField() |
12,178 | 7d1e4083597f823270b2c7d4cc52dda1288dcda5 | # Python 3 program to
# find maximum triplet sum
# Function to calculate
# maximum triplet sum
def maxTripletSum(arr, m) :
# Initialize the answer
ans = 0
for i in range(1, (m - 1)) :
max1 = 0
max2 = 0
        # find maximum value (less than arr[i])
        # from 0 to i-1
for j in range(0, i) :
if (arr[j] < arr[i]) :
max1 = max(max1, arr[j])
# find maximum value(greater than arr[i])
# from i + 1 to n-1
for j in range((i + 1), m) :
if (arr[j] > arr[i]) :
max2 = max(max2, arr[j])
# store maximum answer
ans = max(ans, max1 + arr[i] + max2)
return ans
# Driver code
arr = [ 2, 5, 3, 1, 4, 9 ]
m = len(arr)
print(maxTripletSum(arr, m))
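# Expected output: 16 (e.g. the increasing triplet 2 < 5 < 9)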
# This code is contributed
# by Nikita Tiwari.
|
12,179 | 8d919c8d6da907941222470b29c74acc739eef28 | import pytest
from sciwing.modules.embedders.flair_embedder import FlairEmbedder
from sciwing.data.line import Line
from sciwing.tokenizers.word_tokenizer import WordTokenizer
@pytest.fixture(params=["news", "en"])
def flair_embedder(request):
embedding_type = request.param
embedder = FlairEmbedder(embedding_type=embedding_type, datasets_manager=None)
return embedder
@pytest.fixture
def lines():
texts = ["First line", "Second Line which is longer"]
lines = []
for text in texts:
line = Line(
text=text, tokenizers={"tokens": WordTokenizer(tokenizer="vanilla")}
)
lines.append(line)
return lines
class TestFlairEmbedder:
def test_embedding_dimension(self, flair_embedder, lines):
embedding = flair_embedder(lines)
assert embedding.dim() == 3
def test_embedding_length(self, flair_embedder, lines):
embedding = flair_embedder(lines)
assert embedding.size(1) == 5
|
12,180 | 116390819d6805a43e884a6e69fc971ac263a09e | #!/usr/bin/env python3
"""
The benchmark modules provides a convenient interface to standardized benchmarks in the literature.
It provides train/validation/test Tasksets and TaskTransforms for pre-defined datasets.
This utility is useful for researchers to compare new algorithms against existing benchmarks.
For a more fine-grained control over tasks and data, we recommend directly using `l2l.data.Taskset` and `l2l.data.TaskTransforms`.
"""
import os
import learn2learn as l2l
from collections import namedtuple
from .omniglot_benchmark import omniglot_tasksets
from .mini_imagenet_benchmark import mini_imagenet_tasksets
from .tiered_imagenet_benchmark import tiered_imagenet_tasksets
from .fc100_benchmark import fc100_tasksets
from .cifarfs_benchmark import cifarfs_tasksets
__all__ = ['list_tasksets', 'get_tasksets']
BenchmarkTasksets = namedtuple('BenchmarkTasksets', ('train', 'validation', 'test'))
_TASKSETS = {
'omniglot': omniglot_tasksets,
'mini-imagenet': mini_imagenet_tasksets,
'tiered-imagenet': tiered_imagenet_tasksets,
'fc100': fc100_tasksets,
'cifarfs': cifarfs_tasksets,
}
def list_tasksets():
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/vision/benchmarks/)
**Description**
Returns a list of all available benchmarks.
**Example**
~~~python
for name in l2l.vision.benchmarks.list_tasksets():
print(name)
tasksets = l2l.vision.benchmarks.get_tasksets(name)
~~~
"""
return _TASKSETS.keys()
def get_tasksets(
name,
train_ways=5,
train_samples=10,
test_ways=5,
test_samples=10,
num_tasks=-1,
root='~/data',
device=None,
**kwargs,
):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/vision/benchmarks/)
**Description**
Returns the tasksets for a particular benchmark, using literature standard data and task transformations.
The returned object is a namedtuple with attributes `train`, `validation`, `test` which
correspond to their respective Tasksets.
See `examples/vision/maml_miniimagenet.py` for an example.
**Arguments**
* **name** (str) - The name of the benchmark. Full list in `list_tasksets()`.
* **train_ways** (int, *optional*, default=5) - The number of classes per train tasks.
* **train_samples** (int, *optional*, default=10) - The number of samples per train tasks.
* **test_ways** (int, *optional*, default=5) - The number of classes per test tasks. Also used for validation tasks.
* **test_samples** (int, *optional*, default=10) - The number of samples per test tasks. Also used for validation tasks.
* **num_tasks** (int, *optional*, default=-1) - The number of tasks in each Taskset.
* **device** (torch.Device, *optional*, default=None) - If not None, tasksets are loaded as Tensors on `device`.
* **root** (str, *optional*, default='~/data') - Where the data is stored.
**Example**
~~~python
train_tasks, validation_tasks, test_tasks = l2l.vision.benchmarks.get_tasksets('omniglot')
batch = train_tasks.sample()
or:
tasksets = l2l.vision.benchmarks.get_tasksets('omniglot')
batch = tasksets.train.sample()
~~~
"""
root = os.path.expanduser(root)
# Load task-specific data and transforms
datasets, transforms = _TASKSETS[name](train_ways=train_ways,
train_samples=train_samples,
test_ways=test_ways,
test_samples=test_samples,
root=root,
device=device,
**kwargs)
train_dataset, validation_dataset, test_dataset = datasets
train_transforms, validation_transforms, test_transforms = transforms
# Instantiate the tasksets
train_tasks = l2l.data.Taskset(
dataset=train_dataset,
task_transforms=train_transforms,
num_tasks=num_tasks,
)
validation_tasks = l2l.data.Taskset(
dataset=validation_dataset,
task_transforms=validation_transforms,
num_tasks=num_tasks,
)
test_tasks = l2l.data.Taskset(
dataset=test_dataset,
task_transforms=test_transforms,
num_tasks=num_tasks,
)
return BenchmarkTasksets(train_tasks, validation_tasks, test_tasks)
|
12,181 | d76455024fb6d90ef2286a70ef052a0f0ec57458 | from larcc import *
from exercise1 import *
from TopDown import *
from corridoio import *
serieAppartamenti = T(1)(20.7)(STRUCT([biAppartamento,T([1])([41.4])]*4))
edificioAppartamenti = STRUCT([serieAppartamenti,T([3])(3)]*4)
palazzina = STRUCT([topDown,T([3])([3])(edificioAppartamenti),corridoioPalazzo])
controlpoints = [[20,0],[22,0],[24,0],[26,-1],[28,-4],[29,-7],[30,-10]]
dom = larDomain([64])
mapping = larBezierCurve(controlpoints)
obj = larMap(mapping)(dom)
curva = STRUCT(MKPOLS(obj))
hill = STRUCT([curva,S(1)(-1)(curva),POLYLINE([[-20,0],[20,0]]),POLYLINE([[-30,-10],[30,-10]])])
hill2D = T(1)(-1.3)(MAP([S3,S1,S2])((PROD([SOLIDIFY(hill),Q(3)]))))
hill2D = COLOR([0.002,0.743,0.224])(hill2D)
hill3D = T([1,3])([25,-0.1])(STRUCT(NN(36)([hill2D,R([1,2])(PI/36)])))
VIEW(STRUCT([palazzina,S([1,2])([5,5])(hill3D)])) |
12,182 | 153555977715370cb1678ff106e621770fd918d9 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import cgi
# Functions to be used by all classes
def alphabet_position(letter):
"""Returns the relative position of a particular character
"""
alphabet = "abcdefghijklmnopqrstuvwxyz"
pos = 0
for ltr in alphabet:
if ltr == letter.lower():
return pos
pos += 1
return pos
def rotate_character(char, rot):
"""Returns the character that is the result of moving char by rot
"""
alphabet = "abcdefghijklmnopqrstuvwxyz"
if char.lower() not in alphabet:
return char
mod = (alphabet_position(char) + rot) % len(alphabet)
if char in alphabet:
newChar = chr(97 + mod)
else:
newChar = chr(65 + mod)
return newChar
def encrypt(text, rot):
"""Takes a string and rotates each character by a given amount, returns a new string
"""
newText = ""
for ltr in text:
newChar = rotate_character(ltr, rot)
newText += newChar
return newText
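# Worked example: encrypt("Hello", 3) -> "Khoor"
# (each letter shifts 3 places; case is preserved, non-letters pass through)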
# Building the bones of the html for the page
html_head ="""
<!DOCTYPE html>
<html>
<title>Caesar's Legacy</title>
<body>
"""
html_tail ="""
</body>
</html>
"""
class MainHandler(webapp2.RequestHandler):
"""Builds the landing page, and handles any returns to it.
"""
def get(self):
form = """
<h3>Enter your text below:</h3>
<form id ="encryptForm" method="POST" action="/">
<div>
<label for="rot">Rotate by:</label>
<input name="rot" type="text"></input>
</div>
<textarea name="text" rows="20" cols="60"></textarea>
<br>
<input type="submit"/>
</form>
"""
response = html_head + form + html_tail
self.response.write(response)
def post(self):
txt = cgi.escape(self.request.get("text"))
rot = int(self.request.get("rot"))
etxt = encrypt(txt, rot)
form = """
<h3>Enter your text below:</h3>
<form id ="encryptForm" method="POST" action="/">
<div>
<label for="rot">Rotate by:</label>
<input name="rot" type="text"></input>
</div>
<textarea name="text" rows="20" cols="60">{}</textarea>
<br>
<input type="submit"/>
</form>
""".format(etxt)
response = html_head + form + html_tail
self.response.write(response)
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
|
12,183 | 1fc71061a205ca22935eed019562ac24b5450cc3 | #!/bin/python
str1="abc"
dict1={'key':1,'value':5}
# <> is the Python 2-only inequality operator (removed in Python 3), so this
# script runs only under Python 2; != below is the portable spelling.
if str1 <> dict1:
    print("string is not equal dictionary")
if str1 != dict1:
    print("string is not equal dict")
# // is floor division
get_consult = 9//2
print("9//2=",get_consult)
get_consult1 = 9.0//2.0
print("9.0//2.0=",get_consult1)
# ** is exponentiation
print("2**3=",2**3)
|
12,184 | 663ae87147324bbdabfe5cb6cee66b324b6521fc | # Escreva um algoritmo que encontre o maior dentre 3 números.
# Para facilitar a resolução do exercício utilize funções.
def max(x,y,z):
num = [x,y,z]
num.sort()
    return num[-1]  # largest element after ascending sort
print(max(7,3,4))
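# Expected output: 7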
|
12,185 | f0dc6fdb8815406d65736663d164119f1d0ec737 | import sys
import os
from argparse import Namespace
import collections
import copy
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torchvision.datasets import MNIST, CIFAR10
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import config.utils as cutils
from config.config import get_config
import activations
import layers
import loss_fns
from models import FCNet, ConvNet
from hooks import Hook
import plotting
import db.utils as dutils
import metrics
import utils
class Trainer():
def __init__(self, cfg):
self.cfg = cfg
self.db = dutils.init_db(self.cfg.db_path)
self.init_post()
self.device = torch.device(self.cfg.device)
# dataset parameters
if self.cfg.dataset.lower() == 'mnist':
self.dataset = MNIST
self.data_path = self.cfg.data_dir + 'mnist'
self.img_size = [1, 28, 28]
self.normalize = [(0.1307,), (0.3081,)]
elif self.cfg.dataset.lower() == 'cifar10':
self.dataset = CIFAR10
self.data_path = self.cfg.data_dir + 'cifar10'
self.img_size = [3, 32, 32]
self.normalize = [(0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)]
else:
raise NotImplementedError()
# datasets and dataloaders
# base transforms
self.train_transforms = [transforms.ToTensor()]
if self.cfg.normalize_input:
self.train_transforms.append(transforms.Normalize(self.normalize[0], self.normalize[1]))
self.val_transforms = copy.deepcopy(self.train_transforms)
# # (if applicable) additional training set transforms defined here
# train_transforms.extend([
# ])
self.dataset_train = self.dataset(root=self.data_path, train=True, download=True,
transform=transforms.Compose(self.train_transforms),
target_transform=None)
self.dataloader_train = DataLoader(dataset=self.dataset_train, batch_size=self.cfg.batch_size, shuffle=self.cfg.shuffle,
num_workers=self.cfg.num_workers, pin_memory=True, drop_last=False)
# number of output classes (based only on training data)
self.c_dim = len(torch.unique(self.dataset_train.targets))
self.dataset_val = self.dataset(root=self.data_path, train=False, download=True,
transform=transforms.Compose(self.val_transforms),
target_transform=None)
self.dataloader_val = DataLoader(dataset=self.dataset_val, batch_size=self.cfg.batch_size, shuffle=False,
num_workers=self.cfg.num_workers, pin_memory=True, drop_last=False)
# maximum entropy threshold for training with random inputs
self.max_entropy = metrics.max_entropy(self.c_dim)
self.thresh_entropy = self.cfg.train_random * self.max_entropy
# define model
# parameters for each hidden layer is passed in as an argument
self.params = utils.read_params(self.cfg.model_params[self.cfg.model_type])
self.activation = getattr(activations, self.cfg.activation.lower())
if self.cfg.model_type.lower() == 'fc':
if self.cfg.norm.lower() == 'batch':
self.norm = nn.BatchNorm1d
elif self.cfg.norm.lower() == 'layer':
self.norm = layers.LayerNorm1d
else:
self.norm = None
net = FCNet
elif self.cfg.model_type.lower() == 'conv':
if self.cfg.norm.lower() == 'batch':
self.norm = nn.BatchNorm2d
elif self.cfg.norm.lower() == 'layer':
self.norm = layers.LayerNorm2d
else:
self.norm = None
net = ConvNet
else:
raise NotImplementedError()
self.net = net(self.img_size, self.c_dim, self.params, self.activation, self.norm).to(self.device)
self.post['params'] = self.params
# TODO: add custom weight initialization scheme
# # weight initialization - weights are initialized using Kaiming uniform (He) initialization by default
# loss function <kl_y_to_p> generalizes the cross entropy loss to continuous label distributions
# i.e. <kl_y_to_p> is equivalent to <cross_entropy_loss> for one-hot labels
# but is also a sensible loss function for continuous label distributions
self.criterion = loss_fns.kl_y_to_p
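        # (Reference sketch only, not the project's loss_fns code: KL(y || p) equals the
        #  cross entropy -(y * log_softmax(logits)).sum(dim=1) minus the constant entropy
        #  H(y), so both losses coincide for one-hot y and share gradients in general.)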
if self.cfg.optim.lower() == 'sgd':
self.optimizer = optim.SGD(params=self.net.parameters(), lr=self.cfg.lr, momentum=self.cfg.optim_params['sgd']['momentum'], nesterov=self.cfg.optim_params['sgd']['nesterov'])
self.post['momentum'], self.post['nesterov'] = self.cfg.optim_params['sgd']['momentum'], self.cfg.optim_params['sgd']['nesterov']
else:
self.optimizer = optim.Adam(params=self.net.parameters(), lr=self.cfg.lr, betas=(self.cfg.optim_params['adam']['beta1'], self.cfg.optim_params['adam']['beta2']))
self.post['beta1'], self.post['beta2'] = self.cfg.optim_params['adam']['beta1'], self.cfg.optim_params['adam']['beta2']
def train(self):
# tracking training and validation stats over epochs
self.metrics = collections.defaultdict(list)
self.metrics['epochs'].append(0)
# best model is defined as model with best performing (lowest) validation loss
self.best_loss = float('inf')
# # fixed noise input -> can be used to benchmark output class entropy for random inputs
# self.fixed_noise = torch.randn(size=(self.cfg.batch_size, *self.img_size)).to(self.device)
# register hooks
self.hook = Hook(self.cfg.num_log > 0)
self.hook.init_hook(self.net.names, self.net.layers)
# measure performance before any training is done
with torch.no_grad():
self.validate(self.dataloader_train, is_val_set=False, measure_entropy=True)
self.validate(self.dataloader_val, is_val_set=True, measure_entropy=True)
# save initial weights
self.eval_best_model(epoch=0)
self.save_model(epoch=0)
for epoch in range(1, self.cfg.epochs+1):
self.metrics['epochs'].append(epoch)
self.hook.clear_hook()
self.train_one_epoch(self.dataloader_train)
self.hook.init_hook(self.net.names, self.net.layers)
with torch.no_grad():
self.validate(self.dataloader_train, is_val_set=False, measure_entropy=True)
self.validate(self.dataloader_val, is_val_set=True, measure_entropy=True)
if self.cfg.plot:
plotting.plot_line(self.metrics['epochs'],
[self.metrics['train_loss_avg'], self.metrics['val_loss_avg']],
[self.metrics['train_loss_std'], self.metrics['val_loss_std']],
['Training', 'Validation'],
'Epoch Number', 'Loss', self.cfg)
plotting.plot_line(self.metrics['epochs'],
[self.metrics['train_acc'], self.metrics['val_acc']],
None,
['Training', 'Validation'],
'Epoch Number', 'Accuracy', self.cfg)
plotting.plot_line(self.metrics['epochs'],
[self.metrics['train_entropy_avg'], self.metrics['val_entropy_avg'], self.metrics['entropy_rand_avg']],
[self.metrics['train_entropy_std'], self.metrics['val_entropy_std'], self.metrics['entropy_rand_std']],
['Training', 'Validation', 'Random'],
'Epoch Number', 'Entropy', self.cfg)
self.eval_best_model(epoch)
self.save_model(epoch)
self.update_post()
dutils.insert(self.db, self.post)
def eval_best_model(self, epoch):
if self.metrics['val_loss_avg'][-1] < self.best_loss:
self.best_loss = self.metrics['val_loss_avg'][-1]
print('New best model at epoch {:0=3d} with val_loss {:.4f}'.format(epoch, self.best_loss))
utils.flush()
def save_model(self, epoch):
if self.cfg.save_model:
save_name = '{}-net_{}_epoch{:0=3d}_val_loss{:.4f}'.format(self.cfg.model_type, self.cfg.model_name, epoch, self.metrics['val_loss_avg'][-1])
torch.save(self.net.state_dict(), os.path.join(self.cfg.model_dir, self.cfg.model_type, self.cfg.model_name, '{}.pth'.format(save_name)))
if self.best_loss == self.metrics['val_loss_avg'][-1]:
with open(os.path.join(self.cfg.model_dir, self.cfg.model_type, self.cfg.model_name, '{}-net_{}.txt'.format(self.cfg.model_type, self.cfg.model_name)), 'w') as file:
file.write('{}.pth'.format(save_name))
def train_one_epoch(self, dataloader):
self.net.train()
self.hook.flag_hook = False
for mb, (x, y) in enumerate(dataloader):
x, y = x.to(self.device), y.to(self.device)
y_one_hot = utils.to_one_hot(y, self.c_dim)
if self.cfg.train_random > 0 and (mb+1) % 10 == 0:
with torch.no_grad():
x_rand = torch.randn(size=x.shape).to(self.device)
logits_rand = self.net(x_rand)
entropy_rand = metrics.entropy(utils.logits_to_probs(logits_rand))
if torch.mean(entropy_rand).item() <= self.thresh_entropy:
print('training on random inputs & random labels for minibatch {}'.format(mb+1))
x = torch.randn(size=x.shape).to(self.device)
y_one_hot = torch.ones(size=(x.shape[0], self.c_dim)).to(self.device) / self.c_dim
self.optimizer.zero_grad()
logits = self.net(x)
losses = self.criterion(logits, y_one_hot)
torch.mean(losses).backward()
self.optimizer.step()
def validate(self, dataloader, is_val_set=True, measure_entropy=True):
self.net.eval()
self.hook.flag_hook = True
prefix = self.get_prefix(is_val_set)
self.metrics_epoch = collections.defaultdict(utils.Meter)
matrix = np.zeros((self.c_dim, self.c_dim), dtype=np.uint32)
for mb, (x, y) in enumerate(dataloader):
x, y = x.to(self.device), y.to(self.device)
y_one_hot = utils.to_one_hot(y, self.c_dim)
logits = self.net(x)
losses = self.criterion(logits, y_one_hot)
matrix = matrix + metrics.confusion_matrix(utils.tensor2array(utils.get_class_outputs(logits)), utils.tensor2array(y), self.c_dim)
self.metrics_epoch['{}_loss'.format(prefix)].update(utils.tensor2array(losses), x.shape[0])
if self.cfg.num_log > 0 and self.cfg.plot and mb == 0:
num_log = min(self.cfg.num_log, x.shape[0])
name = '{}_{}_{}_epoch{:0=3d}_minibatch{}'
filepath = '{}/{}'.format(os.path.join(self.cfg.plot_dir, self.cfg.model_type, self.cfg.model_name), name)
x_ = x[0:num_log]
x_np, y_np = utils.tensor2array(x[0:num_log]), utils.tensor2array(y[0:num_log])
losses_np = utils.tensor2array(losses[0:num_log])
plotting.make_grid(x_, filepath.format(prefix, 'data', 'x', self.metrics['epochs'][-1], mb+1))
utils.save_array(x_np, filepath.format(prefix, 'data', 'x', self.metrics['epochs'][-1], mb+1))
utils.save_array(y_np, filepath.format(prefix, 'data', 'y', self.metrics['epochs'][-1], mb+1))
utils.save_array(losses_np, filepath.format(prefix, 'data', 'losses', self.metrics['epochs'][-1], mb+1))
for (k, layer_name) in enumerate(self.hook.layers):
layer_np = utils.tensor2array(self.hook.layers[layer_name][0:num_log])
utils.save_array(layer_np, filepath.format(prefix, 'data', layer_name, self.metrics['epochs'][-1], mb+1))
if measure_entropy:
entropy = metrics.entropy(utils.logits_to_probs(logits))
self.metrics_epoch['{}_entropy'.format(prefix)].update(utils.tensor2array(entropy), x.shape[0])
if self.cfg.num_log > 0 and self.cfg.plot and mb == 0:
entropy_np = utils.tensor2array(entropy[0:num_log])
utils.save_array(entropy_np, filepath.format(prefix, 'data', 'entropy', self.metrics['epochs'][-1], mb+1))
if is_val_set:
x_rand = torch.randn(size=x.shape).to(self.device)
logits_rand = self.net(x_rand)
entropy_rand = metrics.entropy(utils.logits_to_probs(logits_rand))
self.metrics_epoch['entropy_rand'].update(utils.tensor2array(entropy_rand), x.shape[0])
if self.cfg.num_log > 0 and self.cfg.plot and mb == 0:
name = '{}_{}_{}_epoch{:0=3d}_minibatch{}'
filepath = '{}/{}'.format(os.path.join(self.cfg.plot_dir, self.cfg.model_type, self.cfg.model_name), name)
x_ = x_rand[0:num_log]
x_np = utils.tensor2array(x_rand[0:num_log])
entropy_np = utils.tensor2array(entropy_rand[0:num_log])
plotting.make_grid(x_, filepath.format(prefix, 'noise', 'x', self.metrics['epochs'][-1], mb+1))
utils.save_array(x_np, filepath.format(prefix, 'noise', 'x', self.metrics['epochs'][-1], mb+1))
utils.save_array(entropy_np, filepath.format(prefix, 'noise', 'entropy', self.metrics['epochs'][-1], mb+1))
for (k, layer_name) in enumerate(self.hook.layers):
layer_np = utils.tensor2array(self.hook.layers[layer_name][0:num_log])
utils.save_array(layer_np, filepath.format(prefix, 'noise', layer_name, self.metrics['epochs'][-1], mb+1))
# disable hook after first minibatch by default - this is done for computational/speed purposes
self.hook.flag_hook = False
self.summarize_metrics(matrix, prefix)
@staticmethod
def get_prefix(is_val_set):
if is_val_set: return 'val'
else: return 'train'
def summarize_metrics(self, matrix, prefix):
for key in sorted(self.metrics_epoch.keys()):
self.metrics['{}_{}'.format(key, 'avg')].append(self.metrics_epoch[key].avg)
self.metrics['{}_{}'.format(key, 'std')].append(self.metrics_epoch[key].std)
print('epoch{:0=3d}_{}{:.4f}'.format(self.metrics['epochs'][-1], '{}_{}'.format(key, 'avg'), self.metrics['{}_{}'.format(key, 'avg')][-1]))
print('epoch{:0=3d}_{}{:.4f}'.format(self.metrics['epochs'][-1], '{}_{}'.format(key, 'std'), self.metrics['{}_{}'.format(key, 'std')][-1]))
print(matrix)
self.metrics['{}_acc'.format(prefix)].append(metrics.accuracy(matrix))
print('epoch{:0=3d}_{}{:.4f}'.format(self.metrics['epochs'][-1], '{}_acc'.format(prefix), self.metrics['{}_acc'.format(prefix)][-1]))
utils.flush()
def init_post(self):
last_run = dutils.get_last(self.db, 'run')
if last_run:
run = last_run + 1
else:
run = 1
self.post = {'run': run}
self.post['timestamp'] = self.cfg.time
cfg_dict = vars(self.cfg)
for key in cfg_dict:
if type(cfg_dict[key]) == str:
self.post[key] = cfg_dict[key].lower()
elif type(cfg_dict[key]) != dict:
self.post[key] = cfg_dict[key]
def update_post(self):
self.post['train_loss_avg'] = self.metrics['train_loss_avg']
self.post['train_loss_std'] = self.metrics['train_loss_std']
self.post['val_loss_avg'] = self.metrics['val_loss_avg']
self.post['val_loss_std'] = self.metrics['val_loss_std']
self.post['train_acc'] = self.metrics['train_acc']
self.post['val_acc'] = self.metrics['val_acc']
best_epoch_train_loss = int(np.argmin(np.asarray(self.metrics['train_loss_avg'])))
best_epoch_train_acc = int(np.argmax(np.asarray(self.metrics['train_acc'])))
best_epoch_val_loss = int(np.argmin(np.asarray(self.metrics['val_loss_avg'])))
best_epoch_val_acc = int(np.argmax(np.asarray(self.metrics['val_acc'])))
self.post['best_epoch_train_loss'] = best_epoch_train_loss
self.post['best_epoch_train_acc'] = best_epoch_train_acc
self.post['best_epoch_val_loss'] = best_epoch_val_loss
self.post['best_epoch_val_acc'] = best_epoch_val_acc
self.post['train_loss_at_best_train_loss'] = self.metrics['train_loss_avg'][best_epoch_train_loss]
self.post['train_acc_at_best_train_loss'] = self.metrics['train_acc'][best_epoch_train_loss]
self.post['val_loss_at_best_train_loss'] = self.metrics['val_loss_avg'][best_epoch_train_loss]
self.post['val_acc_at_best_train_loss'] = self.metrics['val_acc'][best_epoch_train_loss]
self.post['train_loss_at_best_train_acc'] = self.metrics['train_loss_avg'][best_epoch_train_acc]
self.post['train_acc_at_best_train_acc'] = self.metrics['train_acc'][best_epoch_train_acc]
self.post['val_loss_at_best_train_acc'] = self.metrics['val_loss_avg'][best_epoch_train_acc]
self.post['val_acc_at_best_train_acc'] = self.metrics['val_acc'][best_epoch_train_acc]
self.post['train_loss_at_best_val_loss'] = self.metrics['train_loss_avg'][best_epoch_val_loss]
self.post['train_acc_at_best_val_loss'] = self.metrics['train_acc'][best_epoch_val_loss]
self.post['val_loss_at_best_val_loss'] = self.metrics['val_loss_avg'][best_epoch_val_loss]
self.post['val_acc_at_best_val_loss'] = self.metrics['val_acc'][best_epoch_val_loss]
self.post['train_loss_at_best_val_acc'] = self.metrics['train_loss_avg'][best_epoch_val_acc]
self.post['train_acc_at_best_val_acc'] = self.metrics['train_acc'][best_epoch_val_acc]
self.post['val_loss_at_best_val_acc'] = self.metrics['val_loss_avg'][best_epoch_val_acc]
self.post['val_acc_at_best_val_acc'] = self.metrics['val_acc'][best_epoch_val_acc]
self.post['train_entropy_avg'] = self.metrics['train_entropy_avg']
self.post['train_entropy_std'] = self.metrics['train_entropy_std']
self.post['val_entropy_avg'] = self.metrics['val_entropy_avg']
self.post['val_entropy_std'] = self.metrics['val_entropy_std']
self.post['entropy_rand_avg'] = self.metrics['entropy_rand_avg']
self.post['entropy_rand_std'] = self.metrics['entropy_rand_std']
def main(cfg):
start_time = utils.get_current_time()
# override base-config parameters with arguments provided at run-time
base_cfg_dict = utils.load_json(cfg.base_config)
membership = cutils.get_membership(base_cfg_dict)
cfg_dict = vars(cfg)
cfg_dict = {key: cfg_dict[key] for key in cfg_dict if cfg_dict[key] is not None}
updated_cfg_dict = cutils.update_params(base_cfg_dict, cfg_dict, membership)
cfg = Namespace(**updated_cfg_dict)
utils.make_dirs('./config/save/', replace=False)
utils.save_json(updated_cfg_dict, './config/save/config_{}.json'.format(start_time))
cfg.time = start_time
cfg.model_name = '{}_{}'.format(cfg.model_name, start_time)
# setting up output directories, and writing to stdout
utils.make_dirs(os.path.join(cfg.stdout_dir, cfg.model_type), replace=False)
sys.stdout = open(r'./{}/{}/stdout_{}_{}.txt'.format(cfg.stdout_dir, cfg.model_type, cfg.model_type, cfg.model_name), 'w')
print(cfg)
utils.flush()
if cfg.plot:
utils.make_dirs(os.path.join(cfg.plot_dir, cfg.model_type, cfg.model_name), replace=True)
if cfg.save_model:
utils.make_dirs(os.path.join(cfg.model_dir, cfg.model_type, cfg.model_name), replace=True)
# set random seed
if cfg.random_seed == 0:
cfg.random_seed = random.randint(1, 10000)
print('random seed set to {}'.format(cfg.random_seed))
utils.flush()
random.seed(cfg.random_seed)
np.random.seed(cfg.random_seed)
torch.manual_seed(cfg.random_seed)
# set device as cuda or cpu
if cfg.device.lower() == 'cuda' and torch.cuda.is_available():
# reproducibility using cuda
torch.cuda.manual_seed(cfg.random_seed)
cudnn.deterministic = True
cudnn.benchmark = False
else:
if cfg.device.lower() == 'cuda':
print('device option was set to <cuda>, but no cuda device was found')
utils.flush()
cfg.device = 'cpu'
trainer = Trainer(cfg)
trainer.train()
if __name__ == '__main__':
cfg, unparsed = get_config()
main(cfg)
|
12,186 | 952123d5380e1dab0bad2995f9d7d2391c9ce4c5 | import numpy as np
import sys
import csv
from ffnet import *
global inputs
global outputs
def main():
if len(sys.argv) != 2:
print("python test.py [quote]")
else:
net = loadnet("test_net")
output = net.call( [float(sys.argv[1])] )
        print(output[0])
if __name__ == "__main__":
sys.exit(main())
|
12,187 | db61ed07f595e6f452936de9e597a57cf313c84c | import logging
logger = logging.getLogger(__name__)
params = dict()
try:
import numpy
except ImportError:
    raise ImportError(
        "swi-ml has a single NumPy dependency, visit their installation "
        "guide: https://numpy.org/install/"
    )
try:
import cupy
_raise_cupy_error = False
except ImportError:
_raise_cupy_error = True
logger.warning(
"No 'cupy' installation found, backend will be defaulted to 'numpy'"
)
class _Backend:
def __init__(self):
global params
self.backend = None
def set_backend(self, backend):
global params
logger.warning(f"Setting backend: {backend}")
if backend == "numpy":
params["backend"] = numpy
elif backend == "cupy":
from swi_ml import _fallback_to_numpy
if not _raise_cupy_error:
params["backend"] = cupy
elif _fallback_to_numpy:
logger.warning(
"'cupy' backend not found, falling back to 'numpy'"
)
self.set_backend("numpy")
else:
raise ImportError(
"'cupy' backend needs to be installed first, visit "
"https://docs.cupy.dev/en/stable/install.html#install-cupy"
)
else:
raise NotImplementedError(
"Only 'numpy' and 'cupy' backends are supported"
)
self.backend = params["backend"]
def get_backend(self):
global params
if "backend" not in params.keys():
logger.critical("Backend is not set, using default 'numpy'")
self.set_backend("numpy")
return params["backend"]
|
12,188 | 5f2a15bc934c036dfbe207d3d029e4db16906925 | import os
import subprocess
import platform
from copy import deepcopy
from itertools import(chain,
tee,
imap,
ifilter)
from collections import Counter
from operator import itemgetter
from templates import (osx_circos_command,
cygwin_circos_command,
greg_linux_circos_command,
svg_to_png_command)
from filters import read_filled_csv
from templates import(circos_conf_header,
circos_conf_links,
ideogram_conf_template)
# Class storing all necessary information to create a set of circos
# configuration files. Can be modified on the fly to change the
# produced image.
class CircosConfig(object):
def __init__(self, data, **kwargs):
self.data = data
self.link_filter = kwargs.get('link_filter', lambda x, y: True)
self.use_self_map = kwargs.get('use_self_map', False)
# self.salary_filter = kwargs.get('salary_filter', lambda x: True)
self.ltag_parse = kwargs.get('ltag_parse', lambda x: x)
self.rtag_parse = kwargs.get('rtag_parse', lambda x: x)
self.lside_tag_order = kwargs.get('lside_tag_order',
self.data.lcounts.keys())
self.rside_tag_order = kwargs.get('rside_tag_order',
self.data.rcounts.keys())
self.verify_tags()
# ----------------------------------------
# ----- Setup for Color Dictionaries -----
# ----------------------------------------
self.karyotype_colors = kwargs.get('karyotype_colors', {})
self.link_colors = kwargs.get('link_colors', {})
build_links = (self.link_colors == {})
self.use_default_colors = kwargs.get('use_default_colors', False)
open('tmp/customcolors.conf', 'w').close() # clear custom colors
# Default colors using 'default_color{num}'
if self.use_default_colors:
for index, ltag in enumerate(self.lside_tag_order):
self.karyotype_colors[ltag] = 'default_color{index}'.format(index=index)
if build_links:
for rtag in self.rside_tag_order:
self.link_colors[(ltag, rtag)] = 'default_color{index}'.format(index=index)
# Pre-fab color palette using '{palette_name}{num}'
elif kwargs.get('color_palette', False):
palette = kwargs.get('color_palette')
for index, ltag in enumerate(self.lside_tag_order):
self.karyotype_colors[ltag] = '{palette}{index}'.format(index=index, palette=palette)
if build_links:
if self.use_self_map:
for ltag_2 in self.lside_tag_order:
self.link_colors[(ltag, ltag_2)] = '{palette}{index}'.format(index=index, palette=palette)
else:
for rtag in self.rside_tag_order:
self.link_colors[(ltag, rtag)] = '{palette}{index}'.format(index=index, palette=palette)
# Custom dictionary, default to grey for missing entries.
else:
# This needs to happen first because it changes the values
# stored in the color dictionary.
self.write_custom_colors()
# Color links by ltag if only karyotype colors are specified.
if build_links and self.karyotype_colors != {}:
for ltag in self.lside_tag_order:
for rtag in self.rside_tag_order:
self.link_colors[(ltag, rtag)] = self.karyotype_colors.get(ltag, 'grey')
# Add Transparency
tp_level = kwargs.get('transparency_level', 0)
if 0 < tp_level < 6:
for key in self.link_colors:
self.link_colors[key] = self.link_colors[key] + ('_a%d' % tp_level)
# -----------------------------
# ----- Verify Tag Orders -----
# -----------------------------
if set(self.lside_tag_order) != set(data.lcounts.keys()):
print "Warning: lside tag order does not match lcount key set."
print self.lside_tag_order
print data.lcounts.keys()
print set(self.lside_tag_order).symmetric_difference(set(data.lcounts.keys()))
if set(self.rside_tag_order) != set(data.rcounts.keys()):
print "Warning: rside tag order does not match rcount key set."
print self.rside_tag_order
print data.rcounts.keys()
print set(self.rside_tag_order).symmetric_difference(set(data.rcounts.keys()))
# Define and format chromosome names. These are always of the
# form {r, l}side{0-length}.
lside_chroms = gen_chromosome_names('l', len(self.data.lcounts))
rside_chroms = gen_chromosome_names('r', len(self.data.rcounts))
all_chroms = chain(lside_chroms, rside_chroms)
if kwargs.get('filename', '').find('/') != -1:
print "########################################################"
print "Warning: Replacing character [/] with [-] in filename %s" % kwargs['filename']
print "########################################################"
# Settings for circos.conf file template.
self.circos_conf_settings = \
{"chromosomes_units" : 1,
"chromosomes" : ';'.join(all_chroms),
"show_ticks" : "no",
"show_tick_labels" : "no",
"image_size" : kwargs.get('image_size', '3000p'),
"filename" : kwargs.get('filename', 'circos.png').replace('/','-')}
self.circos_conf_link_settings = \
{"radius" : "0.99r",
"bezier_radius" : ".25r",
"crest" : ".4",
"bezier_radius_purity" : ".8",
"show_by_default" : "yes",
"ribbon" : "yes",
"flat" : "no",
"grey_default" : kwargs.get("grey_default", 'lgrey')}
# Settings for ideogram_conf file template.
self.ideogram_conf_settings = \
{"default_spacing" : "0.006r",
"break" : "0.2r",
"radius" : "0.75r"}
def write_config_files(self):
self.write_circos_conf()
self.write_ideogram_conf()
self.write_karyotype_conf()
def write_custom_colors(self):
with open('./tmp/customcolors.conf', 'w') as colors:
if self.use_default_colors:
return
for index, key in enumerate(self.karyotype_colors.keys()):
line = "custom{i} = {value}\n".format(i=index,
value=self.karyotype_colors[key])
colors.write(line)
self.karyotype_colors[key] = "custom%s" % index
def write_circos_conf(self):
with open('./tmp/circos.conf', 'w') as circos_conf:
header = circos_conf_header.format(**self.circos_conf_settings)
circos_conf.write(header)
link_block = circos_conf_links.format(**self.circos_conf_link_settings)
circos_conf.write(link_block)
def write_ideogram_conf(self):
with open('./tmp/ideogram.conf', 'w') as ideogram_conf:
config = ideogram_conf_template.format(**self.ideogram_conf_settings)
ideogram_conf.write(config)
# karyotype.conf controls how the outer ring of the circos image
# is partitioned and colored. Each line defines an arc with a
# width, a color, and a tag used to identify the region by other
# parts of the image configuration.
def write_karyotype_conf(self):
with open('./tmp/karyotype.conf', 'w') as karyotype_conf:
line_template = \
'chr\t-\t{l_or_r}side{index}\t{name}\t{start}\t{end}\t{color}\n'
# Right side karyotypes.
for (index, tag) in enumerate(self.rside_tag_order):
width = self.data.rcounts.get(tag, 0)
# No data for this tag, move on to the next one.
if width == 0:
print("Warning, no data for rside tag: %s" % tag)
continue
color = self.karyotype_colors.get(tag, 'grey')
karyotype_conf.write(line_template.format(l_or_r='r',
index=index,
name=self.rtag_parse(tag),
start=0,
end=width,
color=color
))
# Left side karyotypes.
for (index, tag) in enumerate(self.lside_tag_order):
width = self.data.lcounts.get(tag, 0)
# No data for this tag, move on to the next one.
if width == 0:
print("Warning, no data for rside tag: {tag}.".format(
tag=tag
))
continue
color = self.karyotype_colors.get(tag, 'grey')
karyotype_conf.write(line_template.format(l_or_r='l',
index=index,
name=self.ltag_parse(tag),
start=0,
end=width,
color=color
))
# The meat of the work done by this class occurs here. Writes two
# lines for each (lvalue, rvalue) pair. The lines generate a link
# whose width is given by the count stored in
# self.data.pair_counts.
def write_linkdata(self):
# Make copies of the stored data so we can decrement and check
# correctness at the end.
lcounts = deepcopy(self.data.lcounts)
rcounts = deepcopy(self.data.rcounts)
        # Open the file once for writing; the original opened it read-only and then
        # rebound link_data to a second, never-closed write handle.
        with open('./tmp/linkdata.txt', 'w') as link_data:
            link_id = 0
            line_template = "{hide_link}id{id}\t{name}\t{start}\t{end}\tcolor={color}\n"
# For each lside tag, iterate over all rside tags, drawing
# a ribbon of width given by the number of data entries
# matching both tags (as stored in self.data.pair_counts).
for (l_index, l_tag) in enumerate(self.lside_tag_order):
for (r_index, r_tag) in enumerate(self.rside_tag_order):
# We prepend a # symbol to comment out the line if
# we don't want to actually show this link.
hide_link = '' if self.link_filter(l_tag, r_tag) else '#'
ribbon_width = self.data.pair_counts.get((l_tag, r_tag), 0)
color = self.link_colors.get((l_tag, r_tag), 'grey')
# hide_link = '' if self.salary_filter(color) else '#'
# No data for this pair.
if ribbon_width == 0:
print "No data for combination %s %s" % (l_tag, r_tag)
continue
# ------------------------------------
# Write the line defining the left-side half of the ribbon.
end = lcounts[l_tag]
start = end - ribbon_width
lside_line = line_template.format(id=link_id,
name="lside%d" % l_index,
start=start,
end=end,
color=color,
hide_link=hide_link)
link_data.write(lside_line)
# Resize the count of remaining entries for this
# left-side tag.
lcounts[l_tag] = start
# ------------------------------------
# Write the line defining the right-side half of the ribbon.
end = rcounts[r_tag]
start = end - ribbon_width
rside_line = line_template.format(id=link_id,
name="rside%d" % r_index,
start=start,
end=end,
color=color,
hide_link=hide_link)
link_data.write(rside_line)
# Resize the count of remaining entries for this
# right-side tag.
rcounts[r_tag] = start
# ------------------------------------
link_id += 1
# End rside-loop. We should have processed all
# entries for this lside tag.
#assert lcounts[l_tag] == 0, l_tag
print "Finished processing lside tag: {tag}".format(tag=l_tag)
# End lside-loop
for r_tag, count in rcounts.iteritems():
assert count == 0, "%r %r" % (r_tag, count)
def write_linkdata_self_map(self):
# Make copies of the stored data so we can decrement and check
# correctness at the end.
lcounts = deepcopy(self.data.lcounts)
        # Open the file once for writing; as above, the original leaked a second
        # write handle on top of a read-only one.
        with open('./tmp/linkdata.txt', 'w') as link_data:
            link_id = 0
            line_template = "{hide_link}id{id}\t{name}\t{start}\t{end}\tcolor={color}\n"
# For each lside tag, iterate over all rside tags, drawing
# a ribbon of width given by the number of data entries
# matching both tags (as stored in self.data.pair_counts).
for (l_index_1, l_tag_1) in enumerate(self.lside_tag_order):
for (l_index_2, l_tag_2) in enumerate(self.lside_tag_order):
# We prepend a # symbol to comment out the line if
# we don't want to actually show this link.
hide_link = '' if self.link_filter(l_tag_1, l_tag_2) else '#'
ribbon_width = self.data.pair_counts.get((l_tag_1, l_tag_2), 0)
color = self.link_colors.get((l_tag_1, l_tag_2), 'grey')
# hide_link = '' if self.salary_filter(color) else '#'
# No data for this pair.
if ribbon_width == 0:
print "No data for combination %s %s" % (l_tag_1, l_tag_2)
continue
# ------------------------------------
if l_index_1 == l_index_2:
ribbon_width *= 2
# Write the line defining the left-side half of the ribbon.
end = lcounts[l_tag_1]
start = end - ribbon_width
lside_line = line_template.format(id=link_id,
name="lside%d" % l_index_1,
start=start,
end=end,
color=color,
hide_link=hide_link)
link_data.write(lside_line)
# Resize the count of remaining entries for this
# left-side tag.
lcounts[l_tag_1] = start
# ------------------------------------
if l_index_1 == l_index_2:
ribbon_width = 0
# Write the line defining the right-side half of the ribbon.
end = lcounts[l_tag_2]
start = end - ribbon_width
rside_line = line_template.format(id=link_id,
name="lside%d" % l_index_2,
start=start,
end=end,
color=color,
hide_link=hide_link)
link_data.write(rside_line)
# Resize the count of remaining entries for this
# right-side tag.
lcounts[l_tag_2] = start
# ------------------------------------
link_id += 1
# End rside-loop. We should have processed all
# entries for this lside tag.
#assert lcounts[l_tag] == 0, l_tag
print "Finished processing lside tag: {tag}".format(tag=l_tag_2)
# End lside-loop
# for l_tag_2, count in rcounts.iteritems():
# assert count == 0, "%r %r" % (l_tag_2, count)
def produce_image(self):
self.write_config_files()
if self.use_self_map:
self.write_linkdata_self_map()
else:
self.write_linkdata()
self.run_circos()
def run_circos(self):
# If you are on OSX
if platform.system() == 'Darwin':
subprocess.call(osx_circos_command)
# If you are on Windows via Cygwin.
elif platform.system().startswith('CYGWIN'):
subprocess.call(cygwin_circos_command)
# If you are Kaison and your computer doesn't know about white.
if platform.system().endswith('WOW64'):
print "-----------------------"
print "Converting .svg to .png"
print "-----------------------"
            # Strip the .png extension before converting.
filename = self.circos_conf_settings['filename'].replace('.png', '')
subprocess.call(svg_to_png_command(filename))
elif platform.system() == "Linux":
subprocess.call(greg_linux_circos_command)
def verify_tags(self):
a = set(self.lside_tag_order) - set(self.data.lcounts.keys())
b = set(self.data.lcounts.keys()) - set(self.lside_tag_order)
c = set(self.rside_tag_order) - set(self.data.rcounts.keys())
d = set(self.data.rcounts.keys()) - set(self.rside_tag_order)
assert(len(a) == len(b) == len(c) == len(d) == 0), \
"Tag order doesn't match data keys:\n%s%s%s%s" % \
("\t\tExtra keys in supplied left order: %s\n" % a,
"\t\tExtra keys in left side data: %s\n" % b,
"\t\tExtra keys in supplied right order: %s\n" % c,
"\t\tExtra keys in right side data: %s\n" % d)
def count_single_tag(data, tag):
tag_values = (entry[tag] for entry in data)
return Counter(tag_values)
def gen_chromosome_names(l_or_r, count):
assert(l_or_r in ('l', 'r'))
for index in xrange(count):
yield '{l_or_r}side{index}'.format(l_or_r=l_or_r, index=index)
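# e.g. list(gen_chromosome_names('l', 2)) -> ['lside0', 'lside1']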
|
12,189 | 10fad0d60f9e661cefc0ce67a00735bf92b84f9e | '''
lis[][0]:Petrol
lis[][1]:Distance
'''
# Your task is to complete this function
# Your function should return the starting point
def tour(lis, n):
start =0
total = len(lis)
end = 1 % total
petrol_left = lis[start][0] - lis[start][1]
while start != end or petrol_left < 0 :
while petrol_left < 0 and end != start:
petrol_left -= lis[start][0] - lis[start][1]
start = (start + 1)%total
if start == 0:
return -1
petrol_left += lis[end][0] - lis[end][1]
end = (end + 1)%total
return start
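# Worked example: tour([[4, 6], [6, 5], [7, 3], [4, 5]], 4) -> 1
# (petrol/distance pairs; starting from pump 1 the tank never goes negative)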
#Code here
#{
# Driver Code Starts
if __name__ == '__main__':
t = int(input())
for i in range(t):
n = int(input())
arr=list(map(int, input().strip().split()))
lis=[]
for i in range(1, 2*n, 2):
lis.append([ arr[i-1], arr[i] ])
print(tour(lis, n))
# Contributed by: Harshit Sidhwa
# } Driver Code Ends |
12,190 | 1aa8d0c472492e0413f3466ecb65dfbcc84ff1d9 | import cv2
import math
def fillHoles(mask):
'''
    This hole filling algorithm is described in this post
https://www.learnopencv.com/filling-holes-in-an-image-using-opencv-python-c/
'''
maskFloodfill = mask.copy()
h, w = maskFloodfill.shape[:2]
maskTemp = np.zeros((h+2, w+2), np.uint8)
cv2.floodFill(maskFloodfill, maskTemp, (0, 0), 255)
mask2 = cv2.bitwise_not(maskFloodfill)
return mask2 | mask
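# Hypothetical usage sketch, assuming a binary 0/255 mask (e.g. from thresholding):
#   _, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
#   filled = fillHoles(mask)  # interior holes become foreground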
def ellipse_X(width: int, height: int, y: int) -> int:
'''
Retrieve a certain x value on the ellipse for a given y.
'''
a = width / 2
b = height / 2
y_origin = b - y
return int(math.sqrt(
1 * (math.pow(a, 2) * (1 - math.pow(y_origin, 2) / math.pow(b, 2)))
))
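# e.g. ellipse_X(10, 6, 3) -> 5: at the vertical midpoint (y == height / 2)
# the offset equals the semi-major axis a = width / 2.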
def ellipse_Y(width: int, height: int, x: int) -> int:
'''
Retrieve a certain Y value on the ellipse for a given x.
'''
a = width / 2
b = height / 2
x_origin = a - x
return int(math.sqrt(
1 * (math.pow(b, 2) * (1 - math.pow(x_origin, 2) / math.pow(a, 2)))
)) |
12,191 | 395fe22294644688c2cc8ed452ae9670dcd55e91 | import plotly.graph_objs as go
import numpy as np
from analysis.misc import rgba
from analysis.global_vars import user_review_model
from analysis.global_vars import UI_STYLES
from analysis.misc import map_to_new_low_and_high, get_relative_strengths
from enum import Enum
class FeatureDisplayMode(Enum):
prediction_contribution = 'prediction_contribution'
feature_weight = 'feature_weight'
raw_feature_tfidf = 'tfidf'
@property
def title(self):
figure_title = None
display_mode = self
if display_mode == FeatureDisplayMode.prediction_contribution:
figure_title = 'Prediction Contribution'
elif display_mode == FeatureDisplayMode.feature_weight:
figure_title = 'Feature Weight'
elif display_mode == FeatureDisplayMode.raw_feature_tfidf:
figure_title = 'TF-IDF'
else:
raise ValueError("Invalid `display_mode` type.")
return figure_title
def preprocess(raw_input_text):
text = user_review_model.fv_text_preprocessor(raw_input_text)
text = ' '.join(user_review_model.fv_text_tokenize(text))
return text
def sort_features_human_friendly_order(tokens, features):
"""
Sort ngram features in order of input tokens.
"""
preferred_ordered_features = []
# Short features last
features = sorted(features, key=len, reverse=True)
for token in tokens:
        # Iterate from the end (shortest features first) and remove matches in place
for feature in reversed(features):
# Only add those that begins with current token
if feature.startswith(token):
preferred_ordered_features.append(feature)
features.remove(feature)
return preferred_ordered_features
def get_random_sample():
n_dev_total = len(user_review_model.sentiment.dev_data)
return user_review_model.sentiment.dev_data[np.random.randint(n_dev_total)]
def part1_analyze_coefficients(sentence, display_mode):
"""Analyze (already-preprocessed) review sentence"""
assert isinstance(display_mode, FeatureDisplayMode), "`display_mode` must be `FeatureDisplayMode`."
fv = user_review_model.fv
clf = user_review_model.clf
clf_coefficients = user_review_model.clf_coefficients
feature_names = user_review_model.feature_names
# feature_names_set = user_review_model.feature_names_set
x = fv.transform([sentence]).toarray().flatten()
prob_x = clf.predict_proba([x])[0]
pred_x = int(prob_x[1] > 0.5)
coef_feature_products = clf_coefficients * x
nonzero_inds = x.nonzero()[0]
if len(nonzero_inds) == 0:
raise ValueError('No features detected.')
figure_title = display_mode.title
if display_mode == FeatureDisplayMode.prediction_contribution:
nonzero_strength_values = coef_feature_products[nonzero_inds]
elif display_mode == FeatureDisplayMode.feature_weight:
nonzero_strength_values = clf_coefficients[nonzero_inds]
elif display_mode == FeatureDisplayMode.raw_feature_tfidf:
nonzero_strength_values = x[nonzero_inds]
else:
raise ValueError("Invalid `display_mode` type.")
detected_features = [feature_names[ind] for ind in nonzero_inds]
##################################
# Show in feature extraction list
##################################
tokenize = fv.build_tokenizer()
tokens = tokenize(sentence)
human_sorted_features = sort_features_human_friendly_order(tokens, detected_features)
feature_to_ind = fv.vocabulary_
ind_to_feature_contribution = {ind: contrib for ind, contrib in zip(nonzero_inds, nonzero_strength_values)}
human_sorted_values = [ind_to_feature_contribution[feature_to_ind[f]] for f in human_sorted_features]
########################################
# Show in feature contribution bar graph
########################################
sorted_feature_values = sorted(zip(detected_features, nonzero_strength_values), key=lambda tup: tup[1]) # sort by values
negative_feature_list = []
negative_feature_values = []
positive_feature_list = []
positive_feature_values = []
# Separate negative and positive
min_val = np.inf
max_val = -np.inf
for f, val in sorted_feature_values:
if val < 0:
negative_feature_list.append(f)
negative_feature_values.append(val)
else:
positive_feature_list.append(f)
positive_feature_values.append(val)
# Also get max/min values for later use
abs_val = abs(val)
if abs_val < min_val:
min_val = abs_val
if abs_val > max_val:
max_val = abs_val
positive_bars = go.Bar(
y = positive_feature_list,
x = positive_feature_values,
name = 'Positive',
orientation = 'h',
marker = {
'color': rgba(*UI_STYLES.POSITIVE_COLOR, 0.7),
'opacity': 0.7,
'line': {
'color': rgba(*UI_STYLES.POSITIVE_COLOR),
'width': 2,
}
},
)
negative_bars = go.Bar(
y = negative_feature_list,
x = negative_feature_values,
name = 'Negative',
orientation = 'h',
marker = {
'color': rgba(*UI_STYLES.NEGATIVE_COLOR, 0.7),
'line': {
'color': rgba(*UI_STYLES.NEGATIVE_COLOR),
'width': 2,
}
}
)
figure_feature_contribution = {
'data': [
negative_bars,
positive_bars,
],
'layout': go.Layout(
title=figure_title,
yaxis=dict(
autorange="reversed",
automargin=True,
),
xaxis=dict(
automargin=True,
),
),
}
    # Will be used later in the HTML UI, e.g., to set element opacity based on feature strength
relative_feature_strengths = get_relative_strengths(np.abs(human_sorted_values), 0.15, 1.0)
data_for_sp = {
'positive_features': list(zip(positive_feature_list, positive_feature_values)),
'negative_features': list(zip(negative_feature_list, negative_feature_values)),
'min_val': min_val,
'max_val': max_val,
}
return {
'figure_feature_contribution': figure_feature_contribution,
'sp_data': data_for_sp,
'human_sorted_features': human_sorted_features,
'human_sorted_values': human_sorted_values,
'relative_feature_strengths': relative_feature_strengths,
'pred_x': pred_x,
'prob_x': prob_x,
}
def part1_create_sentiment_prediction_figure(sp_data, top_k=10):
########################################
# Sentiment Prediction (sp_) Stacked Bar graph
########################################
positive_features = sp_data['positive_features']
negative_features = sp_data['negative_features']
min_val = sp_data['min_val']
max_val = sp_data['max_val']
if len(positive_features) + len(negative_features) == 0:
return {}
clf_intercept = user_review_model.clf_intercept
sp_figure_data = []
base_strength = 0.3
TOP_K_FEATURES = top_k
top_k_positives = list(reversed(positive_features[-TOP_K_FEATURES:]))
rest_positives = positive_features[:-TOP_K_FEATURES]
total_rest_positive_value = sum([v for _, v in rest_positives])
top_k_negatives = list(negative_features[:TOP_K_FEATURES])
rest_negatives = negative_features[TOP_K_FEATURES:]
total_rest_negative_value = abs(sum([v for _, v in rest_negatives]))
def __create_bar(name, value, show_text, x, marker_color, line_color):
return go.Bar(
x = [x],
y = [value],
text = name if show_text else None,
name = name,
textposition='auto',
marker= {
'color': marker_color,
'line': {
'color': line_color,
'width': 1,
},
}
)
def create_positive_bar(name, value, opacity, show_text):
return __create_bar(name, value, show_text, 'POSITIVE', rgba(*UI_STYLES.POSITIVE_COLOR, opacity), rgba(*UI_STYLES.POSITIVE_COLOR))
def create_negative_bar(name, value, opacity, show_text):
return __create_bar(name, value, show_text, 'NEGATIVE', rgba(*UI_STYLES.NEGATIVE_COLOR, opacity), rgba(*UI_STYLES.NEGATIVE_COLOR))
##################
# POSITIVE STACKS
##################
for i, (f,v) in enumerate(top_k_positives):
relative_strength = np.round(map_to_new_low_and_high(v, min_val, max_val, base_strength, 1), 1)
sp_figure_data.append(create_positive_bar(f, v, relative_strength, show_text=(i < 3)))
if len(rest_positives) > 0:
sp_figure_data.append(create_positive_bar(f'{len(rest_positives)} others', total_rest_positive_value, 0.1, show_text=True))
##################
# NEGATIVE STACKS
##################
for i, (f,v) in enumerate(top_k_negatives):
v = abs(v)
relative_strength = np.round(map_to_new_low_and_high(v, min_val, max_val, base_strength, 1), 1)
sp_figure_data.append(create_negative_bar(f, v, relative_strength, show_text=(i < 3)))
if len(rest_negatives) > 0:
sp_figure_data.append(create_negative_bar(f'{len(rest_negatives)} others', total_rest_negative_value, 0.1, show_text=True))
##################
# INTERCEPT
##################
sp_intercept_bar = None
if clf_intercept > 0:
opacity = np.round(map_to_new_low_and_high(clf_intercept, min_val, max_val, base_strength, 1), 1)
sp_intercept_bar = create_positive_bar('INTERCEPT', clf_intercept, opacity, True)
else:
opacity = np.round(map_to_new_low_and_high(abs(clf_intercept), min_val, max_val, base_strength, 1), 1)
sp_intercept_bar = create_negative_bar('INTERCEPT', abs(clf_intercept), opacity, True)
sp_figure_data.append(sp_intercept_bar)
sp_stacked_bars_layout = go.Layout(
title='Positiveness vs Negativeness',
barmode='stack'
)
figure_sp_stacked_bars = go.Figure(data=sp_figure_data, layout=sp_stacked_bars_layout)
return figure_sp_stacked_bars
def part1_create_feature_in_context(feature, show_k_samples):
fv = user_review_model.fv
sentiment = user_review_model.sentiment
trainX = user_review_model.trainX
pred_probs = user_review_model.train_pred_probs
feature_ind = fv.vocabulary_[feature]
found_in_training_inds = trainX[:, feature_ind].nonzero()[0]
found_preds = sentiment.trainy[found_in_training_inds]
positive_inds = found_in_training_inds[np.where(found_preds == 1)][:show_k_samples]
negative_inds = found_in_training_inds[np.where(found_preds == 0)][:show_k_samples]
num_training_samples = trainX.shape[0]
num_appears_in_train_set = len(found_in_training_inds)
num_not_appear_in_train_set = num_training_samples - num_appears_in_train_set
num_positives = sum(sentiment.trainy[found_in_training_inds])
num_negatives = num_appears_in_train_set - num_positives
pie_trace = go.Pie(
labels=['Positive context', 'Negative context', 'Not appear'],
values=[num_positives, num_negatives, num_not_appear_in_train_set],
hoverinfo='label+percent',
textinfo='value',
textfont=dict(size=16),
marker=dict(
colors=[rgba(*UI_STYLES.POSITIVE_COLOR), rgba(*UI_STYLES.NEGATIVE_COLOR), rgba(217, 217, 217, 0.5)],
),
)
pie_layout = go.Layout(
title=f"'{feature}' in training data",
)
pie_figure = go.Figure(data=[pie_trace], layout=pie_layout)
appearance_percent_value = np.round(100*num_appears_in_train_set/num_training_samples, 2)
appearance_percent_text = f' ({appearance_percent_value}%)' if appearance_percent_value != 0 else ''
positive_num_text = f'**{num_positives}**' if num_positives > num_negatives else num_positives
negative_num_text = f'**{num_negatives}**' if num_negatives > num_positives else num_negatives
positive_negative_comparison_text = None
if num_positives == 0:
positive_negative_comparison_text = f"'{feature}' appears **only in negative context!**"
elif num_negatives == 0:
positive_negative_comparison_text = f"'{feature}' appears **only in positive context!**"
else:
pos_neg_ratio = num_positives / num_negatives
neg_pos_ratio = 1. / pos_neg_ratio
        if pos_neg_ratio >= 2:
            positive_negative_comparison_text = f"'{feature}' appears **{int(pos_neg_ratio)} times more often in positive context** than in negative context!"
        elif neg_pos_ratio >= 2:
            positive_negative_comparison_text = f"'{feature}' appears **{int(neg_pos_ratio)} times more often in negative context** than in positive context!"
if positive_negative_comparison_text is not None:
positive_negative_comparison_text = f"""
>
> {positive_negative_comparison_text}
>
"""
else:
positive_negative_comparison_text = ''
md_explaination = f"""
'{feature}' appears in {num_appears_in_train_set} training samples, from a total of {num_training_samples} samples{appearance_percent_text}.
* {positive_num_text} of them are positive ({np.round(100*num_positives/num_appears_in_train_set, 2)}% of total appearances).
* {negative_num_text} of them are negative ({np.round(100*num_negatives/num_appears_in_train_set, 2)}% of total appearances).
{positive_negative_comparison_text}
"""
return pie_figure, dict(
md_explaination=md_explaination,
positive_samples=[sentiment.train_data[ind] for ind in positive_inds],
negative_samples=[sentiment.train_data[ind] for ind in negative_inds],
positive_samples_pred_probs = pred_probs[positive_inds],
negative_samples_pred_probs = pred_probs[negative_inds],
)
def get_information_values_for_top_positive_and_negative_features():
top_negatives = [
('the worst', 0.555712),
('horrible', 0.195638),
('worst', 0.184547),
('not', 0.183855),
('terrible', 0.100446),
('rude', 0.091172),
('bad', 0.054171),
('asked', 0.034427),
('disappointed', 0.030780),
('slow', 0.030436),
]
top_positives = [
('great', 0.217689),
('amazing', 0.148440),
('delicious', 0.123200),
('awesome', 0.097469),
('love', 0.094778),
('excellent', 0.092318),
('love this', 0.063266),
('favorite', 0.056707),
('perfect', 0.041055),
('fantastic', 0.036014),
]
top_positive_iv_bars = go.Bar(
y = [iv for _, iv in top_positives],
x = [feature for feature, _ in top_positives],
        name = 'Most informative features for positive labeling',
marker = {
'color': rgba(*UI_STYLES.POSITIVE_COLOR, 0.7),
'opacity': 0.7,
'line': {
'color': rgba(*UI_STYLES.POSITIVE_COLOR),
'width': 2,
}
},
)
top_negative_iv_bars = go.Bar(
y = [iv for _, iv in top_negatives],
x = [feature for feature, _ in top_negatives],
name = 'Most informative features for negative labeling',
marker = {
'color': rgba(*UI_STYLES.NEGATIVE_COLOR, 0.7),
'opacity': 0.7,
'line': {
'color': rgba(*UI_STYLES.NEGATIVE_COLOR),
'width': 2,
}
},
)
top_positive_layout = go.Layout(
title="Most informative features (IV) for positive labeling",
yaxis=dict(
title='IV',
automargin=True,
fixedrange=True,
),
xaxis=dict(
automargin=True,
fixedrange=True,
)
)
top_negative_layout = go.Layout(
title="Most informative features (IV) for negative labeling",
yaxis=dict(
title='IV',
automargin=True,
fixedrange=True,
),
xaxis=dict(
automargin=True,
fixedrange=True,
)
)
return go.Figure([top_positive_iv_bars], top_positive_layout), go.Figure([top_negative_iv_bars], top_negative_layout)
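# Hedged usage sketch (not part of the original blob): both helpers return
# plain plotly figures, so they render anywhere plotly runs, e.g. with
# plotly >= 4:
# pos_fig, neg_fig = get_information_values_for_top_positive_and_negative_features()
# pos_fig.show()
# neg_fig.show()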
|
12,192 | 7db88d6acc2fbae38f487d064007ec028aa7a4c2 | from typing import Optional
from pydantic import BaseModel
class BETInput(BaseModel):
"""Default BET Fitting response"""
pressure: list
loading: list
pressureMode: str
pressureUnit: str
materialBasis: str
materialUnit: str
loadingBasis: str
loadingUnit: str
material: str
adsorbate: str
temperature: int
class BETResponse(BaseModel):
"""Default BET Fitting response"""
area: float
c_const: float
n_monolayer: float
p_monolayer: float
bet_slope: float
bet_intercept: float
corr_coef: float
limits: list
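# A minimal usage sketch (not part of the original blob; all values are
# made up for illustration). Pydantic validates and coerces the payload on
# construction; .json() is the pydantic v1 serializer.
if __name__ == "__main__":
    example = BETInput(
        pressure=[0.05, 0.1, 0.2],
        loading=[1.2, 2.3, 3.1],
        pressureMode="relative",
        pressureUnit="bar",
        materialBasis="mass",
        materialUnit="g",
        loadingBasis="molar",
        loadingUnit="mmol",
        material="MOF-5",
        adsorbate="N2",
        temperature=77,
    )
    print(example.json())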
|
12,193 | c6a8a3dbac9a51a7063310d5bdf901f46d12abdb | # Generated by Django 2.2.3 on 2019-07-27 21:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Manufactures', '0002_auto_20190727_1754'),
]
operations = [
migrations.AlterField(
model_name='manufacture',
name='state',
field=models.CharField(choices=[('preparation', 'Para preparar'), ('cut', 'Cortados'), ('sewing', 'En costura')], default='preparation', max_length=50),
),
]
|
12,194 | f048c1e95ad54be8736c6156e6ded4395d5b1613 | import pandas as pd
from pymongo import MongoClient
client = MongoClient('mongodb://dobfriend:1234****asdf@cluster0-shard-00-00.rpfv6.mongodb.net:27017,cluster0-shard-00-01.rpfv6.mongodb.net:27017,cluster0-shard-00-02.rpfv6.mongodb.net:27017/dobfriend?ssl=true&replicaSet=atlas-p2skss-shard-0&authSource=admin&retryWrites=true&w=majority')
userInfo = []
df = pd.read_csv('DOBsearch - Sheet1.csv')
for i in range(0,len(df['DOB'])):
userInfo.append({
"name" : df['NAME'][i],
"DOB" : df['DOB'][i],
"fblink": df['Facebook link'][i],
"instid": df['Insta link'][i],
"month": df['Month'][i],
"gender": df['Gender'][i]})
print(userInfo)
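# Equivalent one-liner (a sketch, not in the original blob): pandas can emit
# the same list of dicts directly once columns are renamed to the target keys.
# userInfo = df.rename(columns={'NAME': 'name', 'Facebook link': 'fblink',
#                               'Insta link': 'instid', 'Month': 'month',
#                               'Gender': 'gender'}).to_dict('records')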
try:
    db = client.dobfriend
    db.users.drop()                  # clear any previous import
    db.users.insert_many(userInfo)   # first real round trip; pymongo connects lazily here
    print("Connected to MongoDB and inserted", len(userInfo), "users")
except Exception:
    print("Unable to connect to the server.") |
12,195 | 147bf7fcecce8a59ef43a84f93c5028b51e4a98e | # -*- coding: utf-8 -*-
import clr
clr.AddReference("System.Windows.Forms")
clr.AddReference("System.Drawing")
clr.AddReference("System.ComponentModel")
from System.Windows.Forms import Form, Application, Button, MessageBox, FormStartPosition, DockStyle, Label, Padding, \
TextBox, FormBorderStyle, GroupBox, CheckBox
from System.Drawing import Size, Point, Color, Font
cats = []
cats.append('Стены')
cats.append('Крыши')
cats.append('Перекрытия')
cats.append('Двери')
cats.append('Окна')
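# Revit category names, in Russian: Walls, Roofs, Floors, Doors, Windows.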
class MyForm(Form):
def __init__(self):
self.StartPosition = FormStartPosition.CenterScreen
self.FormBorderStyle = FormBorderStyle.FixedDialog
self.Text = 'Текст'
self.Name = 'Имя'
self.Size = Size(500, 250)
self.MaximizeBox = False
self.MinimizeBox = False
self.msg = []
gb = GroupBox()
gb.Text = "Категории"
gb.Size = Size(120, 110)
gb.Location = Point(20, 20)
gb.Parent = self
j = 25
for c in cats:
self.cb = CheckBox()
self.cb.Text = c
self.cb.Location = Point(25, j)
j += 25
self.cb.Width = 200
            self.cb.CheckedChanged += self.OnChanged  # Checked is a bool property; CheckedChanged is the event
gb.Size = Size(120, 20 + j)
gb.Controls.Add(self.cb)
self.label = Label()
self.label.Text = "Результат"
self.label.Location = Point(225, 20)
self.label.Height = 25
self.label.Width = 225
self.Controls.Add(self.label)
self.label.Text = "".join(self.msg)
    def OnChanged(self, sender, event):
        # Keep msg in sync with the checkbox state and refresh the result label.
        if sender.Checked:
            self.msg.append(sender.Text)
        elif sender.Text in self.msg:
            self.msg.remove(sender.Text)
        self.label.Text = ", ".join(self.msg)

    # Note: the two handlers below are never attached to any control, and
    # textBox1_TextChanged references a self.textbox that is never created.
def button1_Click(self, sender, event):
MessageBox.Show('Hello world')
def textBox1_TextChanged(self, sender, event):
self.label.Text = self.textbox.Text
    def update(self, sender, event):
        # This handler is never wired to a control; it simply closes the dialog
        # (checkval/output1 are never defined on this form).
        self.Close()
Application.EnableVisualStyles()
form = MyForm()
Application.Run(form)
OUT = form.msg  # Dynamo output: the checked category names
|
12,196 | 70e0faa65c1e96b9291b7f8cb4030cfb24c9b9e3 | #!/usr/bin/env python3
import datetime
x = datetime.datetime.now()
print(x)
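# A formatted variant (sketch): render the same timestamp with an explicit pattern.
print(x.strftime("%Y-%m-%d %H:%M:%S"))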
|
12,197 | 8ee2bff1994b947eec9bd35cc2eca8e086e5be2c | import torchvision.models as models
import torch
import torch.nn as nn
from lib.model.roi_align.modules.roi_align import RoIAlignAvg
class UBR_VGG(nn.Module):
def __init__(self):
super(UBR_VGG, self).__init__()
self.model_path = 'data/pretrained_model/vgg16_caffe.pth'
def _init_modules(self):
vgg = models.vgg16()
print("Loading pretrained weights from %s" % (self.model_path))
state_dict = torch.load(self.model_path)
vgg.load_state_dict({k: v for k, v in state_dict.items() if k in vgg.state_dict()})
vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])
# not using the last maxpool layer
self.base = nn.Sequential(*list(vgg.features._modules.values())[:-1])
# Fix the layers before conv3:
for layer in range(10):
for p in self.base[layer].parameters(): p.requires_grad = False
self.top = vgg.classifier
self.bbox_pred_layer = nn.Linear(4096, 4)
self.roi_align = RoIAlignAvg(7, 7, 1.0/16.0)
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
        weight initializer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.bbox_pred_layer, 0, 0.001, False)
def create_architecture(self):
self._init_modules()
self._init_weights()
def _head_to_tail(self, pool5):
pool5_flat = pool5.view(pool5.size(0), -1)
fc7 = self.top(pool5_flat)
return fc7
def forward(self, im_data, rois):
base_feat = self.base(im_data)
pooled_feat = self.roi_align(base_feat, rois)
# feed pooled features to top model
pooled_feat = self._head_to_tail(pooled_feat)
# compute bbox offset
bbox_pred = self.bbox_pred_layer(pooled_feat)
bbox_pred = bbox_pred.view(-1, 4)
return bbox_pred
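# Hedged usage sketch (shapes and ROI layout are assumptions, and the custom
# RoIAlignAvg extension must be compiled for this to run):
# net = UBR_VGG()
# net.create_architecture()
# im = torch.randn(1, 3, 600, 600)                    # NCHW image batch
# rois = torch.tensor([[0., 50., 50., 200., 200.]])   # (batch_idx, x1, y1, x2, y2)
# deltas = net(im, rois)                              # -> (num_rois, 4) box offsets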
|
12,198 | 93d1cd79e359e049fa217148a4981eb837355e95 | # -*- coding: utf-8 -*-
# from xlrd import *
# from xlwt import *
import xlrd
from xlutils.copy import copy
import os.path
# # w = Workbook()
# # ws = w.add_sheet('xlwt was here')
# book = xlrd.open_workbook('mini.xls')
# # book = xlrd.open_workbook("mini.xls")
# sh = book.sheet_by_index(0)
# sh.write(0,0,'A1')
#
# book.save('mini.xls')
rb = xlrd.open_workbook('mini.xls',formatting_info=True)
r_sheet = rb.sheet_by_index(0)
wb = copy(rb)
sheet = wb.get_sheet(0)
sheet.write(0,"sdfdsfs","jhh","165465")
# sheet.write(0,1,"yjhgjghj")
# sheet.write(0,2,"iuil")
wb.save('mini.xls') |
12,199 | 40b050dd967e9e04abc390bd0fafea9e1fb7d0fb | # To take or not to take
# Each balloon is either "N" (negate the running value) or an arithmetic
# suffix such as "+3" or "*2"; pick the subset of balloons that maximizes
# the result, starting from 1.
possible_solution = 1
balloons = []
for x in range(int(input("Enter an integer: "))):
    possible_solution = 1
    for y in range(int(input("Enter number of balloons: "))):
        balloons.append(input("Enter balloon " + str(y + 1) + ": "))
    # Enumerate every subset with a bitmask: bit a of z decides balloon a.
    for z in range(2 ** len(balloons) - 1, -1, -1):
        current_answer = 1
        b = "{0:b}".format(z).zfill(len(balloons))
        for a in range(len(balloons)):
            if b[a] == "1":
                if balloons[a] == "N":
                    current_answer *= -1
                else:
                    current_answer = int(eval(str(current_answer) + balloons[a]))
        if possible_solution < current_answer:
            possible_solution = current_answer
    print("Best Answer: ", possible_solution)
    balloons = [] |