index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
13,500 | 128d115d39ba8e9ba386c20137ca281bb42eafd8 | from django import forms
from . import models
class SongForm(forms.ModelForm):
    """ModelForm for creating/editing a Song with its core metadata fields."""
    class Meta:
        model = models.Song
        fields = ["title", "artist", "remixer", "url", "file", "genre", "tags"]
|
13,501 | 0f40ee5b37f046a40a84d6c510e38915412eabc8 | # Generated by Django 2.0.1 on 2019-05-23 07:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax `assigned_to` and `old_ticket` on Ticket to be form-optional."""
    dependencies = [
        ('ticket', '0007_ticketfile_title'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='assigned_to',
            # NOTE(review): blank=True without null=True makes the field
            # optional in forms but still required at the database level —
            # confirm that is intended for these ForeignKeys.
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='ticket_assigned', to='accounts.Profile'),
        ),
        migrations.AlterField(
            model_name='ticket',
            name='old_ticket',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='ticket.Ticket'),
        ),
    ]
|
# Common English function words excluded from the frequency report.
STOP_WORDS = [
    'a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'for', 'from', 'has', 'he',
    'i', 'in', 'is', 'it', 'its', 'of', 'on', 'that', 'the', 'to', 'were',
    'will', 'with'
]
def print_word_freq(file):
    """Read in `file` and print the ten most frequent non-stop words.

    Each output line shows the word (right-justified to the longest word),
    its count, and a bar of asterisks proportional to the count.
    """
    # `with` closes the handle even if reading fails (the original leaked it).
    with open(file, 'r') as text_file:
        contents = text_file.read()
    def clean_text(text):
        """Lowercase `text` and drop every non-ASCII-letter character."""
        all_letters = "abcdefghijklmnopqrstuvwxyz"
        return "".join(char for char in text.lower() if char in all_letters)
    clean_words = [clean_text(word) for word in contents.split()]
    go_words = [word for word in clean_words if word not in STOP_WORDS]
    # Single O(n) counting pass; the original re-ran go_words.count() per
    # word (O(n^2)) and then crashed calling an undefined get_longest_word().
    word_count = {}
    for go_word in go_words:
        word_count[go_word] = word_count.get(go_word, 0) + 1
    sorted_word_count = sorted(word_count.items(), key=lambda x: x[1], reverse=True)
    longest_word_len = max((len(word) for word in go_words), default=0)
    for word, value in sorted_word_count[:10]:
        print(word.rjust(longest_word_len), "|", str(value).ljust(3), "*" * value)
if __name__ == "__main__":
    # CLI entry point: validate the path argument, then report frequencies.
    import argparse
    from pathlib import Path
    parser = argparse.ArgumentParser(
        description='Get the word frequency in a text file.')
    parser.add_argument('file', help='file to read')
    args = parser.parse_args()
    file = Path(args.file)
    if file.is_file():
        print_word_freq(file)
    else:
        print(f"{file} does not exist!")
        exit(1)
|
13,503 | 67f6018639176696321814533edf05784f1c3dd2 | import re
def abbreviate(words):
    """Return the acronym of `words`: first letter of each word, uppercased.

    Words are runs of letters/apostrophes, so separators may be whitespace,
    hyphens, or punctuation.  The original split on spaces after replacing
    hyphens and raised IndexError on empty tokens (e.g. "a - b").
    """
    return "".join(word[0] for word in re.findall(r"[A-Za-z']+", words)).upper()
|
13,504 | b331230bbbfea89fd8cb80dc4b917c302e6a186f | from django.test import TestCase
from datetime import datetime
from .models import *
# Create your tests here.
class MenuItemsTest(TestCase):
    """Exercises MenuItems.final_price() on valid and invalid rows."""
    def setUp(self) -> None:
        cat1 = Category.objects.create(name='irani')
        cat2 = Category.objects.create(name='fast food')
        menu_item1 = MenuItems.objects.create(name='mi1', category=cat1, discount=10, price=10000)
        # NOTE(review): items 2-4 carry out-of-range discount/price values.
        # If final_price() asserts on such values, the final_price calls
        # below will raise inside setUp() and error out every test — confirm
        # against the model's implementation.
        menu_item2 = MenuItems.objects.create(name='mi2', category=cat2, discount=110, price=20000)
        menu_item3 = MenuItems.objects.create(name='mi3', category=cat1, discount=-5, price=30000)
        menu_item4 = MenuItems.objects.create(name='mi4', category=cat2, discount=5, price=-25000)
        menu_item5 = MenuItems.objects.create(name='mi1', category=cat1, discount=10, price=12000.50)
        menu_item6 = MenuItems.objects.create(name='mi1', category=cat1, discount=8, price='12000')
        menu_item7 = MenuItems.objects.create(name='mi1', category=cat1, price=8000)
        self.mi1 = MenuItems.final_price(menu_item1)
        self.mi2 = MenuItems.final_price(menu_item2)
        self.mi3 = MenuItems.final_price(menu_item3)
        self.mi4 = MenuItems.final_price(menu_item4)
        self.mi5 = MenuItems.final_price(menu_item5)
        self.mi6 = MenuItems.final_price(menu_item6)
        self.mi7 = MenuItems.final_price(menu_item7)
        # print(self.mi1)
    def test_menu_item_final_price_mi1(self):
        # 10000 minus a 10% discount.
        self.assertEqual(self.mi1, 9000)
    def test_menu_item_final_price_mi5(self):
        self.assertEqual(self.mi5, 10800.45)
    def test_menu_item_final_price_mi1_by_empty_discount(self):
        # No discount supplied: final price equals the base price.
        self.assertEqual(self.mi7, 8000)
    # NOTE(review): the tests below pass the *string* 'menu_item2' etc.
    # rather than the MenuItems instance, so they only assert that
    # final_price rejects a plain string — the instances were likely meant.
    def test_menu_item_final_price_mi2_discount_greater_than_100(self):
        self.assertRaises(AssertionError, MenuItems.final_price, 'menu_item2')
    def test_menu_item_final_price_mi3_negative_discount(self):
        self.assertRaises(AssertionError, MenuItems.final_price, 'menu_item3')
    def test_menu_item_final_price_mi4_negative_price(self):
        self.assertRaises(AssertionError, MenuItems.final_price, 'menu_item4')
    def test_menu_item_final_price_mi5_string_price(self):
        # NOTE(review): duplicates the mi4 case above ('menu_item4');
        # 'menu_item6' (string price) was probably intended.
        self.assertRaises(AssertionError, MenuItems.final_price, 'menu_item4')
class TimestampMixinTest(TestCase):
    """Tests for the soft-delete timestamp provided by the timestamp mixin."""
    def setUp(self) -> None:
        self.u1 = Cashier.objects.create(username='hadi', password='123', email='asd@a.com')
    def test_timstampmixin_delete_timestamp(self):
        # A freshly created user has not been soft-deleted.
        self.assertEqual(self.u1.delete_timestamp, None)
    def test_timstampmixin_delete_timestamp2(self):
        self.u1.delete_timestamp = datetime.now()
        self.u1.save()
        self.assertNotEqual(self.u1.delete_timestamp, None)
    def test_timstampmixin_delete_timestamp_greater_than_now(self):
        # Fix: `timedelta` was never imported by this module (only
        # `datetime` is), so this test raised NameError unless a wildcard
        # import from models happened to provide it.
        from datetime import timedelta
        self.u1.delete_timestamp = datetime.now() + timedelta(days=5)
        self.u1.save()
        self.assertGreater(self.u1.delete_timestamp, datetime.now())
|
13,505 | 4e66f86f60eaf39db1f9ff43814cb0fe00391d9e | """
Robert Matthews
CS 1400
Section 002
Exercise 3
3.4
Chapter 3, Programming Exercises #2, pg. 79. Write a program to calculate the cost/sq. inch of a circular pizza given
the diameter and total price. Submit the code as your answer. Do not submit results.
"""
def main(pizzaCostRaw=None, pizzaDiameter=None):
    """Report the price per square inch of a circular pizza.

    Args:
        pizzaCostRaw: total price of the pizza; prompted for when None.
        pizzaDiameter: diameter in inches; prompted for when None.

    Generalized so the values can be passed in directly; interactive use is
    unchanged.  float(input()) replaces eval(input()) — eval executes
    arbitrary user-supplied code.
    """
    import math
    if pizzaCostRaw is None:
        pizzaCostRaw = float(input("Enter the cost of the pizza: "))
    if pizzaDiameter is None:
        pizzaDiameter = float(input("Enter the diameter in inches of the pizza: "))
    print()
    pizzaCostFormat = "${0:.2f}".format(pizzaCostRaw)
    pizzaSqrIn = math.pi * (pizzaDiameter / 2) ** 2
    sqrInCost = pizzaCostRaw / pizzaSqrIn
    print("Your", pizzaDiameter, "inch pizza cost", pizzaCostFormat)
    print("The price per square inch for your pizza is", "${0:.2f}".format(sqrInCost))
# Guarded so importing this module no longer blocks on input().
if __name__ == "__main__":
    main()
|
13,506 | 2f0528a3c75b723becd5fe53046faaa5c4fc091b | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-10-14 20:10
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Persona plus multi-table-inheritance children
    Agricultor and Empleado (each a OneToOne parent link to Persona)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Persona',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Nombre', models.CharField(max_length=50)),
                ('ApMaterno', models.CharField(max_length=50)),
                ('ApPaterno', models.CharField(max_length=50)),
                ('Telefono', models.PositiveIntegerField(blank=True, null=True)),
                ('Direccion', models.CharField(max_length=200)),
                ('FechaNacimiento', models.DateField(verbose_name='Fecha de nacimiento')),
            ],
        ),
        migrations.CreateModel(
            name='Agricultor',
            fields=[
                ('persona_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='usuario.Persona')),
                ('CURP', models.CharField(max_length=18, null=True, unique=True)),
                ('RFC', models.CharField(max_length=18, null=True, unique=True)),
            ],
            bases=('usuario.persona',),
        ),
        migrations.CreateModel(
            name='Empleado',
            fields=[
                ('persona_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='usuario.Persona')),
                # Two-letter role code constrained to the choices below.
                ('Cargo', models.CharField(choices=[('GR', 'GERENTE'), ('AD', 'ADMINISTRADOR'), ('SP', 'SUPERVISOR')], max_length=2)),
            ],
            bases=('usuario.persona',),
        ),
    ]
|
13,507 | 51929c81591dea78d13a982d8a8b348b7b058031 | #encoding:utf-8
'''
Created on 2014-8-13
测试产品管理
@author: 张文硕
'''
from hhplt.testengine.autoTrigger import EmptyTrigger
class ProductTestSuite():
    @staticmethod
    def emptyFun(product):
        '''No-op used for suite modules that define no setup/rollback hook.'''
        pass
    '''Product test suite (wraps one test-suite module).'''
    def __init__(self,testSuiteModule):
        # Suite (test item) display name.
        self.suiteName = testSuiteModule.__dict__["suiteName"]
        # Suite version string.
        self.version = testSuiteModule.version
        # Description taken from the module docstring.
        self.suiteDesc = testSuiteModule.__doc__
        # Test functions: every module attribute whose name starts with "T_".
        self.testFunList = [testSuiteModule.__dict__[testFcName]
                    for testFcName in filter(lambda s:s.startswith("T_") ,dir(testSuiteModule))]
        # Sum of failure weights.
        self.failWeightSum = testSuiteModule.failWeightSum
        # Optional setup hook.
        self.setupFun = testSuiteModule.setup if "setup" in testSuiteModule.__dict__ else ProductTestSuite.emptyFun
        # Optional rollback hook.
        self.rollbackFun = testSuiteModule.rollback if "rollback" in testSuiteModule.__dict__ else ProductTestSuite.emptyFun
        # Optional finalizer hook.
        self.finalFun = testSuiteModule.finalFun if "finalFun" in testSuiteModule.__dict__ else ProductTestSuite.emptyFun
        # Optional auto start/stop trigger; stored as a class (not an
        # instance) and instantiated at startup.
        self.autoTrigger = testSuiteModule.autoTrigger if "autoTrigger" in testSuiteModule.__dict__ else EmptyTrigger
    def __getFunName(self,fdoc):
        # Docstrings follow "name-description"; the part before '-' is the name.
        inx = fdoc.find("-")
        return fdoc[:inx] if inx>0 else fdoc;
    def __getFunDesc(self,fdoc):
        # The part after the first '-' is the description (may be empty).
        inx = fdoc.find("-")
        return fdoc[inx+1:] if inx>0 else "";
    def __getIndex(self,fname):
        # Function names are "T_NN...": characters 2-3 are the ordering index.
        return fname[2:4]
    def getItems(self):
        # The trailing letter of the function name selects the verdict mode:
        # 'A' = automatic judgement, 'M' = manual judgement.
        return [{"name":self.__getFunName(f.__doc__),
                 "method":{"A":u"自动判定","M":u"手动判定"}[f.__name__[-1]],
                 "fun":f,
                 "desc":self.__getFunDesc(f.__doc__),
                 "index":self.__getIndex(f.__name__)
                 } for f in self.testFunList]
# Registry: product name -> {suite name -> ProductTestSuite}.
productTestSuits={}
def registerProduct(productName,testSuiteModules):
    '''Register a product and the test suites built from its modules.'''
    productTestSuits[productName] = {}
    for testSuiteModule in testSuiteModules:
        suite = ProductTestSuite(testSuiteModule)
        productTestSuits[productName][suite.suiteName] = suite
def getProductNameList():
    '''Return the list of registered product names.'''
    # list(...) keeps the Python 2 behaviour (keys() was already a list)
    # and also works on Python 3, where dict.keys() is a view.
    return list(productTestSuits.keys())
def getProductTestSuiteNameList(productName):
    '''Return the product's suite names, sorted alphabetically.'''
    # sorted() returns a new sorted list, matching the original Python 2
    # keys()/sort() result while also working on Python 3, where dict.keys()
    # is a view with no .sort() method.
    return sorted(productTestSuits[productName])
def getTestItemList(productName,suiteName):
    '''Return the list of individual test items for a suite.'''
    testSuite = productTestSuits[productName][suiteName]
    return testSuite.getItems()
def getTestSuiteDesc(productName,suiteName):
    '''Return the suite's description text.'''
    testSuite = productTestSuits[productName][suiteName]
    return testSuite.suiteDesc
def getTestSuiteVersion(productName,suiteName):
    '''Return the suite's version string.'''
    testSuite = productTestSuits[productName][suiteName]
    return testSuite.version
def getAutoTrigger(productName,suiteName):
    '''Return the suite's auto start/stop trigger class.'''
    testSuite = productTestSuits[productName][suiteName]
    return testSuite.autoTrigger
def getTestSuiteParallelSlotCount(productName,suiteName):
    "Return the suite's parallel slot count."
    # NOTE(review): ProductTestSuite.__init__ never sets `parallelSlot`, so
    # this raises AttributeError unless the attribute is added elsewhere.
    testSuite = productTestSuits[productName][suiteName]
    return testSuite.parallelSlot
|
13,508 | c222bfb856c0f5176275be533f1ed788b630cde5 | """
@author: Payam Dibaeinia
"""
import torch
import torch.nn as nn
from collections import OrderedDict
from ResidualBlock import ResNetBlock
class generator(nn.Module):
    """CycleGAN-style ResNet generator.

    Architecture (per the cycleGAN paper): a 7x7 reflection-padded stem,
    two stride-2 downsampling convolutions, `nBlocks` residual blocks, two
    stride-2 transposed convolutions back up, and a 7x7 tanh output head.
    Instance normalization throughout; convolutions keep their default bias.
    """
    def __init__(self,in_channels, out_channels, nBlocks, nChanFirstConv = 64, dropout = False):
        """
        nChanFirstConv: channel count of the first convolution (64 in the
        cycleGAN paper).  dropout (p=0.5) is applied inside the residual
        blocks when enabled.
        """
        super(generator,self).__init__()
        base = nChanFirstConv
        modules = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels = in_channels, out_channels = base, kernel_size = 7),
            nn.InstanceNorm2d(base),
            nn.ReLU(inplace = True),
        ]
        # Two downsampling stages: base -> 2*base -> 4*base channels.
        for mult in (1, 2):
            modules += [
                nn.Conv2d(in_channels = base * mult, out_channels = base * mult * 2, kernel_size = 3, stride = 2, padding = 1),
                nn.InstanceNorm2d(base * mult * 2),
                nn.ReLU(inplace = True),
            ]
        # Residual core at 4*base channels.
        modules += [ResNetBlock(base * 4, dropout) for _ in range(nBlocks)]
        # Two upsampling stages mirroring the downsampling path.
        for mult in (4, 2):
            modules += [
                nn.ConvTranspose2d(in_channels = base * mult, out_channels = base * mult // 2, kernel_size = 3, stride = 2, padding = 1, output_padding = 1),
                nn.InstanceNorm2d(base * mult // 2),
                nn.ReLU(inplace = True),
            ]
        modules += [
            nn.ReflectionPad2d(3),
            nn.Conv2d(in_channels = base, out_channels = out_channels, kernel_size = 7),
            nn.Tanh(),
        ]
        self.all_layers_ = nn.Sequential(*modules)
    def forward(self,x):
        """Map an image batch through the full generator stack."""
        return self.all_layers_(x)
|
13,509 | 79a057497d2a5940ecac8e25a853ab7b079f2b66 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def main(startpoint=None, endpoint=None):
    """Print the minimal king-move count and one shortest path between two
    chess squares.

    Args:
        startpoint/endpoint: squares like "a1"; prompted for when None
        (generalized — previously the squares could only come from stdin).

    Output: first the move count (the Chebyshev distance, since a king moves
    diagonally), then one line per move among R/L/U/D combinations.
    """
    if startpoint is None:
        startpoint = input()
    if endpoint is None:
        endpoint = input()
    x_distance = ord(startpoint[0]) - ord(endpoint[0])
    y_distance = int(startpoint[1]) - int(endpoint[1])
    print(max(abs(x_distance), abs(y_distance)))
    while x_distance != 0 or y_distance != 0:
        r = ""
        if x_distance < 0: r = "R"; x_distance += 1
        if x_distance > 0: r = "L"; x_distance -= 1
        if y_distance < 0: r += "U"; y_distance += 1
        if y_distance > 0: r += "D"; y_distance -= 1
        print(r)
if __name__ == "__main__":
    main()
|
13,510 | 3f7b939d6d4d12439643523c47f22bab9dcb0f0c | from flask import Flask, render_template, redirect, url_for, request, jsonify, session
from initQuestionfiles import initBiology, initComputing, initNorwegian, optionList
app = Flask(__name__)
#secret key is required for the use of sessions.
# NOTE(review): a secret key committed to source control should be rotated
# and loaded from the environment instead.
app.secret_key = 'A0Zr98j /3 yX R~XHH!jmN]LWX / ,? RT'
#initialising the pre-made flashcard sets and the option list, this pulls from initQuestionfiles.py
initBiology()
initComputing()
initNorwegian()
optionList()
#This will make sure that anyone going to the root page will be redirected to the home page.
@app.route('/')
def base():
    """Redirect the bare root URL to the home page."""
    return redirect(url_for("home"))
#to inspect the masterpage without any content
@app.route('/flashycards/')
def root():
    """Render the bare master template (layout inspection aid)."""
    return render_template('MasterPage.html')
#Home page
@app.route('/flashycards/home/')
def home():
    """Render the home page."""
    return render_template('homePage.html')
#redundant code, decided to remove the about page.
@app.route('/flashycards/home/about/<text>')
def about(text):
    """Render the (deprecated) about page, echoing `text`."""
    return render_template('aboutPage.html',text=text)
#Create set page, will ask for the set name and the number of questions so they can be used in the questions page.
@app.route('/flashycards/home/createset/', methods=['GET', 'POST'])
def createset():
    """Collect the new set's name and question count, then redirect to the
    question-entry form.  GET just renders the form."""
    if request.method == 'POST':
        session['setName'] = request.form['setName']
        # (removed a dead placeholder assignment that was immediately
        # overwritten by the form value)
        intQuestionCount = int(request.form['questionCount'])
        return redirect(url_for("questions", intQuestionCount=intQuestionCount,setName=session['setName']))
    else:
        return render_template('createSet.html')
# Open set page, this will use the option list file to take the option selected in the dropdown and use that to open a flashcard set.
@app.route('/flashycards/home/openset/', methods=['GET','POST'])
def openset():
    """Pick a flashcard set from the dropdown and load its question and
    answer files into the session."""
    if request.method == 'POST':
        session['option'] = request.form['option']
        # NOTE(review): session['optionList'] is only populated by the GET
        # branch below; a POST on a fresh session would raise KeyError.
        for option in session['optionList']:
            # readlines() keeps the trailing newline; strip before comparing.
            option = option.replace("\n", "")
            if session['option'] == option:
                # File naming is uniform: "<set>.txt" holds the questions
                # and "<set>Answers.txt" the answers.  with-statements close
                # the handles even on error (the originals leaked on raise).
                with open(option + ".txt", "r") as fo:
                    session['questionlist'] = fo.readlines()
                with open(option + "Answers.txt") as fr:
                    session['answerlist'] = fr.readlines()
        return redirect(url_for("flashcard"))
    else:
        with open("optionList", "r") as fo:
            session['optionList'] = fo.readlines()
        return render_template('openSet.html', optionList=session['optionList'])
#this is the questions page where the user sets the questions and answers for their flashcard set.
@app.route('/flashycards/home/createset/questions/<int:intQuestionCount><setName>', methods=['GET', 'POST'])
def questions(intQuestionCount,setName):
    """Collect `intQuestionCount` question/answer pairs for the new set."""
    # NOTE(review): the `setName` URL parameter is unused; the template is
    # given the name from the session instead — confirm they always agree.
    if request.method == 'POST':
        session['questionlist'] = request.form.getlist('question[]')
        session['answerlist'] = request.form.getlist('answer[]')
        return redirect(url_for("flashcard"))
    else:
        return render_template('createQuestion.html', questionCount=intQuestionCount,Name=session['setName'])
#this is the flashcard page, it displays the list of questions and once the check answer button is pressed will return check answer page
@app.route('/flashycards/home/openset/flashcard/', methods=['GET', 'POST'])
def flashcard():
    """Show the current set's questions; on submit, stash the answers and
    move on to grading."""
    if request.method == 'POST':
        session['formAnswers'] = request.form.getlist("cardAnswer")
        return redirect(url_for("checkAnswer"))
    else:
        return render_template('flashCard.html', questionlist=session['questionlist'])
#check answer page will calculate the grade and show it on screen
@app.route('/flashycards/home/openset/flashcard/checkanswer/', methods=['GET','POST'])
def checkAnswer():
    """Grade the submitted answers against the stored answer list and map
    the percentage to a letter grade plus feedback message."""
    grade = 0
    gradePercentage = 0
    # NOTE(review): each submitted answer is compared against *every* stored
    # answer, so duplicate answers can inflate the score — confirm intended.
    for formAnswer in session['formAnswers']:
        for actualAnswer in session['answerlist']:
            actualAnswer = actualAnswer.replace("\n", "")
            if formAnswer.casefold() == actualAnswer.casefold():
                grade += 1
    # Guard: an empty submission no longer divides by zero.
    if session['formAnswers']:
        gradePercentage = (grade / len(session['formAnswers'])) * 100
    # elif chain makes the upper-bound checks of the original redundant.
    if gradePercentage >= 75:
        letterGrade = "A"
        gradeMessage = "Congratulations! you achieved a mark over 75%, this shows you have great knowledge of the flashcard set. But don't stop there, practising everyday will help you remember the answers."
    elif gradePercentage >= 60:
        letterGrade = "B"
        gradeMessage = "well done! you achieved a mark over 60%. This shows you have good knowledge of the flashcard set. But don't stop there, practising everyday will help you remember the answers."
    elif gradePercentage >= 50:
        # Fixed the lowercase "c" — inconsistent with the A/B/D grades.
        letterGrade = "C"
        gradeMessage = "There's room for improvement! you achieved a mark over 50%, this shows you have decent knowledge of the flashcard set. But don't stop there, practising everyday will help you remember the answers."
    else:
        letterGrade = "D"
        gradeMessage = "oh no! you achieved a mark of less than 50%, this shows you have bad knowledge of the flashcard set. But don't worry, practising everyday will help you remember the answers."
    if request.method == "POST":
        return redirect(url_for("flashcard"))
    else:
        return render_template('checkAnswer.html', gradePercentage=gradePercentage, gradeMessage=gradeMessage, letterGrade=letterGrade)
#this is just a reroute, doesnt open a html file. Will take the current session data and write it into a file which openset page can open. will return to the openset page.
@app.route('/flashycards/home/openset/flashcard/checkanswer/save/', methods=['GET','POST'])
def save():
    """Persist the session's set (questions, answers, option-list entry) to
    disk, then return to the open-set page."""
    if request.method == 'GET':
        # with-statements close the files even if a write fails (the
        # originals leaked the handles on error).
        with open(session['setName'] + ".txt", "w") as question_file:
            for question in session['questionlist']:
                question_file.write(question)
                question_file.write("\n")
        with open(session['setName'] + "Answers.txt", "w") as answer_file:
            for answer in session['answerlist']:
                answer_file.write(answer)
                answer_file.write("\n")
        with open("optionList", "a") as option_file:
            option_file.write(session['setName'])
            option_file.write("\n")
        return redirect(url_for("openset"))
if __name__ == "__main__":
    # Listens on all interfaces; debug=True must be disabled in production.
    app.run(host='0.0.0.0', debug=True)
|
13,511 | be0c81b34583cb5e1de518eaa04733de53703806 | from django.urls import path
from . import views
# URL namespace used when reversing, e.g. reverse('blog:view_blog').
app_name = 'blog'
urlpatterns = [
    path('', views.all_blog, name='all_blog'),
    # bid: primary key of the blog entry to display.
    path('<int:bid>/', views.view_blog, name='view_blog'),
] |
13,512 | 8e02ec109a8473825968e6358dfdf61c47e42b5d | # Sample of simple LFSR
def shift(startstate, tap_1, tap_2):
    """Run a linear-feedback shift register until its state recurs.

    Each step emits the last register bit, then shifts in the XOR of the
    bits at positions `tap_1` and `tap_2` on the left.  Returns the emitted
    bits in reverse order of generation.  (Removed an unused `counter`.)
    """
    cipherbits = []
    register = startstate.copy()
    while True:
        cipherbits.append(register[-1])
        # New leftmost bit = XOR of the taps (inequality of 0/1 bits).
        register = [int(register[tap_1] != register[tap_2])] + register[:-1]
        if register == startstate:
            break
    return cipherbits[::-1]
print(shift([0,1,0,1], 0, 2))
class Solution:
    def binaryGap(self, N):
        """
        Return the largest distance between two adjacent 1-bits of N
        (0 when N has fewer than two set bits).

        :type N: int
        :rtype: int
        """
        l = len(bin(N)[2:])
        position = -1       # bit position (from the left) of the previous 1
        # Initialized up front: the original left max_gap unbound when N had
        # no set bits (N == 0), raising UnboundLocalError.
        max_gap = 0
        while l > 0:
            if N % 2 == 1:
                if position != -1:
                    max_gap = max(max_gap, position - l)
                position = l
            N = N // 2
            l -= 1
        return max_gap
print(Solution().binaryGap(1))
13,514 | fdb03d958558555cd8f088a4d4ea2bc3c53a2b53 | """
Retrieves the permissions on the file that are assigned to the current user.
"""
from pprint import pprint
from office365.sharepoint.client_context import ClientContext
from tests import test_team_site_url, test_client_credentials
# Authenticate with app-only client credentials against the test team site.
ctx = ClientContext(test_team_site_url).with_credentials(test_client_credentials)
file_url = 'Shared Documents/big_buck_bunny.mp4'
file = ctx.web.get_file_by_server_relative_url(file_url)
# Request only EffectiveBasePermissions; execute_query() does the round trip.
file_item = file.listItemAllFields.select(["EffectiveBasePermissions"]).get().execute_query()
pprint(file_item.effective_base_permissions.permission_levels)
|
13,515 | 86a42db959d27797df27dc7edbfda69f4cc78bc5 | import numpy as np
import pandas as pd
# Movie-review dataset with a 'review' text column and 'sentiment' labels.
df=pd.read_csv("/home/ghanshyam/Machine Learning/movie_data.csv",encoding='utf-8')
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
def tokenizer(text):
    """Plain whitespace tokenizer, offered as a GridSearchCV candidate."""
    return text.split()
from nltk.corpus import stopwords
# English stop words; passed to the vectorizer as an optional filter.
stop=stopwords.words('english')
from nltk.stem.porter import PorterStemmer
porter=PorterStemmer()
def tokenizer_porter(text):
    """Whitespace-tokenize and Porter-stem each token."""
    return [porter.stem(word) for word in text.split()]
import re
def preprocessor(text):
    """Strip HTML tags, lowercase, collapse non-word runs to spaces, and
    re-append any emoticons (with their '-' noses removed) so these
    sentiment-bearing tokens survive the cleanup."""
    # Raw strings: '\W' etc. in plain literals are invalid escapes
    # (SyntaxWarning on Python 3.12+).
    text = re.sub(r'<[^>]*>', '', text)
    # Emoticons like :), ;-(, =D — captured before punctuation is stripped.
    emotions = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text)
    text = (re.sub(r'[\W]+', ' ', text.lower()) + ' '.join(emotions).replace('-', ''))
    return text
df['review']=df['review'].apply(preprocessor)
# Split: first 25k rows train, remainder test.  Use :24999 because
# DataFrame.loc slicing is label-INCLUSIVE: the original :25000 put row
# 25000 into both the training and the test split.
X_train=df.loc[:24999,'review'].values
y_train=df.loc[:24999,'sentiment'].values
X_test=df.loc[25000:,'review'].values
y_test=df.loc[25000:,'sentiment'].values
tfidf=TfidfVectorizer(strip_accents=None,lowercase=False,preprocessor=None)
# Grid: normalized tf-idf vs. raw tf, two tokenizers, L1/L2, three C values.
# NOTE(review): 'l1' requires a liblinear/saga solver in modern sklearn —
# confirm against the installed version.
param_grid=[{'vect__ngram_range':[(1,1)],
             'vect__stop_words':[stop,None],
             'vect__tokenizer':[tokenizer,tokenizer_porter],
             'clf__penalty':['l1','l2'],
             'clf__C':[1.0,10.0,100.0]},
            {'vect__ngram_range':[(1,1)],
             'vect__stop_words':[stop,None],
             'vect__tokenizer':[tokenizer,tokenizer_porter],
             'vect__use_idf':[False],
             'vect__norm':[None],
             'clf__penalty':['l1','l2'],
             'clf__C':[1.0,10.0,100.0]}]
lr_tfidf=Pipeline([('vect',tfidf),
                   ('clf',LogisticRegression(random_state=0))])
gs_lr_tfidf=GridSearchCV(lr_tfidf,param_grid,scoring='accuracy',cv=5,
                         verbose=1,n_jobs=1)
gs_lr_tfidf.fit(X_train,y_train)
print("Tfidf Best Parameters : %s"%gs_lr_tfidf.best_params_)
print("CV Accuracy : %.5f"%gs_lr_tfidf.best_score_)
clf=gs_lr_tfidf.best_estimator_
print("Test Accuracy : %.5f"%clf.score(X_test,y_test))
13,516 | 91f1527b97b6198c6ec162166af1b6d530ba8716 | from collections import namedtuple
from ibanity import Ibanity
def get_list(params=None):
    """Return every sandbox user as a SandboxUser named tuple.

    Args:
        params: optional query parameters for the GET request.

    The original used a mutable default ({}), which is shared across calls;
    a None sentinel avoids that hazard without changing the call contract.
    """
    uri = Ibanity.client.api_schema["sandbox"]["users"].replace("{sandboxUserId}", "")
    response = Ibanity.client.get(uri, params if params is not None else {}, None)
    # List comprehension replaces the list(map(lambda ...)) chain.
    return [__create_user_named_tuple__(user) for user in response["data"]]
def create(attributes):
    """Create a sandbox user from *attributes*; return it as a named tuple."""
    uri = Ibanity.client.api_schema["sandbox"]["users"].replace("{sandboxUserId}", "")
    payload = {"data": {"type": "sandboxUser", "attributes": attributes}}
    response = Ibanity.client.post(uri, payload, {}, None)
    return __create_user_named_tuple__(response["data"])
def delete(id):
    """Delete the sandbox user with the given id; return its representation."""
    uri = Ibanity.client.api_schema["sandbox"]["users"].replace("{sandboxUserId}", id)
    response = Ibanity.client.delete(uri, {}, None)
    return __create_user_named_tuple__(response["data"])
def find(id):
    """Fetch a single sandbox user by id as a SandboxUser named tuple."""
    uri = Ibanity.client.api_schema["sandbox"]["users"].replace("{sandboxUserId}", id)
    response = Ibanity.client.get(uri, {}, None)
    return __create_user_named_tuple__(response["data"])
def __create_user_named_tuple__(user):
    """Expose the user dict's keys as attributes via an ad-hoc namedtuple."""
    SandboxUser = namedtuple("SandboxUser", user.keys())
    return SandboxUser(**user)
|
13,517 | b195afe4e187b614894648a3e6bd353ebedf2c50 | import quandl
import pandas as pd
# NOTE(review): hard-coded API key committed to source — move to an env var
# and rotate the key.
api_key = '2LWMTZKKbDgy5Zqycxjq'
#df = quandl.get('FMAC/HPI_AK', authtoken=api_key, start_date="1999-01-31")
#print(df.head())
# read_html returns a list of DataFrames, one per <table> on the page.
fiddy_states = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
#This is a list:
#print(fiddy_states)
#This is a dataframe:
#print(fiddy_states[0])
#This is a column:
print(fiddy_states[0][1])
# Column 1 holds the abbreviations; [2:] skips the header rows.
# NOTE(review): these indexes depend on the scraped page layout — verify.
for abbv in fiddy_states [0] [1] [2:]:
    #print(abbv)
    print("FMAC/HPI_"+str(abbv))
|
13,518 | d939670aeabc3cec9fefc5e29b57614dd79b2bd3 | """Stores constants for all modules."""
import os
# Directories
# Root of the installed package (directory containing this file).
PACKAGE_BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Processed-data directory shipped inside the package.
PROCESSED_DATA_DIR = os.path.join(PACKAGE_BASE_DIR, "data")
# Option constants
ALPHA = "alpha"
LOWER = "lower"
|
13,519 | edf4eb25316ca438a6a3f65240a0e6680a163702 | import pyautogui
import time
rgb = pyautogui.pixelMatchesColor(31, 615, (0, 55, 132), tolerance=5)
# While the pixel at (31, 615) matches the reference blue, keep switching
# to virtual desktop 1 and re-sampling about once per second.
while rgb == True:
    pyautogui.hotkey('ctrl', 'win', '1')
    time.sleep(1)
    rgb = pyautogui.pixelMatchesColor(31, 615, (0, 55, 132), tolerance=5)
    time.sleep(1)
pyautogui.moveTo(1250, 400) # Bag 2
time.sleep(0.1)
pyautogui.click(1250, 400)
time.sleep(0.1)
pyautogui.moveTo(1210, 400) # Bag 1
time.sleep(0.1)
pyautogui.click(1210, 400)
# Locate the silver-bar icon inside the inventory region by template image.
gumusbar = pyautogui.locateCenterOnScreen('C:/Python34/Ticaret/100Mbar.png',region=(1130, 385, 236, 360))
pyautogui.moveTo(gumusbar)
time.sleep(0.1)
pyautogui.click(gumusbar)
gumusbar = pyautogui.locateCenterOnScreen('C:/Python34/Ticaret/100Mbar.png',region=(1130, 385, 236, 360))
# Click every remaining match until the template is no longer found.
while gumusbar != None:
    gbx, gby = gumusbar
    pyautogui.moveTo(gumusbar)
    time.sleep(0.1)
    pyautogui.click(gbx, gby)
    time.sleep(0.2)
    gumusbar = pyautogui.locateCenterOnScreen('C:/Python34/Ticaret/100Mbar.png',region=(1130, 385, 236, 360))
# NOTE(review): if the template was never found, gbx/gby are unbound and
# this final click raises NameError — guard it or drop it.
pyautogui.click(gbx, gby) |
13,520 | 2288534bec9853055ee9e029ab3da6b51b72cac7 | from utils.Tree import letterTree
def depth_limited_search_r(problem, goal, limit):
    """Recursive depth-limited search from the node `problem`.

    Returns True when a node with value == goal is found, the string
    'cutoff' when the depth limit truncated some branch, and None when the
    reachable subtree was fully explored without finding the goal.
    """
    if problem.value == goal:
        return True
    if problem.depth == limit:
        return 'cutoff'
    if problem.children is None:
        return None
    hit_cutoff = False
    for child in problem.children:
        outcome = depth_limited_search_r(child, goal, limit)
        if outcome == 'cutoff':
            hit_cutoff = True
        elif outcome is not None:
            return outcome
    return 'cutoff' if hit_cutoff else None
def depth_limited_search(head, goal, limit):
    """Public entry point: run the recursive search from the tree head."""
    return depth_limited_search_r(head, goal, limit)
# Demo: search the sample letter tree (from utils.Tree) for value '4'.
tree = letterTree()
print('{}'.format(depth_limited_search(tree, '4', 4)))
13,521 | db9a03106dc3c908c3e295080c5baffcefe6a6f1 | # coding=utf8
# 并行模拟器应使用多进程的方式进行模拟
# 如果要启动大量的子进程,可以用进程池的方式批量创建子进程:
from multiprocessing import Pool
import os, time, random
import BigGatewatPut2_2
def long_time_task(url, apikey, data):
    '''Simulate one gateway upload: random 0-3s delay, then an HTTP PUT.'''
    print 'Run task %s (%s)...' % (apikey, os.getpid())
    start = time.time()
    time.sleep(random.random() * 3)
    end = time.time()
    print "show apikey ", apikey # call the real external upload here; arguments are allowed
    BigGatewatPut2_2.http_put(url, apikey, data)
    print 'Task %s runs %0.2f seconds.' % (apikey, (end - start))
if __name__ == '__main__':
    url_test = 'http://192.168.1.180:5858/gateway/up/0134567839600003' # destination address
    # Test payload: simulated gateway 2 with two nodes carrying
    # temperature/humidity sensors.
    gateway_data = {
        "node_datas": [{
            "node_id": "0300110203600018",
            "node_attributes": {
                "hum": "10.770000", "tem": "19.200002"
            }
        },
            {
                "node_id": "0300110103700025",
                "node_attributes": {
                    "hum": "10.780000", "tem": "19.200003"
                }
            }]
    }
    print 'Parent process %s.' % os.getpid()
    p = Pool() # Pool defaults to 4 parallel workers; use Pool(n) for n
    # fish = [1, 2, 3, 4, 5] # number of processes to start
    # Read the per-process gateway identities from a CSV file.
    # NOTE(review): BaseFile is never imported in this module, so this line
    # raises NameError as written — confirm where BaseFile should come from.
    bf = BaseFile()
    fish = bf.read_file("biggateway_api_keys.csv")
    for a_dict in fish:
        a_api_key = a_dict["gateway_api_key"] # args passes the task parameters
        p.apply_async(long_time_task, args=(url_test, a_api_key, gateway_data,))
    print 'Waiting for all subprocesses done...'
    p.close()
    p.join()
    print 'All subprocesses done.'
13,522 | 96928a8553438253bac21e87eaf932db3057080b | import collections
import re
from queue import PriorityQueue
test_inputs = [
    "inputs/day7"
]
# Matches "Step A must be finished before step B can begin." -> ("A", "B").
line_pattern = re.compile(r"Step\s*(\w*)\s+must.*step\s+(\w*)\s+can.*")
def interpret(rules):
    """Build the dependency maps from (before, after) rule pairs.

    Returns (pred, succ, domain): pred[s] = steps that must precede s,
    succ[s] = steps unlocked by s, domain = every step mentioned.
    """
    pred = collections.defaultdict(set)
    succ = collections.defaultdict(set)
    domain = set()
    for rule in rules:
        pred[rule[1]].add(rule[0])
        succ[rule[0]].add(rule[1])
        domain.update((rule[0], rule[1]))
    return pred, succ, domain
def part1(rules):
    """Return the alphabetically tie-broken topological order of the steps."""
    pred, succ, domain = interpret(rules)
    ready = {step for step in domain if not pred[step]}
    order = []
    while ready:
        step = min(ready)
        ready.remove(step)
        domain.remove(step)
        order.append(step)
        for follower in succ[step]:
            pred[follower].remove(step)
            if not pred[follower]:
                ready.add(follower)
    return "".join(order)
def time(task):
    """Seconds needed to finish *task*: a 60s base plus 1..26 for A..Z."""
    return 61 + (ord(task) - ord('A'))
def part2(rules, workers):
    """Return the total seconds to finish all steps with `workers` workers.

    Event-driven simulation: fill free workers with the alphabetically
    smallest ready tasks, advance the clock by the smallest remaining job
    time, then retire finished tasks and release their successors.
    """
    clock = 0
    jobs = {}                      # running task -> remaining seconds
    available = PriorityQueue()    # ready tasks, popped alphabetically
    pred, succ, domain = interpret(rules)
    for task in domain:
        if len(pred[task]) == 0:
            available.put(task)
    while not available.empty() or jobs:
        capacity = workers - len(jobs)
        while capacity > 0 and not available.empty():
            task = available.get()
            jobs[task] = time(task)
            capacity -= 1
        step = min(jobs.values())
        clock += step
        # list() snapshot: finished tasks are deleted during iteration.
        for task in list(jobs.keys()):
            jobs[task] -= step
            if jobs[task] == 0:
                for s in succ[task]:
                    pred[s].remove(task)
                    if len(pred[s]) == 0:
                        available.put(s)
                del jobs[task]
    return clock
def parse_line(line):
    """Extract the (prerequisite, dependent) pair from a rule line.

    Returns None when the line does not match the rule pattern.
    """
    m = line_pattern.match(line)
    return (m.group(1), m.group(2)) if m else None
def process(path):
    """Parse the rule file at `path` and print both puzzle answers."""
    with open(path) as f:
        rules = [parse_line(line) for line in f]
    print(part1(rules))
    # Part 2 uses the puzzle's five parallel workers.
    print(part2(rules, 5))
def main():
    """Run every configured input file through both parts."""
    for path in test_inputs:
        process(path)
if __name__ == "__main__":
    main()
|
def main():
    """Read N, K and K groups from stdin; print N minus the size of the
    union of all group members."""
    import sys
    read = sys.stdin.readline
    n_total, n_groups = map(int, read().split())
    covered = set()
    for _ in range(n_groups):
        int(read())  # group size line (validated but otherwise unused)
        covered.update(map(int, read().split()))
    print(n_total - len(covered))
if __name__ == "__main__":
    main()
|
13,524 | b97c8671bdb6bc6547f88efd80627bd07829a0b1 | import os
import math
import datetime
import logging
from itertools import chain
import pyaml
import torch
from torch.nn import ModuleDict
from torch.optim import Adam, SGD, RMSprop
def _repo_root():
    """Directory two levels above this module (the repository root)."""
    return os.path.dirname(os.path.dirname(__file__))
def path_to_config(name):
    """Path of config file *name* under <root>/config."""
    return os.path.join(_repo_root(), "config", name)
def path_to_output(name):
    """Path of experiment output *name* under <root>/experiments."""
    return os.path.join(_repo_root(), "experiments", name)
def path_to_data(directory):
    """Path of data directory *directory* under <root>/data."""
    return os.path.join(_repo_root(), "data", directory)
def init_output_dirs(experiment):
    """Create the checkpoint and tensorboard directories for *experiment*.

    Returns (ckpt_dir, runs_dir).  If the directories already exist, a
    warning is logged and the existing paths are returned unchanged.
    """
    base = path_to_output(experiment)
    ckpt_dir = "{}/checkpoints".format(base)
    runs_dir = "{}/tensorboard".format(base)
    try:
        os.makedirs(ckpt_dir)
        os.makedirs(runs_dir)
    except FileExistsError:
        logging.warning("ckpt and log dirs already exist for {}".format(experiment))
    return ckpt_dir, runs_dir
def load_config(path):
    """
    Load the config file and make any dynamic edits.
    """
    with open(path, "rt") as reader:
        config = pyaml.yaml.load(reader, Loader=pyaml.yaml.Loader)
    # Normalize the "no regularization" spellings (None or [None]) to [].
    if config["regularization"]["type"] is None or config["regularization"]["type"] == [None]:
        config["regularization"]["type"] = []
    if "attention" in config["regularization"]["type"]:
        raise NotImplementedError
    # Experiment name = config filename without its extension; creating the
    # output dirs here also records their paths in the config.
    config["experiment"] = os.path.splitext(os.path.basename(path))[0]
    config["ckpt_dir"], config["runs_dir"] = init_output_dirs(config["experiment"])
    return config
def build_optimizers(model, gen_opt_kwargs, dis_opt_kwargs):
    """Create the generator's Adam optimizer and, when dis_opt_kwargs is
    truthy, an RMSprop optimizer for the discriminators (else None)."""
    gen_optimizer = Adam(model.generator.parameters(), **gen_opt_kwargs)
    if not dis_opt_kwargs:
        return gen_optimizer, None
    dis_optimizer = RMSprop(model.discriminators.parameters(), **dis_opt_kwargs)
    return gen_optimizer, dis_optimizer
def save_checkpoint(model, gen_optimizer, dis_optimizer, step, experiment):
ckpt = "{0}.pt".format(str(step).zfill(6))
state_dict = {}
state_dict["model_state_dict"] = model.state_dict()
state_dict["gen_optimizer_state_dict"] = gen_optimizer.state_dict()
state_dict["step"] = step
if dis_optimizer is not None:
state_dict["dis_optimizer_state_dict"] = dis_optimizer.state_dict()
torch.save(state_dict, os.path.join(path_to_output(experiment), "checkpoints", ckpt))
def load_checkpoint(model, gen_opt_kwargs, dis_opt_kwargs, step, experiment):
ckpt = "{0}.pt".format(str(step).zfill(6))
state_dict = torch.load(os.path.join(path_to_output(experiment), "checkpoints", ckpt))
model.load_state_dict(state_dict["model_state_dict"])
gen_optimizer, dis_optimizer = build_optimizers(model, gen_opt_kwargs, dis_opt_kwargs)
gen_optimizer.load_state_dict(state_dict["gen_optimizer_state_dict"])
for state in gen_optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if dis_optimizer is not None:
dis_optimizer.load_state_dict(state_dict["dis_optimizer_state_dict"])
for state in dis_optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
return model, gen_optimizer, dis_optimizer, state_dict["step"]
|
13,525 | c22b259266af181656e8b2baf44999718a40c5ed | from flask import Flask, request, jsonify
import tensorflow as tf
app = Flask(__name__)
available_fps = [3, 4, 5]
models = {fps: tf.keras.models.load_model(f'model/fps_{fps}') for fps in available_fps}
class_labels = ['push-up-arms-not-bent-enough', 'push-up-normal', 'push-up-waist-too-low', 'sit-up-normal', 'sit-up-too-low']
@app.route('/', methods=['GET'])
def root():
return 'Exercise classifier running'
@app.route('/classify', methods=['POST'])
def classify():
data = request.json
timeseries = tf.convert_to_tensor(data['timeseries'])
fps = data['fps']
model = models[fps]
probabilities = model.predict(timeseries)
print(probabilities)
index_result = list(tf.math.argmax(probabilities, axis=1).numpy())
label_result = [class_labels[index] for index in index_result]
return jsonify(label_result)
if __name__ == '__main__':
# This is used when running locally only.
app.run(host='0.0.0.0', port=8080, debug=True) |
13,526 | 969dd71121a604a65681fbfd148f5e896826e31c | import flask
from flask import Flask, request
import requests
from flask.logging import default_handler
from py_zipkin.transport import SimpleHTTPTransport
from py_zipkin.zipkin import zipkin_span, create_http_headers_for_new_span, ZipkinAttrs, Kind, zipkin_client_span
from py_zipkin.request_helpers import create_http_headers
from py_zipkin.encoding import Encoding
import time
from py_zipkin.encoding import Encoding
app=flask.Flask(__name__)
def default_handler(encoded_span):
body = encoded_span
# decoded = _V1ThriftDecoder.decode_spans(encoded_span)
app.logger.debug("body %s", body)
# return requests.post(
# "http://zipkin:9411/api/v1/spans",
# data=body,
# headers={'Content-Type': 'application/x-thrift'},
# )
return requests.post(
"http://zipkin:9411/api/v2/spans",
data=body,
headers={'Content-Type': 'application/json'},
)
@zipkin_span(service_name='webapp', span_name='do_stuff')
def do_stuff():
time.sleep(5)
headers = create_http_headers_for_new_span()
requests.get('http://localhost:5042/service1/', headers=headers)
return 'OK'
@app.route('/')
def index():
#transport = SimpleHTTPTransport("localhost", 4042)
with zipkin_span(
service_name='webapp',
span_name='index',
transport_handler=default_handler(),
port=4042,
sample_rate=100, #0.05, # Value between 0.0 and 100.0
):
do_stuff()
time.sleep(10)
return 'OK', 200
if __name__ == '__main__':
app.run(host='0.0.0.0', port = 4042, debug=True)
|
13,527 | ebcdee546958e9e5292c3d5ee3d34bc6be4b6d20 |
import boto3
"""
Note: code is for reference only (taken from an online course)
"""
if __name__ == '__main__':
# Generate the boto3 client for interacting with translate (for
# translation of text into another language)
translate = boto3.client('translate', region_name='us-east-1',
# Set up AWS credentials
aws_access_key_id=AWS_KEY_ID,
aws_secret_access_key=AWS_SECRET)
response = translate.translate_text(
Text='Hello, how are you?',
SourceLanguageCode='auto', # note: should conclude English ('en')
TargetLanguageCode='es')
print(response['TranslatedText'])
######################################################################
translated_text = translate.translate_text(
Text='Hello, how are you?',
SourceLanguageCode='auto', # note: should conclude English ('en')
TargetLanguageCode='es')['TranslatedText']
######################################################################
for index, row in dumping_df.iterrows():
# Get the public_description into a variable
description = dumping_df.loc[index, 'public_description']
if description != '':
# Translate the public description
resp = translate.translate_text(
Text=description,
SourceLanguageCode='auto', TargetLanguageCode='en')
# Store original language in original_lang column
dumping_df.loc[index, 'original_lang'] = resp['SourceLanguageCode']
# Store the translation in the translated_desc column
dumping_df.loc[index, 'translated_desc'] = resp['TranslatedText']
# Preview the resulting DataFrame
dumping_df = dumping_df[
['service_request_id', 'original_lang', 'translated_desc']]
dumping_df.head()
######################################################################
|
13,528 | 6493ae482d2bf2a066bf9cbe9ef68239cc2802e6 | # -*- coding: utf-8 -*-
# Scrapy settings for newcar_new project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'newcar_new'
SPIDER_MODULES = ['newcar_new.spiders']
NEWSPIDER_MODULE = 'newcar_new.spiders'
# ITEM_PIPELINES = {'ganji.pipelines.GanjiPipeline':300, }
MONGODB_SERVER = "192.168.1.94"
MONGODB_PORT = 27017
MONGODB_DB = "newcar"
MONGODB_COLLECTION = "pcauto"
CrawlCar_Num = 2000000
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'newcar_new (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = True
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'newcar_new.middlewares.NewcarNewSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'newcar_new.extra_middlewares.ProxyMiddleware': 300,
'newcar_new.extra_middlewares.RotateUserAgentMiddleware': 1,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'newcar_new.pipelines.NewcarNewPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
RETRY_HTTP_CODES = [403]
# DUPEFILTER_CLASS = "scrapy_redis_bloomfilter.dupefilter.RFPDupeFilter"
# SCHEDULER = "scrapy_redis_bloomfilter.scheduler.Scheduler"
SCHEDULER_PERSIST = False
REDIS_URL = 'redis://192.168.1.241:6379/15'
FEED_EXPORT_ENCODING = 'utf-8'
# HTTPERROR_ALLOWED_CODES = [301, 302]
# MEDIA_ALLOW_REDIRECTS = True
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9',
# 'Connection': 'keep-alive',
# 'Host': 'dealer.bitauto.com',
# 'Origin': 'https://dealer.bitauto.com',
# 'Cookie': 'XCWEBLOG_testcookie=yes; CIGDCID=6de59662384340f080489c2633aed4d8-yiche; locatecity=310100; UserGuid=0558732c-dee0-49ef-9964-f57f72422a9b; BitAutoLogId=f29ac14dc49a77808062856c47dae311; BitAutoUserCode=9ce9ffab-65ff-d525-36a6-f94a71fad167; CarStateForBitAuto=eb306dc3-cbbe-0439-56d0-421d2b00e6eb; Hm_lvt_03efd23dfd835817ac8b9504cf0e715d=1584412964,1584585741; _dc3c=1; dcad10=; dc_search10=; _dcisnw=1; citycode=2401; __RequestVerificationToken=6l1qKxnW2EUgSn4rJZcktxdEb2-dBsZNA0eFHxOKoSigSp8UJMGzQZ38J5DPwHI51r9gqsQL4il-fCUg1Pml-JTObj8oEaMmqd3BE0B0FF92QgI4p-g8rMM9qJa-YLkygdWCBw2; BIT_UTK_DVT=1; BIT_UTK_SN=0.226274330987436; csids=5441_5158_1834_4622_4173_5503; selectcity=370000; bitauto_ipregion=180.167.80.118%3a%e4%b8%8a%e6%b5%b7%e5%b8%82%3b21%2c%e5%b1%b1%e4%b8%9c%e7%9c%81%2cshandong; Hm_lpvt_03efd23dfd835817ac8b9504cf0e715d=1584591937; __xsptplus12=12.3.1584585740.1584591937.43%234%7C%7C%7C%7C%7C%23%23JFn2EwNe-ofl-F2ZJ9RVFyBZwPTHCLum%23; dmts10=1; dmt10=2%7C0%7C0%7Cdealer.bitauto.com%2F100015483%2Fcars_3088.html%3Fleads_source%3Dp004009%7Ccar.bitauto.com%2Falphard%2Fbaojia%2Fc0%2F; dm10=2%7C1584594232%7C0%7C%7C%7C%7C%7C1584585740%7C1584585740%7C1584585740%7C1584594076%7C75670a87e738c43684bb0b0ef506ebab%7C0%7C%7C'
# }
# # MYEXT_ENABLED: 是否启用扩展,启用扩展为 True, 不启用为 False
# # IDLE_NUMBER: 关闭爬虫的持续空闲次数,持续空闲次数超过IDLE_NUMBER,爬虫会被关闭。默认为 360 ,也就是30分钟,一分钟12个时间单位
# MYEXT_ENABLED = False # 开启扩展
# IDLE_NUMBER = 360 # 配置空闲持续时间单位为 360个 ,一个时间单位为5s
# # # 在 EXTENSIONS 配置,激活扩展
# EXTENSIONS = {
# 'carbuisness_new.extensions.RedisSpiderSmartIdleClosedExensions': 500,
# }
WEBSITE = ''
PCAUTO_DIC = {
"基本参数-车型名称": "salesdesc",
"基本参数-厂商指导价(元)": "price",
"基本参数-厂商": "factoryname",
"基本参数-级别": "level",
"基本参数-上市时间": "salemonth",
"基本参数-发动机": "motor",
"基本参数-进气形式": "method",
"基本参数-最大马力(PS)": "maxps",
"基本参数-最大扭矩(N·m)": "maxnm",
"基本参数-变速箱": "gear",
"基本参数-车身类型": "bodystyle",
"基本参数-长×宽×高(mm)": "lengthwh",
"基本参数-轴距(mm)": "wheel",
"基本参数-最高车速(km/h)": "maxspeed",
"基本参数-官方0-100km/h加速(s)": "accelerate",
"基本参数-实测0-100km/h加速(s)": "actualaccelerate",
"基本参数-实 测100-0km/h制动(m)": "actualstop",
"基本参数-实测油耗(L/100km)": "jbcs_ssyh",
"基本参数-工信部综合油耗(L/100km)": "petrol",
"基本参数-整车质保": "warranty",
"车身-车身类型": "type",
"车身-长度(mm)": "length",
"车身-宽度(mm)": "width",
"车身-高度(mm)": "height",
"车身-轴距(mm)": "wheel",
"车身-前轮距(mm)": "frontgauge",
"车身-后轮距(mm)": "backgauge",
"车身-最小离地间隙(mm)": "liftoff_distance",
"车身-车重(kg)": "weight",
"车身-车门数(个)": "doors",
"车身-座位数(个)": "seats",
"车身-油箱容积(L)": "fuelvolumn",
"车身-行李厢容积(L)": "baggage",
"车身-行李厢最大容积(L)": "maxbaggage",
"车身-行李厢内部尺寸(mm)": "cs_xlxnbcc",
"发动机-发动机型号": "motortype",
"发动机-排量(mL)": "cylinder",
"发动机-进气形式": "method1",
"发动机-最大马力(PS)": "maxps1",
"发动机-最大功率(kW)": "maxpower",
"发动机-最大功率转速(rpm)": "maxrpm",
"发动机-最大扭矩(N·m)": "maxnm1",
"发动机-最大扭矩转速(rpm)": "maxtorque",
"发动机-气缸排列形式": "lwv",
"发动机-气缸数(个)": "lwvnumber",
"发动机-每缸气门数(个)": "valve",
"发动机-压缩比": "compress",
"发动机-配气机构": "valve_gear",
"发动机-缸径(mm)": "cylinder_diameter",
"发动机-行程(mm)": "cylinder_travel",
"发动机-发动机特有技术": "motortechnique",
"发动机-燃料形式": "fuletype",
"发动机-燃油标号": "fulevolumn",
"发动机-供油方式": "fulemethod",
"发动机-缸盖材料": "cylinder_head_material",
"发动机-缸体材料": "cylinder_body_material",
"发动机-排放标准": "emission",
"变速箱-简称": "geardesc",
"变速箱-挡位个数": "gearnumber",
"变速箱-变速箱类型": "geartype",
"底盘转向-驱动方式": "driveway",
"底盘转向-前悬挂类型": "fronthang",
"底盘转向-后悬挂类型": "backhang",
"底盘转向-转向助力类型": "assistanttype",
"底盘转向-车体结构": "body_structure",
"车轮制动-前制动器类型": "frontbrake",
"车轮制动-后制动器类型": "backbrake",
"车轮制动-驻车制动类型": "parking_brake_type",
"车轮制动-前轮胎规格": "frontwheel",
"车轮制动-后轮胎规格": "backwheel",
"车轮制动-备胎规格": "sparewheel",
"车轮制动-备胎尺寸": "sizewheel",
"主动安全配置-ABS防抱死": "zdaqpz_ABSfbs",
"主动安全配置-制动力分配(EBD/CBC等)": "zdaqpz_zdlfp",
"主动安全配置-刹车辅助(EBA/BAS/BA等)": "zdaqpz_scfz",
"主动安全配置-牵引力控制(ASR/TCS/TRC等)": "zdaqpz_qylkz",
"主动安全配置-车身稳定控制(ESP/DSC/ESC等)": "zdaqpz_cswdkz",
"主动安全配置-胎压监测装置": "zdaqpz_tywdzz",
"主动安全配置-防爆轮胎": "zdaqpz_fblt",
"主动安全配置-安全带未系提示": "zdaqpz_aqwxts",
"主动安全配置-并线辅助": "zdaqpz_bxfz",
"主动安全配置-车道偏离预警系统": "zdaqpz_cdplyjxt",
"主动安全配置-车道保持辅助系统": "zdaqpz_cdbcfzxt",
"主动安全配置-主动刹车/主动安全系统": "zdaqpz_zdsc_zdaqxt",
"主动安全配置-道路交通标示识别": "zdaqpz_dljtbssb",
"主动安全配置-疲劳驾驶提示": "zdaqpz_pljsts",
"主动安全配置-夜视系统": "zdaqpz_ysxt",
"被动安全配置-前排正面安全气囊": "bdaqpz_qpzmaqqn",
"被动安全配置-前/后排侧气囊": "bdaqpz_q_hpcqn",
"被动安全配置-前/后排头部气囊(气帘)": "bdaqpz_q_hptbqn",
"被动安全配置-前排膝部气囊": "bdaqpz_qpxbqn",
"被动安全配置-行人碰撞防护系统": "bdaqpz_xrpzfhxt",
"被动安全配置-ISO FIX儿童座椅接口": "bdaqpz_ztzyjk",
"防盗配置-发动机电子防盗": "fdpz_fdjdzfd",
"防盗配置-车内中控锁": "fdpz_cnzks",
"防盗配置-遥控钥匙": "fdpz_ykys",
"防盗配置-远程启动": "fdpz_ycqd",
"防盗配置-无钥匙启动系统": "fdpz_wysqdxt",
"防盗配置-无钥匙进入系统": "fdpz_wysjrxt",
"驾驶辅助配置-巡航系统": "jsfzpz_xhxt",
"驾驶辅助配置-前/后雷达": "jsfzpz_q_hld",
"驾驶辅助配置-泊车影像系统": "jsfzpz_bcyxxt",
"驾驶辅助配置-车侧盲区影像系统": "jsfzpz_ccmqyxxt",
"驾驶辅助配置-倒车动态提醒系统": "jsfzpz_dcdttxxt",
"驾驶辅助配置-驾驶模式切换": "jsfzpz_jsmsqh",
"驾驶辅助配置-发动机启停技术": "jsfzpz_fdjqtjs",
"驾驶辅助配置-自动泊车入位": "jsfzpz_zdbcrw",
"驾驶辅助配置-自动驾驶辅助": "jsfzpz_zdjsfz",
"驾驶辅助配置-上坡辅助": "jsfzpz_spfz",
"驾驶辅助配置-自动驻车": "jsfzpz_zdzc",
"驾驶辅助配置-陡坡缓降": "jsfzpz_dphj",
"驾驶辅助配置-可变悬挂": "jsfzpz_kbxg",
"驾驶辅助配置-空气悬挂": "jsfzpz_kqxg",
"驾驶辅助配置-可变转向比": "jsfzpz_kbzxb",
"驾驶辅助配置-整体主动转向系统": "jsfzpz_ztzdzxxt",
"驾驶辅助配置-前桥限滑差速器/差速锁": "jsfzpz_qqxhcsq",
"驾驶辅助配置-中央差速器锁止功能": "jsfzpz_zycsqszgn",
"驾驶辅助配置-后桥限滑差速器/差速锁": "jsfzpz_hqxhcsq",
"外部配置-天窗类型": "wbpz_tclx",
"外部配置-天窗尺寸(mm)": "wbpz_tccc",
"外部配置-运动外观套件": "wbpz_ydwgtj",
"外部配置-铝合金轮毂": "wbpz_lhjlg",
"外部配置-电动吸合门": "wbpz_ddxhm",
"外部配置-电动后备厢": "wbpz_ddhbx",
"外部配置-后备厢感应开启": "wbpz_hbxgykq",
"外部配置-电动后备厢位置记忆": "wbpz_ddhbxwzjy",
"外部配置-车顶行李架": "wbpz_cdxlj",
"外部配置-主动进气格栅": "wbpz_zdjqgs",
"内部配置-方向盘材质": "nbpz_fxpcz",
"内部配置-方向盘调节范围": "nbpz_fxptjfw",
"内部配置-方向盘电动调节": "nbpz_fxpddtj",
"内部配置-多功能方向盘": "nbpz_dgnfxp",
"内部配置-方向盘换挡": "nbpz_fxphd",
"内部配置-方向盘加热": "nbpz_fxpjr",
"内部配置-方向盘记忆": "nbpz_fxpjy",
"内部配置-行车电脑显示屏功能": "nbpz_xcdnxspgn",
"内部配置-全液晶仪表盘": "nbpz_qyjybp",
"内部配置-液晶仪表盘尺寸": "nbpz_yjybpcc",
"内部配置-HUD抬头数字显示": "nbpz_HUDttszxs",
"内部配置-车载行车记录仪": "nbpz_czxcjly",
"内部配置-手机无线充电": "nbpz_sjwxcd",
"座椅配置-座椅材质": "zypz_zycz",
"座椅配置-运动风格座椅": "zypz_ydfgzy",
"座椅配置-前排座椅高低调节": "zypz_qpzygdtj",
"座椅配置-前排座垫倾角调节": "zypz_qpzdqjtj",
"座椅配置-前排腰部支撑调节": "zypz_qpybzctj",
"座椅配置-前排肩部支撑调节": "zypz_qpjbzctj",
"座椅配置-主/副驾驶座电动调节": "zypz_z_fjszddtj",
"座椅配置-副驾驶席座椅后排电动可调": "zypz_hpddkt",
"座椅配置-后排座椅调节": "zypz_hpzytj",
"座椅配置-后排座椅电动调节": "zypz_hpzyddtj",
"座椅配置-电动座椅记忆": "zypz_ddzyjy",
"座椅配置-前/后排座椅加热": "zypz_q_hpzyjr",
"座椅配置-前/后排座椅通风": "zypz_q_hpzytf",
"座椅配置-前/后排座椅按摩": "zypz_q_hpzyam",
"座椅配置-后排座椅放倒形式": "zypz_hpzyfdxs",
"座椅配置-第三排座椅": "zypz_dspzy",
"座椅配置-座椅布局形式": "zypz_zybjxs",
"座椅配置-前/后座中央扶手": "zypz_q_hzzyfs",
"座椅配置-后排杯架": "zypz_hpbj",
"空调配置-空调调节方式": "ktpz_kttjfs",
"空调配置-温度分区控制": "ktpz_wdfqkz",
"空调配置-后排独立空调": "ktpz_hpdlkt",
"空调配置-后座出风口": "ktpz_hpcfk",
"空调配置-车内PM2.5过滤装置": "ktpz_cnPM25glzz",
"空调配置-车载空气净化器": "ktpz_czkqjhq",
"空调配置-车载冰箱": "ktpz_czbx",
"灯光配置-近光灯光源": "dgpz_jygdy",
"灯光配置-远光灯光源": "dgpz_yjgdy",
"灯光配置-自适应远近光灯": "dgpz_zsyyjgd",
"灯光配置-日间行车灯": "dgpz_rjxcd",
"灯光配置-自动头灯": "dgpz_zdtd",
"灯光配置-转向辅助灯": "dgpz_zxfzd",
"灯光配置-随动转向大灯(AFS)": "dgpz_sdzxdd",
"灯光配置-前雾灯": "dgpz_qud",
"灯光配置-前大灯雨雾模式": "dgpz_qddywms",
"灯光配置-大灯高度可调": "dgpz_ddgdkt",
"灯光配置-大灯清洗装置": "dgpz_ddqxzz",
"灯光配置-大灯延时关闭": "dgpz_ddysgb",
"灯光配置-车内氛围灯": "dgpz_cnfwd",
"玻璃/后视镜-电动车窗": "bl_hsj_ddcc",
"玻璃/后视镜-车窗一键升/降": "bl_hsj_ccyjs_j",
"玻璃/后视镜-车窗防夹手功能": "bl_hsj_ccfjsgn",
"玻璃/后视镜-防紫外线/隔热玻璃": "bl_hsj_fzwx_grbol",
"玻璃/后视镜-后视镜电动调节": "bl_hsj_hsjddtj",
"玻璃/后视镜-外后视镜加热": "bl_hsj_whsjjr",
"玻璃/后视镜-后视镜电动折叠": "bl_hsj_hsjddzd",
"玻璃/后视镜-后视镜锁车自动折叠": "bl_hsj_hsjsczdzd",
"玻璃/后视镜-后视镜倒车自动下翻": "bl_hsj_hsjdczdxf",
"玻璃/后视镜-后视镜记忆": "bl_hsj_hsjjy",
"玻璃/后视镜-内/外后视镜自动防眩目": "bl_hsj_n_whsjzdfxm",
"玻璃/后视镜-后风挡遮阳帘": "bl_hsj_hfdzyl",
"玻璃/后视镜-后排侧遮阳帘": "bl_hsj_hpczyl",
"玻璃/后视镜-后排侧隐私玻璃": "bl_hsj_hpcysbl",
"玻璃/后视镜-遮阳板化妆镜": "bl_hsj_zybhzj",
"玻璃/后视镜-后雨刷": "bl_hsj_hys",
"玻璃/后视镜-感应雨刷": "bl_hsj_gyys",
"多媒体配置-中控台彩色大屏": "dmtpz_zktcsdp",
"多媒体配置-中控台大屏尺寸": "dmtpz_zktdpcc",
"多媒体配置-中控屏操作方式": "dmtpz_zkpczfs",
"多媒体配置-GPS导航系统": "dmtpz_GPSdhxt",
"多媒体配置-实时路况信息显示": "dmtpz_sslkxxxx",
"多媒体配置-手机互联/映射": "dmtpz_sjhl_ys",
"多媒体配置-车联网": "dmtpz_clw",
"多媒体配置-道路救援呼叫": "dmtpz_dljyhj",
"多媒体配置-语音识别控制系统": "dmtpz_yysbkzxt",
"多媒体配置-手势控制": "dmtpz_sskz",
"多媒体配置-蓝牙/车载电话": "dmtpz_ly_czdh",
"多媒体配置-中控液晶屏分屏显示": "dmtpz_zkyjpfpxs",
"多媒体配置-车载电视": "dmtpz_czds",
"多媒体配置-后排液晶屏": "dmtpz_hpyjp",
"多媒体配置-后排中央控制系统": "dmtpz_hpzykzxt",
"多媒体配置-接口类型": "dmtpz_jklx",
"多媒体配置-220V/230V电源": "dmtpz_220V_230Vdy",
"多媒体配置-后备厢12V电源接口": "dmtpz_hbx12Vdyjk",
"多媒体配置-CD/DVD": "dmtpz_CD_DVD",
"多媒体配置-扬声器品牌": "dmtpz_ysqpp",
"多媒体配置-扬声器数量": "dmtpz_ysqsl",
"多媒体配置-主动降噪系统": "dmtpz_zdjzxt",
"颜色配置": "color_pz",
"内饰颜色": "color_inner"
}
|
13,529 | 99c79d0646471ac8209f385d57c946a7caf8d0c7 | from __future__ import unicode_literals
__version__ = "5.0.29"
|
13,530 | ef3bcccb936e67d88cb08a3c8d368bf4fafb8780 | #!/urs/bin/env python
# coding:utf-8
#
'''
对爬取cnblogs.com上的文章并保存为本地pdf文档进行优化,
1.最开始采用requests模块进网页爬取,因为前一个版本使用的的是urllib,这个版本才用一种新的模块。
2.在爬取的时候发现爬取的某一篇文章呈现布局和浏览器中呈现的不一样
3.查找原因,结果是该html中嵌套了javascript代码,直接爬取的html代码当然和浏览器解析过不一样
4.这个问题其实也很简单找一个能够解析html中嵌套js代码的“非传统浏览器”即可,经过一番goole之后发现还真用这样的浏览器(方式)
5.PhantomJS与chrome headless的抉择,经过自己反复测试,确定选用chrome headless,为什么不用PhantomJS呢?
因为我发现经过PhantomJS解析之后所获得html代码任然和requests获取到的一模一样,结果是然并卵。因此选择了chrome headless
6.参考连接:https://segmentfault.com/a/1190000009399609#articleHeader1
7.浏览器关键字: chrome headless PhantomJS selenium
'''
import os
from time import sleep
# import requests
import progressbar
from lxml import etree
from selenium import webdriver
import pdfkit
base_url = r'http://www.cnblogs.com/pick'
html_head = '''
<head>
<meta charset="utf-8"/>
<meta content="width=device-width, initial-scale=1" name="viewport"/>
<link href="http://www.cnblogs.com/bundles/blog-common.css?v=ChDk9h03-S75WEqNhGvXkWireJ5cCWdK1xRM9NIXfnM1" rel="stylesheet" type="text/css"/>
</head>
'''
headers = {
'User-Agent': r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'
r'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
'Connection': 'keep-alive',
}
bar = progressbar.ProgressBar(
widgets=[
'[ ', progressbar.SimpleProgress(), ' ]',
progressbar.Bar(),
'[', progressbar.AnimatedMarker(), ']',
progressbar.AbsoluteETA()
]
)
pdfkit_options = {
'page-size': 'Letter',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': "UTF-8",
'custom-header': [
('Accept-Encoding', 'gzip')
],
'cookie': [
('cookie-name1', 'cookie-value1'),
('cookie-name2', 'cookie-value2'),
],
'quiet': '',
'no-outline': None
}
pdfkit_config = pdfkit.configuration(
wkhtmltopdf=r'D:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe')
blog_list = dict() # 定义一个字典变量,用于保存博客名称对应的博客连接
blogs = dict() # 定义一个字典变量,用于保存博客名称对应的博客内容
# defiend browser using chrome or phantomjs
# browser = webdriver.PhantomJS(executable_path='F:\\jtxiao\\program\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
browser = webdriver.Chrome(chrome_options=chrome_options,
executable_path="F:\\jtxiao\\program\chromedriver.exe")
# res = requests.get(base_url,headers=headers)
browser.get(base_url)
res = browser.page_source
page = etree.HTML(res)
total_page = int([i.text for i in page.xpath(
'//div[@class="pager"]/a[last()-1]')][0])
blog_name = [name.text for name in page.xpath(
'//div[@class="post_item_body"]/h3/a[@class="titlelnk"]')]
blog_url = page.xpath('//div[@class="post_item_body"]/h3/a/@href')
for i in bar(range(len(blog_name))):
blog_list[blog_name[i]] = blog_url[i]
for name, url in blog_list.items():
# content_res = requests.get(url,headers=headers,params={'coding':'utf-8'}).text
browser.get(url)
content_res = browser.page_source
blog_content_page = etree.HTML(content_res)
browser.quit()
# soup = BeautifulSoup(content_res.text,'lxml')
# blog_content = soup.select('div#cnblogs_post_body')
blog_title = blog_content_page.xpath('//h1[@class="postTitle"]')[0]
blog_content = blog_content_page.xpath('//div[@id="cnblogs_post_body"]')[0]
# blog_contentblog_title = blog_content_page.xpath('//div[@id="topics"]/*')
blogs[name] = html_head + etree.tostring(blog_title, encoding='unicode') + \
etree.tostring(blog_content, encoding='unicode')
try:
pdfkit.from_string(
blogs[name],
str(name) + '.pdf',
options=pdfkit_options,
configuration=pdfkit_config
)
except OSError as er:
print('OSError:{0}'.format(er))
break
|
13,531 | 51be17e9ae03909eb7e155af59bf1c3891655f52 | # Program: Algoritmo237_Para64.py
# Author: Ramon R. Valeriano
# Descritption:
# Developed: 02/04/2020 - 23:04
# Updated:
number = int(input("Enter with the number: "))
sum_ = 1
for e in range(2, number+1):
if number%2==0:
sum_-=(1/number)
else:
sum_+=(1/number)
print("\n", sum_)
|
13,532 | a317e54bcee51f836cdf45c3ea24bee88af46ca1 | #!/usr/bin/env python
from http.server import BaseHTTPRequestHandler, HTTPServer
import urllib
from os import curdir, sep
import functools
import datetime
from enum import Enum
from urllib.parse import urlparse
import traceback
import configparser
import os
from config import Config
from common import Page
from ack import Ack, serve_acks
from my_acks import MyAcks
from report import Report
from config import Config
class Verb(Enum):
GET = 1
POST = 2
class PeerAckHTTPHandler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
self.routing = {
"/": "/ack",
"/ack": Ack(),
"/myacks": MyAcks(),
"/report": Report(),
"/acks": functools.partial(serve_acks, self),
"/favicon.ico": functools.partial(self.serve_file, "image/x-icon", "favicon.ico"),
"/question-mark.png": functools.partial(self.serve_file, "image/x-png", "question-mark.png"),
"/site.css": functools.partial(self.serve_file, "text/css", "site.css"),
"/test.html": functools.partial(self.serve_file, "text/html", "test.html"),
"/inforad.html": functools.partial(self.serve_file, "text/html", "inforad.html"),
"/auth.js": functools.partial(self.serve_file, "application/javascript", "auth.js"),
"/common.js": functools.partial(self.serve_file, "application/javascript", "common.js"),
"/del.png": functools.partial(self.serve_file, "image/png", "del.png"),
"/check.png": functools.partial(self.serve_file, "image/png", "check.png"),
"/logo.png": functools.partial(self.serve_file, "image/png", "logo.png"),
}
super(BaseHTTPRequestHandler, self).__init__(request, client_address, server)
def do_GET(self):
self.route_request(Verb.GET)
def route_request(self, verb):
route = urlparse(self.path).path
while type(route) == str:
route = self.routing[route]
if isinstance(route, Page):
try:
if verb == Verb.GET:
route.do_get(self)
return
else:
route.do_post(self)
return
except Exception as e:
route.send_error(self, traceback.format_exc())
raise
if not callable(route):
raise Exception("bad route %r" % route)
route(verb)
def do_POST(self):
self.route_request(Verb.POST)
def serve_file(self, content_type, file, verb):
if verb != Verb.GET:
raise Exception("required GET, got: %s" % verb)
f = open(curdir + sep + file, "rb")
self.send_response(200)
self.send_header('Content-type', content_type)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
def run():
config = configparser.ConfigParser()
dir = os.path.dirname(__file__)
cfg_path = os.path.join(dir, 'config.ini')
print('reading config from {0}'.format(cfg_path))
config.read(cfg_path)
if 'config' in config.sections():
if 'conn_string' in config['config']:
Config.conn_string = config['config']['conn_string']
if 'superusers' in config['config']:
s = config['config']['superusers']
Config.superusers = [x.strip() for x in s.split(',')]
# Server settings
print('starting server on port 8081...')
server_address = ('127.0.0.1', 8081)
httpd = HTTPServer(server_address, PeerAckHTTPHandler)
print('running server...')
httpd.serve_forever()
run()
|
13,533 | e6fe262d292e7b7fdad96b190fbbfb6ce32972b2 | from behave import *
import requests
import json
@given('I have user authentication credentials')
def step_impl(context):
context.url = 'https://api.withleaf.io/api/authenticate'
context.headers = {'content-type': 'application/json'}
context.data = {"username": "fernandosdba@gmail.com", "password": "94084452", "rememberMe":"true"}
@when('I make an http post call to https://api.withleaf.io/api/authenticate')
def step_impl(context):
context.response = requests.request("POST" , context.url, json=context.data, headers=context.headers)
@then('I must get a response with status code 200 and a json object with token')
def step_impl(context):
print("\n" , context.response.content)
assert context.response.status_code == 200
|
13,534 | ead60a83f114c9433ca47f619d9ac5a336cda5ae | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use
this file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
nd_2_nz
"""
from functools import reduce as functools_reduce
from te import platform as tbe_platform
from topi.cce import util
from te import tik
# available ub size
TOTAL_UB_MEMORY = tbe_platform.cce_conf.get_soc_spec(
tbe_platform.cce_conf.UB_SIZE)
# available number of cores
MAX_CORE_NUM = tbe_platform.cce_conf.get_soc_spec(
tbe_platform.cce_conf.CORE_NUM)
# bytes of type int8
SIZE_ONE_BYTES = 1
# bytes of type float16
SIZE_TWO_BYTES = 2
# bytes of type float32
SIZE_FOUR_BYTES = 4
# size of the cube unit
CUBE_SIZE = 16
# size of the cube unit of last axis when dtype is int8
CUBE_SIZE_2 = 32
# minimum unit of data_move: 32Bytes
DATA_MOVE_MIN_UNIT = 32
# maximum repeat number
MAX_REPEATS = 255
# maximum burst number
MAX_BURST_NUMBER = 4095
# maximum rep stride
MAX_STRIDE_REP = 255
# maximum blk stride
MAX_STRIDE_BLK = 65535
# maximum mask
MAX_MASK = 128
# number of cubes processed by one vadds instruction
NUM_CUBE = MAX_MASK // CUBE_SIZE
# pylint: disable=too-many-instance-attributes,too-many-arguments
# pylint: disable=too-many-locals,too-many-branches
# pylint: disable=unused-argument,too-many-lines,too-many-statements
def _cal_core(tik_instance, total_core_loop_num, num_core, core_number):
"""
calculate the loop number on each core
"""
core_loop = tik_instance.Scalar("uint64")
sum_core = tik_instance.Scalar("uint64")
with tik_instance.if_scope(num_core < total_core_loop_num % MAX_CORE_NUM):
core_loop.set_as((total_core_loop_num + core_number - 1) //
core_number)
sum_core.set_as(core_loop * num_core)
with tik_instance.else_scope():
core_loop.set_as(total_core_loop_num // core_number)
sum_core.set_as((core_loop + 1) *
(total_core_loop_num % MAX_CORE_NUM) +
core_loop *
(num_core - total_core_loop_num % MAX_CORE_NUM))
return core_loop, sum_core
def _set_core_num(loop_number):
"""
set the block_num
"""
if loop_number < MAX_CORE_NUM:
return loop_number
return MAX_CORE_NUM
def _cal_core_loop(tik_instance, num_data_one_loop, core_loop, ub_ori):
"""
calculate the number of loops and remainder on each core
"""
align_loop = tik_instance.Scalar("uint64")
align_loop.set_as((ub_ori + num_data_one_loop - 1) // num_data_one_loop)
with tik_instance.if_scope((align_loop - 1) * core_loop *
num_data_one_loop >= ub_ori):
align_loop.set_as(align_loop - 1)
remainder = tik_instance.Scalar("uint64")
remainder.set_as(core_loop % align_loop)
with tik_instance.if_scope(remainder == 0):
remainder.set_as(align_loop)
return align_loop, remainder
def _cal_core_loop_python(num_data_one_loop, core_loop, ub_ori):
"""
calculate the number of loops and remainder on each core and return python
variable
"""
align_loop = ub_ori // num_data_one_loop
remainder = core_loop % align_loop
if align_loop > core_loop:
align_loop = core_loop
remainder = 0
return align_loop, remainder
def _cal_core_loop_python_one(num_data_one_loop, core_loop, ub_ori):
"""
calculate the number of loops and remainder on each core in another case
and return python variable
"""
align_loop = ub_ori // num_data_one_loop
if align_loop * num_data_one_loop > ub_ori:
align_loop = align_loop - 1
remainder = core_loop % align_loop
return align_loop, remainder
def _cast_dtype(tik_instance, dst, src, cast_repeat_time,
cast_remainder, cast_case):
"""
cast the data form int8 to float16 and from float16 to int8
"""
if cast_case == "int8_2_float16":
tik_instance.vconv(MAX_MASK, 'none', dst, src, cast_repeat_time,
1, 1, 8, 4, None)
with tik_instance.if_scope(cast_remainder != 0):
tik_instance.vconv(cast_remainder, 'none',
dst[cast_repeat_time * MAX_MASK],
src[cast_repeat_time * MAX_MASK],
1, 1, 1, 8, 4, None)
elif cast_case == "float16_2_int8":
tik_instance.vconv(MAX_MASK, 'none', dst, src, cast_repeat_time,
1, 1, 4, 8, None)
with tik_instance.if_scope(cast_remainder != 0):
tik_instance.vconv(cast_remainder, 'none',
dst[cast_repeat_time * MAX_MASK],
src[cast_repeat_time * MAX_MASK],
1, 1, 1, 4, 8, None)
class ND2NzCompute:
"""
Rearranges data from ND format into FRACTAL_NZ format
Functions
----------
__init__:
initialize some properties
set_tik_instance:
set tik_instance
set_src_dst_tensor:
set input and output tensor
cal_core_loop:
calculate the loop number on each core
set_format_transfer_case:
divide the transfer case from nd to nz
data_rearrange_case_zero:
rearrange data when UB can put in second last axis * last axis data and
the shape of dst is not 4-D
data_rearrange_case_one:
rearrange data when UB can not put in second last axis * last axis data
data_rearrange_case_two:
rearrange data when UB can not put in second last axis * 16 data
data_rearrange_case_three:
rearrange data when UB can put in last axis * 16 data and
the shape of dst is 4-D
data_rearrange_case_four:
rearrange data when UB can not put in last axis * 16 data and
the shape of dst is 4-D
data_rearrange_case_five:
rearrange data when UB can not put in last axis * 16 data and
the shape of dst is 4-D
    format_transfer_case_zero:
        the transfer process when the transfer case is 0
    format_transfer_case_one:
        the transfer process when the transfer case is 1
    format_transfer_case_two:
        the transfer process when the transfer case is 2
    format_transfer_case_three:
        the transfer process when the transfer case is 3
    format_transfer_case_four:
        the transfer process when the transfer case is 4
    format_transfer_case_five:
        the transfer process when the transfer case is 5
    format_transfer_case_six:
        the transfer process when the transfer case is 6
    format_transfer_case_seven:
        the transfer process when the transfer case is 7
    format_transfer_case_eight:
        the transfer process when the transfer case is 8
nd_2_nz_compute:
the overall transfer process
get_tik_instance:
obtain tik instance
Returns
-------
None
"""
def __init__(self, src_shape, dtype, kernel_name):
"""
initialize some properties
"""
self.src_shape_ori = src_shape[:]
self.src_shape = src_shape[:]
self.dtype = dtype
self.kernel_name = kernel_name
if len(src_shape) == 1:
self.src_shape = [1, src_shape[0]]
self.dst_shape = self.src_shape[:]
self.dst_shape[-2:] = [(self.src_shape[-1] + CUBE_SIZE - 1) //
CUBE_SIZE,
(self.src_shape[-2] + CUBE_SIZE - 1) //
CUBE_SIZE,
CUBE_SIZE, CUBE_SIZE]
self.num_byte = SIZE_TWO_BYTES
self.vadds_mask = MAX_MASK
if self.dtype == "float32":
self.num_byte = SIZE_FOUR_BYTES
self.vadds_mask = MAX_MASK // 2
# the number of data that can be moved in each data_move
self.num_data = DATA_MOVE_MIN_UNIT // self.num_byte
util.check_shape_rule(self.dst_shape)
# the number of data that UB can put in
self.ub_memory = min(TOTAL_UB_MEMORY, 252 * 1024) // self.num_byte // 2
self.dst_gm = None
self.src_gm = None
def set_tik_instance(self):
"""
set tik_instance
"""
tik_instance = tik.Tik()
self.set_src_dst_tensor(tik_instance)
return tik_instance
def set_src_dst_tensor(self, tik_instance):
"""
set input and output tensor
"""
src_element_number = functools_reduce(lambda x1, x2: x1 * x2,
self.src_shape_ori[:])
dst_element_number = functools_reduce(lambda x1, x2: x1 * x2,
self.dst_shape[:])
self.src_gm = tik_instance.Tensor(self.dtype,
(src_element_number,),
name="src_gm",
scope=tik.scope_gm)
self.dst_gm = tik_instance.Tensor(self.dtype,
(dst_element_number,),
name="dst_gm",
scope=tik.scope_gm)
    def set_format_transfer_case(self):
        """
        Select which of the nine tiling strategies (0-8) to use, based on
        last-axis alignment, whether the destination is effectively 4-D,
        and how much of the tile fits into the UB budget.

        Returns
        -------
        int : the transfer case index consumed by the format_transfer_case_*
            methods.
        """
        format_transfer_case = 0
        if self.src_shape[-1] % CUBE_SIZE == 0:
            # Aligned last axis: case 0 if a whole (-2 x -1) tile fits in
            # UB, otherwise case 2.
            if self.dst_shape[-4] * self.dst_shape[-1] * self.dst_shape[-3] * \
                    (self.dst_shape[-2] + 1) > self.ub_memory:
                format_transfer_case = 2
        else:
            # Unaligned last axis: cascade through progressively tighter
            # UB constraints (3 -> 4 -> 7).
            # NOTE(review): the nesting of the 3/4/7 checks below was
            # reconstructed from control-flow context - confirm against
            # the original operator source.
            format_transfer_case = 1
            if self.dst_shape[-4] * self.dst_shape[-1] * self.dst_shape[-3] * \
                    (self.dst_shape[-2] + 1) > self.ub_memory:
                format_transfer_case = 3
                if (CUBE_SIZE * CUBE_SIZE * self.dst_shape[-3] + CUBE_SIZE) > \
                        self.ub_memory:
                    format_transfer_case = 4
                    if (self.dst_shape[-3] - 1) * self.dst_shape[-1] * \
                            self.dst_shape[-2] // self.num_data <= MAX_STRIDE_BLK and \
                            ((CUBE_SIZE + 1) * CUBE_SIZE * self.dst_shape[-4]) >= \
                            self.ub_memory:
                        format_transfer_case = 7
        # A destination is "4-D" when it has exactly four axes or when all
        # leading source axes collapse to 1.
        is_four_d = 0
        if len(self.dst_shape) == 4:
            is_four_d = 1
        else:
            is_four_d = (functools_reduce(lambda x1, x2: x1 * x2,
                                          self.src_shape[:-2]) == 1)
        if is_four_d:
            # 4-D specializations: 5 (aligned, fits UB), 6 (unaligned,
            # fits UB), 7 (does not fit but the GM stride is legal).
            if self.dst_shape[-4] * self.dst_shape[-1] * \
                    (self.dst_shape[-2] + 1) <= self.ub_memory and \
                    self.src_shape[-1] % CUBE_SIZE == 0:
                format_transfer_case = 5
            elif self.dst_shape[-4] * self.dst_shape[-1] * \
                    (self.dst_shape[-2] + 1) <= self.ub_memory and \
                    self.src_shape[-1] % CUBE_SIZE != 0:
                format_transfer_case = 6
            elif self.dst_shape[-4] * self.dst_shape[-1] * \
                    (self.dst_shape[-2] + 1) > self.ub_memory and \
                    (self.dst_shape[-3] - 1) * self.dst_shape[-1] * \
                    self.dst_shape[-2] // self.num_data <= MAX_STRIDE_BLK:
                format_transfer_case = 7
            # Case 8: multi-core split along the second-last axis; only
            # legal when that axis divides evenly over all cores and half
            # the UB budget still holds a tile.
            if self.dst_shape[-4] * self.dst_shape[-1] * \
                    (self.dst_shape[-2] + 1) <= self.ub_memory // 2 and \
                    self.src_shape[-2] % (MAX_CORE_NUM * CUBE_SIZE) == 0 and \
                    self.src_shape[-1] % CUBE_SIZE == 0 and\
                    self.src_shape[-2] // (MAX_CORE_NUM * CUBE_SIZE) >= 2:
                format_transfer_case = 8
        return format_transfer_case
    def data_rearrange_case_zero(self, tik_instance, ub_ori, ub_trans,
                                 loop_num):
        """
        rearrange data when UB can put in second last axis * last axis data
        and the shape of dst is not 4-D

        Parameters
        ----------
        tik_instance : the Tik container being built
        ub_ori : UB tensor holding the raw ND-ordered rows
        ub_trans : UB tensor receiving the NZ-ordered result
        loop_num : number of leading-axis slices handled in this call

        The method first zero-pads the ragged tails of the last two source
        axes inside ub_ori, then uses vadds (add scalar 0) as a strided
        copy to scatter each CUBE_SIZE-wide column group of ub_ori into
        its NZ position in ub_trans.  Every vadds/vector_dup is doubled
        for float32 (vadds_mask == MAX_MASK // 2) because one cube row
        then needs two half-row vector operations.
        """
        num_row_one_loop = loop_num * CUBE_SIZE * self.dst_shape[-3]
        scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)
        # --- pad the last axis: build a bit mask selecting the padding
        # lanes of the trailing cube and vector_dup zero into them,
        # MAX_REPEATS rows at a time plus a remainder pass.
        if self.src_shape[-1] % CUBE_SIZE != 0:
            mask = 0
            for i, _ in enumerate(range(CUBE_SIZE -
                                        self.src_shape[-1] % CUBE_SIZE)):
                mask += 2 ** (CUBE_SIZE - 1 - i)
            with tik_instance.for_range(0, num_row_one_loop // MAX_REPEATS) \
                    as num_repeat:
                tik_instance.vector_dup(
                    [0, mask],
                    ub_ori[(MAX_REPEATS * num_repeat + 1) *
                           self.dst_shape[-4] * self.dst_shape[-1] -
                           CUBE_SIZE],
                    scalar_zero, MAX_REPEATS, 0,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
            with tik_instance.if_scope(num_row_one_loop % MAX_REPEATS != 0):
                tik_instance.vector_dup(
                    [0, mask],
                    ub_ori[((num_row_one_loop // MAX_REPEATS) *
                            MAX_REPEATS + 1) *
                           self.dst_shape[-4] * self.dst_shape[-1] -
                           CUBE_SIZE],
                    scalar_zero, num_row_one_loop % MAX_REPEATS, 0,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
        # --- pad the second-last axis: zero the rows past
        # src_shape[-2] % CUBE_SIZE in the final cube-row, full cube
        # groups first, then the remainder group.
        if (self.src_shape[-2] % CUBE_SIZE) != 0:
            with tik_instance.for_range(0, loop_num) as num_loop_index:
                with tik_instance.for_range(
                        0, self.dst_shape[-4] // NUM_CUBE) as num_col_cube:
                    tik_instance.vector_dup(
                        self.vadds_mask,
                        ub_ori[num_loop_index * self.dst_shape[-3] *
                               self.dst_shape[-2] * self.dst_shape[-4] *
                               self.dst_shape[-1] +
                               ((self.dst_shape[-3] - 1) *
                                self.dst_shape[-2] +
                                self.src_shape[-2] % CUBE_SIZE) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               num_col_cube * NUM_CUBE * CUBE_SIZE],
                        scalar_zero,
                        CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        # float32: second dup for the upper half of the row
                        tik_instance.vector_dup(
                            self.vadds_mask,
                            ub_ori[num_loop_index * self.dst_shape[-3] *
                                   self.dst_shape[-2] * self.dst_shape[-4] *
                                   self.dst_shape[-1] +
                                   ((self.dst_shape[-3] - 1) *
                                    self.dst_shape[-2] +
                                    self.src_shape[-2] % CUBE_SIZE) *
                                   self.dst_shape[-1] * self.dst_shape[-4] +
                                   num_col_cube * NUM_CUBE * CUBE_SIZE +
                                   CUBE_SIZE // 2],
                            scalar_zero,
                            CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
                if self.dst_shape[-4] % NUM_CUBE != 0:
                    tik_instance.vector_dup(
                        (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_ori[num_loop_index * self.dst_shape[-3] *
                               self.dst_shape[-2] * self.dst_shape[-4] *
                               self.dst_shape[-1] +
                               ((self.dst_shape[-3] - 1) *
                                self.dst_shape[-2] +
                                self.src_shape[-2] % CUBE_SIZE) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               self.dst_shape[-4] // NUM_CUBE * NUM_CUBE *
                               CUBE_SIZE],
                        scalar_zero,
                        CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        tik_instance.vector_dup(
                            (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                            self.vadds_mask // MAX_MASK,
                            ub_ori[num_loop_index * self.dst_shape[-3] *
                                   self.dst_shape[-2] * self.dst_shape[-4] *
                                   self.dst_shape[-1] +
                                   ((self.dst_shape[-3] - 1) *
                                    self.dst_shape[-2] +
                                    self.src_shape[-2] % CUBE_SIZE) *
                                   self.dst_shape[-1] * self.dst_shape[-4] +
                                   self.dst_shape[-4] // NUM_CUBE *
                                   NUM_CUBE * CUBE_SIZE + CUBE_SIZE // 2],
                            scalar_zero,
                            CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
        # --- main rearrange: vadds with scalar 0 acts as a strided copy
        # from the row-major layout in ub_ori into the NZ column-block
        # layout in ub_trans; full NUM_CUBE groups first, remainder after.
        with tik_instance.for_range(0, loop_num) as num_loop_index:
            with tik_instance.for_range(0, self.dst_shape[-4] // NUM_CUBE) as \
                    num_col_cube:
                with tik_instance.for_range(
                        0, CUBE_SIZE * self.dst_shape[-3] // MAX_REPEATS) \
                        as num_repeat_one:
                    tik_instance.vadds(
                        self.vadds_mask,
                        ub_trans[num_loop_index * self.dst_shape[-3] *
                                 self.dst_shape[-2] * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 num_loop_index * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 num_col_cube * NUM_CUBE *
                                 self.dst_shape[-3] * self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS * num_repeat_one * CUBE_SIZE +
                                 MAX_MASK * num_col_cube],
                        ub_ori[num_loop_index * self.dst_shape[-4] *
                               self.dst_shape[-1] * self.dst_shape[-2] *
                               self.dst_shape[-3] +
                               MAX_REPEATS * num_repeat_one *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               num_col_cube * NUM_CUBE * CUBE_SIZE],
                        scalar_zero, MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        tik_instance.vadds(
                            self.vadds_mask,
                            ub_trans[num_loop_index * self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     num_loop_index * self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     num_col_cube * NUM_CUBE *
                                     self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-1] +
                                     MAX_REPEATS * num_repeat_one *
                                     CUBE_SIZE + MAX_MASK * num_col_cube +
                                     CUBE_SIZE // 2],
                            ub_ori[num_loop_index * self.dst_shape[-4] *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-3] +
                                   MAX_REPEATS * num_repeat_one *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-4] +
                                   num_col_cube * NUM_CUBE * CUBE_SIZE +
                                   CUBE_SIZE // 2],
                            scalar_zero, MAX_REPEATS,
                            self.dst_shape[-3] * self.dst_shape[-2] *
                            self.dst_shape[-1] // self.num_data +
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
                # remainder repeats past a multiple of MAX_REPEATS
                with tik_instance.if_scope((CUBE_SIZE * self.dst_shape[-3]) %
                                           MAX_REPEATS != 0):
                    tik_instance.vadds(
                        self.vadds_mask,
                        ub_trans[num_loop_index * self.dst_shape[-3] *
                                 self.dst_shape[-2] * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 num_loop_index * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 num_col_cube * NUM_CUBE *
                                 self.dst_shape[-3] * self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 (MAX_REPEATS *
                                  ((CUBE_SIZE * self.dst_shape[-3]) //
                                   MAX_REPEATS)) * CUBE_SIZE +
                                 MAX_MASK * num_col_cube],
                        ub_ori[num_loop_index * self.dst_shape[-4] *
                               self.dst_shape[-1] * self.dst_shape[-2] *
                               self.dst_shape[-3] +
                               (MAX_REPEATS *
                                ((CUBE_SIZE * self.dst_shape[-3]) //
                                 MAX_REPEATS)) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               num_col_cube * NUM_CUBE * CUBE_SIZE],
                        scalar_zero,
                        (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        tik_instance.vadds(
                            self.vadds_mask,
                            ub_trans[num_loop_index * self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     num_loop_index * self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     num_col_cube * NUM_CUBE *
                                     self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-1] +
                                     (MAX_REPEATS *
                                      ((CUBE_SIZE * self.dst_shape[-3]) //
                                       MAX_REPEATS)) * CUBE_SIZE +
                                     MAX_MASK * num_col_cube +
                                     CUBE_SIZE // 2],
                            ub_ori[num_loop_index * self.dst_shape[-4] *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-3] +
                                   (MAX_REPEATS *
                                    ((CUBE_SIZE * self.dst_shape[-3]) //
                                     MAX_REPEATS)) *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-4] +
                                   num_col_cube * NUM_CUBE * CUBE_SIZE +
                                   CUBE_SIZE // 2],
                            scalar_zero,
                            (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                            self.dst_shape[-3] * self.dst_shape[-2] *
                            self.dst_shape[-1] // self.num_data +
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
            # remainder column-cube group (dst_shape[-4] not a multiple of
            # NUM_CUBE) gets the same treatment with a narrower mask
            if self.dst_shape[-4] % NUM_CUBE != 0:
                with tik_instance.for_range(
                        0, CUBE_SIZE * self.dst_shape[-3] // MAX_REPEATS) \
                        as num_repeat_one:
                    tik_instance.vadds(
                        (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_trans[num_loop_index * self.dst_shape[-3] *
                                 self.dst_shape[-2] * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 num_loop_index * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 self.dst_shape[-4] // NUM_CUBE *
                                 NUM_CUBE * self.dst_shape[-3] *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS * num_repeat_one * CUBE_SIZE +
                                 self.dst_shape[-4] // NUM_CUBE *
                                 MAX_MASK],
                        ub_ori[num_loop_index * self.dst_shape[-4] *
                               self.dst_shape[-1] * self.dst_shape[-2] *
                               self.dst_shape[-3] +
                               MAX_REPEATS * num_repeat_one *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               self.dst_shape[-4] // NUM_CUBE *
                               NUM_CUBE * CUBE_SIZE],
                        scalar_zero, MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        tik_instance.vadds(
                            (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                            self.vadds_mask // MAX_MASK,
                            ub_trans[num_loop_index * self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     num_loop_index * self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     self.dst_shape[-4] // NUM_CUBE *
                                     NUM_CUBE * self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-1] +
                                     MAX_REPEATS * num_repeat_one *
                                     CUBE_SIZE +
                                     self.dst_shape[-4] // NUM_CUBE *
                                     MAX_MASK + CUBE_SIZE // 2],
                            ub_ori[num_loop_index * self.dst_shape[-4] *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-3] +
                                   MAX_REPEATS * num_repeat_one *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-4] +
                                   self.dst_shape[-4] // NUM_CUBE *
                                   NUM_CUBE * CUBE_SIZE + CUBE_SIZE // 2],
                            scalar_zero, MAX_REPEATS,
                            self.dst_shape[-3] * self.dst_shape[-2] *
                            self.dst_shape[-1] // self.num_data +
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
                with tik_instance.if_scope((CUBE_SIZE * self.dst_shape[-3]) %
                                           MAX_REPEATS != 0):
                    tik_instance.vadds(
                        (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_trans[num_loop_index * self.dst_shape[-3] *
                                 self.dst_shape[-2] * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 num_loop_index * self.dst_shape[-4] *
                                 self.dst_shape[-1] +
                                 self.dst_shape[-4] // NUM_CUBE *
                                 NUM_CUBE * self.dst_shape[-3] *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 (MAX_REPEATS *
                                  ((CUBE_SIZE * self.dst_shape[-3]) //
                                   MAX_REPEATS)) * CUBE_SIZE +
                                 self.dst_shape[-4] // NUM_CUBE *
                                 MAX_MASK],
                        ub_ori[num_loop_index * self.dst_shape[-4] *
                               self.dst_shape[-1] * self.dst_shape[-2] *
                               self.dst_shape[-3] +
                               (MAX_REPEATS *
                                ((CUBE_SIZE * self.dst_shape[-3]) //
                                 MAX_REPEATS)) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               self.dst_shape[-4] // NUM_CUBE *
                               NUM_CUBE * CUBE_SIZE],
                        scalar_zero,
                        (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        tik_instance.vadds(
                            (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                            self.vadds_mask // MAX_MASK,
                            ub_trans[num_loop_index * self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     num_loop_index * self.dst_shape[-4] *
                                     self.dst_shape[-1] +
                                     self.dst_shape[-4] // NUM_CUBE *
                                     NUM_CUBE * self.dst_shape[-3] *
                                     self.dst_shape[-2] *
                                     self.dst_shape[-1] +
                                     (MAX_REPEATS *
                                      ((CUBE_SIZE * self.dst_shape[-3]) //
                                       MAX_REPEATS)) * CUBE_SIZE +
                                     self.dst_shape[-4] // NUM_CUBE *
                                     MAX_MASK + CUBE_SIZE // 2],
                            ub_ori[num_loop_index * self.dst_shape[-4] *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-3] +
                                   (MAX_REPEATS *
                                    ((CUBE_SIZE * self.dst_shape[-3]) //
                                     MAX_REPEATS)) *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-4] +
                                   self.dst_shape[-4] // NUM_CUBE *
                                   NUM_CUBE * CUBE_SIZE + CUBE_SIZE // 2],
                            scalar_zero,
                            (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                            self.dst_shape[-3] * self.dst_shape[-2] *
                            self.dst_shape[-1] // self.num_data +
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
    def data_rearrange_case_one(self, tik_instance, ub_ori, ub_trans,
                                col_cube_num, is_last):
        """
        rearrange data when UB can not put in second last axis * last axis
        data

        Parameters
        ----------
        tik_instance : the Tik container being built
        ub_ori : UB tensor holding the raw ND-ordered rows
        ub_trans : UB tensor receiving the NZ-ordered result
        col_cube_num : number of CUBE_SIZE-wide column cubes in this tile
        is_last : 1 when this tile contains the ragged tail of the last
            axis, so tail padding must be applied
        """
        scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)
        # --- pad the last-axis tail, only on the tile that contains it
        if self.src_shape[-1] % CUBE_SIZE != 0:
            with tik_instance.if_scope(is_last == 1):
                mask = 0
                for i, _ in enumerate(range(CUBE_SIZE - self.src_shape[-1] %
                                            CUBE_SIZE)):
                    mask += 2 ** (CUBE_SIZE - 1 - i)
                with tik_instance.for_range(0, self.src_shape[-2]) \
                        as num_row:
                    tik_instance.vector_dup(
                        [0, mask],
                        ub_ori[(num_row + 1) * col_cube_num *
                               self.dst_shape[-1] - CUBE_SIZE],
                        scalar_zero, 1, 0, 0)
        # --- pad the second-last-axis tail rows with zeros
        if self.src_shape[-2] % CUBE_SIZE != 0:
            with tik_instance.for_range(0, col_cube_num // NUM_CUBE) as \
                    num_col_cube:
                tik_instance.vector_dup(
                    self.vadds_mask,
                    ub_ori[((self.dst_shape[-3] - 1) * self.dst_shape[-2] +
                            self.src_shape[-2] % CUBE_SIZE) *
                           self.dst_shape[-1] * col_cube_num +
                           num_col_cube * NUM_CUBE * CUBE_SIZE],
                    scalar_zero,
                    CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                    self.num_byte // 2,
                    col_cube_num * self.dst_shape[-1] // self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    # float32: second dup for the upper half of the row
                    tik_instance.vector_dup(
                        self.vadds_mask,
                        ub_ori[((self.dst_shape[-3] - 1) *
                                self.dst_shape[-2] +
                                self.src_shape[-2] % CUBE_SIZE) *
                               self.dst_shape[-1] * col_cube_num +
                               num_col_cube * NUM_CUBE * CUBE_SIZE +
                               CUBE_SIZE // 2],
                        scalar_zero,
                        CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                        self.num_byte // 2,
                        col_cube_num * self.dst_shape[-1] //
                        self.num_data)
            with tik_instance.if_scope(col_cube_num % NUM_CUBE != 0):
                tik_instance.vector_dup(
                    (col_cube_num % NUM_CUBE) * CUBE_SIZE *
                    self.vadds_mask // MAX_MASK,
                    ub_ori[((self.dst_shape[-3] - 1) * self.dst_shape[-2] +
                            self.src_shape[-2] % CUBE_SIZE) *
                           self.dst_shape[-1] * col_cube_num +
                           col_cube_num // NUM_CUBE * NUM_CUBE *
                           CUBE_SIZE],
                    scalar_zero,
                    CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                    self.num_byte // 2,
                    col_cube_num * self.dst_shape[-1] // self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vector_dup(
                        (col_cube_num % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_ori[((self.dst_shape[-3] - 1) *
                                self.dst_shape[-2] +
                                self.src_shape[-2] % CUBE_SIZE) *
                               self.dst_shape[-1] * col_cube_num +
                               col_cube_num // NUM_CUBE * NUM_CUBE *
                               CUBE_SIZE + CUBE_SIZE // 2],
                        scalar_zero,
                        CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                        self.num_byte // 2,
                        col_cube_num * self.dst_shape[-1] //
                        self.num_data)
        # --- main rearrange: strided vadds-copy each full NUM_CUBE column
        # group from ub_ori into its NZ position in ub_trans
        with tik_instance.for_range(0, col_cube_num // NUM_CUBE) as \
                num_col_cube:
            with tik_instance.for_range(
                    0, CUBE_SIZE * self.dst_shape[-3] // MAX_REPEATS) \
                    as num_repeat_one:
                tik_instance.vadds(
                    self.vadds_mask,
                    ub_trans[num_col_cube * NUM_CUBE * self.dst_shape[-3] *
                             self.dst_shape[-2] * self.dst_shape[-1] +
                             MAX_REPEATS * num_repeat_one * CUBE_SIZE +
                             num_col_cube * MAX_MASK],
                    ub_ori[MAX_REPEATS * num_repeat_one *
                           self.dst_shape[-1] * col_cube_num +
                           num_col_cube * NUM_CUBE * CUBE_SIZE],
                    scalar_zero, MAX_REPEATS,
                    self.dst_shape[-3] * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    col_cube_num * self.dst_shape[-1] // self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        self.vadds_mask,
                        ub_trans[num_col_cube * NUM_CUBE *
                                 self.dst_shape[-3] *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS * num_repeat_one * CUBE_SIZE +
                                 num_col_cube * MAX_MASK +
                                 CUBE_SIZE // 2],
                        ub_ori[MAX_REPEATS * num_repeat_one *
                               self.dst_shape[-1] * col_cube_num +
                               num_col_cube * NUM_CUBE * CUBE_SIZE +
                               CUBE_SIZE // 2],
                        scalar_zero, MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        col_cube_num * self.dst_shape[-1] //
                        self.num_data)
            # remainder repeats past a multiple of MAX_REPEATS
            with tik_instance.if_scope((CUBE_SIZE * self.dst_shape[-3]) %
                                       MAX_REPEATS != 0):
                tik_instance.vadds(
                    self.vadds_mask,
                    ub_trans[num_col_cube * NUM_CUBE * self.dst_shape[-3] *
                             self.dst_shape[-2] * self.dst_shape[-1] +
                             (MAX_REPEATS *
                              ((CUBE_SIZE * self.dst_shape[-3]) //
                               MAX_REPEATS)) * CUBE_SIZE +
                             num_col_cube * MAX_MASK],
                    ub_ori[(MAX_REPEATS *
                            ((CUBE_SIZE * self.dst_shape[-3]) //
                             MAX_REPEATS)) *
                           self.dst_shape[-1] * col_cube_num +
                           num_col_cube * NUM_CUBE * CUBE_SIZE],
                    scalar_zero,
                    (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                    self.dst_shape[-3] * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    col_cube_num * self.dst_shape[-1] // self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        self.vadds_mask,
                        ub_trans[num_col_cube * NUM_CUBE *
                                 self.dst_shape[-3] *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 (MAX_REPEATS *
                                  ((CUBE_SIZE * self.dst_shape[-3]) //
                                   MAX_REPEATS)) * CUBE_SIZE +
                                 num_col_cube * MAX_MASK +
                                 CUBE_SIZE // 2],
                        ub_ori[(MAX_REPEATS *
                                ((CUBE_SIZE * self.dst_shape[-3]) //
                                 MAX_REPEATS)) *
                               self.dst_shape[-1] * col_cube_num +
                               num_col_cube * NUM_CUBE * CUBE_SIZE +
                               CUBE_SIZE // 2],
                        scalar_zero,
                        (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        col_cube_num * self.dst_shape[-1] //
                        self.num_data)
        # --- remainder column-cube group with a narrower vector mask
        with tik_instance.if_scope(col_cube_num % NUM_CUBE != 0):
            with tik_instance.for_range(
                    0, CUBE_SIZE * self.dst_shape[-3] // MAX_REPEATS) \
                    as num_repeat_one:
                tik_instance.vadds(
                    (col_cube_num % NUM_CUBE) * CUBE_SIZE *
                    self.vadds_mask // MAX_MASK,
                    ub_trans[col_cube_num // NUM_CUBE * NUM_CUBE *
                             self.dst_shape[-3] * self.dst_shape[-2] *
                             self.dst_shape[-1] + MAX_REPEATS *
                             num_repeat_one * CUBE_SIZE +
                             col_cube_num // NUM_CUBE * MAX_MASK],
                    ub_ori[MAX_REPEATS * num_repeat_one *
                           self.dst_shape[-1] * col_cube_num +
                           col_cube_num // NUM_CUBE * NUM_CUBE *
                           CUBE_SIZE],
                    scalar_zero, MAX_REPEATS,
                    self.dst_shape[-3] * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    col_cube_num * self.dst_shape[-1] // self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        (col_cube_num % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_trans[col_cube_num // NUM_CUBE * NUM_CUBE *
                                 self.dst_shape[-3] *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS * num_repeat_one *
                                 CUBE_SIZE + col_cube_num //
                                 NUM_CUBE * MAX_MASK +
                                 CUBE_SIZE // 2],
                        ub_ori[MAX_REPEATS * num_repeat_one *
                               self.dst_shape[-1] *
                               col_cube_num + col_cube_num //
                               NUM_CUBE * NUM_CUBE *
                               CUBE_SIZE + CUBE_SIZE // 2],
                        scalar_zero, MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        col_cube_num * self.dst_shape[-1] //
                        self.num_data)
            with tik_instance.if_scope((CUBE_SIZE * self.dst_shape[-3]) %
                                       MAX_REPEATS != 0):
                tik_instance.vadds(
                    (col_cube_num % NUM_CUBE) * CUBE_SIZE *
                    self.vadds_mask // MAX_MASK,
                    ub_trans[col_cube_num // NUM_CUBE * NUM_CUBE *
                             self.dst_shape[-3] * self.dst_shape[-2] *
                             self.dst_shape[-1] +
                             (MAX_REPEATS *
                              ((CUBE_SIZE * self.dst_shape[-3]) //
                               MAX_REPEATS)) * CUBE_SIZE +
                             col_cube_num // NUM_CUBE * MAX_MASK],
                    ub_ori[(MAX_REPEATS *
                            ((CUBE_SIZE * self.dst_shape[-3]) //
                             MAX_REPEATS)) *
                           self.dst_shape[-1] * col_cube_num +
                           col_cube_num // NUM_CUBE * NUM_CUBE *
                           CUBE_SIZE],
                    scalar_zero,
                    (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                    self.dst_shape[-3] * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    col_cube_num * self.dst_shape[-1] // self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        (col_cube_num % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_trans[col_cube_num // NUM_CUBE * NUM_CUBE *
                                 self.dst_shape[-3] *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 (MAX_REPEATS *
                                  ((CUBE_SIZE * self.dst_shape[-3]) //
                                   MAX_REPEATS)) * CUBE_SIZE +
                                 col_cube_num // NUM_CUBE *
                                 MAX_MASK + CUBE_SIZE // 2],
                        ub_ori[(MAX_REPEATS *
                                ((CUBE_SIZE * self.dst_shape[-3]) //
                                 MAX_REPEATS)) *
                               self.dst_shape[-1] *
                               col_cube_num + col_cube_num //
                               NUM_CUBE * NUM_CUBE *
                               CUBE_SIZE + CUBE_SIZE // 2],
                        scalar_zero,
                        (CUBE_SIZE * self.dst_shape[-3]) % MAX_REPEATS,
                        self.dst_shape[-3] * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        col_cube_num * self.dst_shape[-1] //
                        self.num_data)
    def data_rearrange_case_two(self, tik_instance, ub_ori, num_loop_time,
                                loop_row, is_last):
        """
        rearrange data when UB can not put in second last axis * 16 data

        Only zero-padding is performed here (no vadds copy into a
        transposed buffer).

        Parameters
        ----------
        tik_instance : the Tik container being built
        ub_ori : UB tensor holding the current strip of rows
        num_loop_time : index of the current column-cube being processed;
            the last one (dst_shape[-4] - 1) holds the ragged last-axis tail
        loop_row : number of cube-rows held in UB this iteration
        is_last : 1 when this strip contains the ragged second-last-axis
            tail rows
        """
        scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)
        # Pad the last-axis tail, only in the final column-cube.
        with tik_instance.if_scope(num_loop_time == self.dst_shape[-4] - 1):
            if self.src_shape[-1] % CUBE_SIZE != 0:
                mask = 0
                for i, _ in enumerate(range(CUBE_SIZE - self.src_shape[-1] %
                                            CUBE_SIZE)):
                    mask += 2 ** (CUBE_SIZE - 1 - i)
                with tik_instance.for_range(0, loop_row * CUBE_SIZE //
                                            MAX_REPEATS) as num_repeat:
                    tik_instance.vector_dup(
                        [0, mask],
                        ub_ori[MAX_REPEATS * num_repeat *
                               self.dst_shape[-1]],
                        scalar_zero, MAX_REPEATS, 0,
                        self.dst_shape[-1] // self.num_data)
                with tik_instance.if_scope(loop_row * CUBE_SIZE %
                                           MAX_REPEATS != 0):
                    tik_instance.vector_dup(
                        [0, mask],
                        ub_ori[(loop_row * CUBE_SIZE // MAX_REPEATS) *
                               MAX_REPEATS * self.dst_shape[-1]],
                        scalar_zero, loop_row * CUBE_SIZE % MAX_REPEATS, 0,
                        self.dst_shape[-1] // self.num_data)
        # Pad the second-last-axis tail rows, only on the last strip.
        with tik_instance.if_scope(is_last == 1):
            if self.src_shape[-2] % CUBE_SIZE != 0:
                tik_instance.vector_dup(
                    CUBE_SIZE,
                    ub_ori[((loop_row - 1) * self.dst_shape[-2] +
                            self.src_shape[-2] % CUBE_SIZE) *
                           self.dst_shape[-1]],
                    scalar_zero,
                    CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE, 0,
                    self.num_byte // 2)
    def data_rearrange_case_three(self, tik_instance, ub_ori, ub_trans,
                                  loop_num, is_last):
        """
        rearrange data when UB can put in last axis * 16 data and
        the shape of dst is 4-D

        Parameters
        ----------
        tik_instance : the Tik container being built
        ub_ori : UB tensor holding the raw ND-ordered rows
        ub_trans : UB tensor receiving the NZ-ordered result
        loop_num : number of cube-rows processed in this call
        is_last : 1 when this call processes the ragged second-last-axis
            tail, so tail-row padding must be applied
        """
        num_row_one_loop = loop_num * CUBE_SIZE
        scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)
        # --- pad the last-axis tail with zeros via a bit mask over the
        # padding lanes of each trailing cube
        if self.src_shape[-1] % CUBE_SIZE != 0:
            mask = 0
            for i, _ in enumerate(range(CUBE_SIZE -
                                        self.src_shape[-1] % CUBE_SIZE)):
                mask += 2 ** (CUBE_SIZE - 1 - i)
            with tik_instance.for_range(0, num_row_one_loop // MAX_REPEATS) \
                    as num_repeat:
                tik_instance.vector_dup(
                    [0, mask],
                    ub_ori[(MAX_REPEATS * num_repeat + 1) *
                           self.dst_shape[-4] * self.dst_shape[-1] -
                           CUBE_SIZE],
                    scalar_zero, MAX_REPEATS, 0,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
            with tik_instance.if_scope(num_row_one_loop % MAX_REPEATS != 0):
                tik_instance.vector_dup(
                    [0, mask],
                    ub_ori[((num_row_one_loop // MAX_REPEATS) *
                            MAX_REPEATS + 1) *
                           self.dst_shape[-4] * self.dst_shape[-1] -
                           CUBE_SIZE],
                    scalar_zero, num_row_one_loop % MAX_REPEATS, 0,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
        # --- pad the second-last-axis tail rows, only on the last call
        with tik_instance.if_scope(is_last == 1):
            if (self.src_shape[-2] % CUBE_SIZE) != 0:
                with tik_instance.for_range(0, self.dst_shape[-4] //
                                            NUM_CUBE) as num_col_cube:
                    tik_instance.vector_dup(
                        self.vadds_mask,
                        ub_ori[((loop_num - 1) * self.dst_shape[-2] +
                                self.src_shape[-2] % CUBE_SIZE) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               num_col_cube * NUM_CUBE * CUBE_SIZE],
                        scalar_zero,
                        CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        # float32: second dup for the upper half of the row
                        tik_instance.vector_dup(
                            self.vadds_mask,
                            ub_ori[((loop_num - 1) * self.dst_shape[-2] +
                                    self.src_shape[-2] % CUBE_SIZE) *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-4] +
                                   num_col_cube * NUM_CUBE * CUBE_SIZE +
                                   CUBE_SIZE // 2],
                            scalar_zero,
                            CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
                if self.dst_shape[-4] % NUM_CUBE != 0:
                    tik_instance.vector_dup(
                        (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_ori[((loop_num - 1) * self.dst_shape[-2] +
                                self.src_shape[-2] % CUBE_SIZE) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               self.dst_shape[-4] // NUM_CUBE *
                               NUM_CUBE * CUBE_SIZE],
                        scalar_zero,
                        CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
                    if self.vadds_mask == MAX_MASK // 2:
                        tik_instance.vector_dup(
                            (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                            self.vadds_mask // MAX_MASK,
                            ub_ori[((loop_num - 1) * self.dst_shape[-2] +
                                    self.src_shape[-2] % CUBE_SIZE) *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-4] +
                                   self.dst_shape[-4] // NUM_CUBE *
                                   NUM_CUBE * CUBE_SIZE +
                                   CUBE_SIZE // 2],
                            scalar_zero,
                            CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE,
                            self.num_byte // 2,
                            self.dst_shape[-4] * self.dst_shape[-1] //
                            self.num_data)
        # --- main rearrange: strided vadds-copy of each full NUM_CUBE
        # column group into its NZ position in ub_trans
        with tik_instance.for_range(0, self.dst_shape[-4] // NUM_CUBE) \
                as num_col_cube:
            with tik_instance.for_range(0, CUBE_SIZE * loop_num //
                                        MAX_REPEATS) as num_repeat_one:
                tik_instance.vadds(
                    self.vadds_mask,
                    ub_trans[num_col_cube * NUM_CUBE * loop_num *
                             self.dst_shape[-2] * self.dst_shape[-1] +
                             MAX_REPEATS * num_repeat_one * CUBE_SIZE +
                             MAX_MASK * num_col_cube],
                    ub_ori[MAX_REPEATS * num_repeat_one *
                           self.dst_shape[-1] * self.dst_shape[-4] +
                           num_col_cube * MAX_MASK],
                    scalar_zero, MAX_REPEATS,
                    loop_num * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        self.vadds_mask,
                        ub_trans[num_col_cube * NUM_CUBE * loop_num *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS * num_repeat_one * CUBE_SIZE +
                                 MAX_MASK * num_col_cube +
                                 CUBE_SIZE // 2],
                        ub_ori[MAX_REPEATS * num_repeat_one *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               num_col_cube * MAX_MASK +
                               CUBE_SIZE // 2],
                        scalar_zero, MAX_REPEATS,
                        loop_num * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
            # remainder repeats past a multiple of MAX_REPEATS
            with tik_instance.if_scope((CUBE_SIZE * loop_num) %
                                       MAX_REPEATS != 0):
                tik_instance.vadds(
                    self.vadds_mask,
                    ub_trans[num_col_cube * NUM_CUBE * loop_num *
                             self.dst_shape[-2] * self.dst_shape[-1] +
                             MAX_REPEATS *
                             ((CUBE_SIZE * loop_num) // MAX_REPEATS) *
                             CUBE_SIZE +
                             MAX_MASK * num_col_cube],
                    ub_ori[MAX_REPEATS *
                           ((CUBE_SIZE * loop_num) // MAX_REPEATS) *
                           self.dst_shape[-1] * self.dst_shape[-4] +
                           num_col_cube * MAX_MASK],
                    scalar_zero,
                    (CUBE_SIZE * loop_num) % MAX_REPEATS,
                    loop_num * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        self.vadds_mask,
                        ub_trans[num_col_cube * NUM_CUBE * loop_num *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS *
                                 ((CUBE_SIZE * loop_num) //
                                  MAX_REPEATS) * CUBE_SIZE +
                                 MAX_MASK * num_col_cube +
                                 CUBE_SIZE // 2],
                        ub_ori[MAX_REPEATS *
                               ((CUBE_SIZE * loop_num) // MAX_REPEATS) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               num_col_cube * MAX_MASK +
                               CUBE_SIZE // 2],
                        scalar_zero,
                        (CUBE_SIZE * loop_num) % MAX_REPEATS,
                        loop_num * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
        # --- remainder column-cube group with a narrower vector mask
        if self.dst_shape[-4] % NUM_CUBE != 0:
            with tik_instance.for_range(0, CUBE_SIZE * loop_num //
                                        MAX_REPEATS) as num_repeat_one:
                tik_instance.vadds(
                    (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                    self.vadds_mask // MAX_MASK,
                    ub_trans[self.dst_shape[-4] // NUM_CUBE * NUM_CUBE *
                             loop_num * self.dst_shape[-2] *
                             self.dst_shape[-1] + MAX_REPEATS *
                             num_repeat_one * CUBE_SIZE +
                             self.dst_shape[-4] // NUM_CUBE * MAX_MASK],
                    ub_ori[MAX_REPEATS * num_repeat_one *
                           self.dst_shape[-1] * self.dst_shape[-4] +
                           self.dst_shape[-4] // NUM_CUBE * MAX_MASK],
                    scalar_zero, MAX_REPEATS,
                    loop_num * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_trans[self.dst_shape[-4] // NUM_CUBE *
                                 NUM_CUBE * loop_num *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS * num_repeat_one *
                                 CUBE_SIZE +
                                 self.dst_shape[-4] // NUM_CUBE *
                                 MAX_MASK + CUBE_SIZE // 2],
                        ub_ori[MAX_REPEATS * num_repeat_one *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               self.dst_shape[-4] // NUM_CUBE *
                               MAX_MASK + CUBE_SIZE // 2],
                        scalar_zero, MAX_REPEATS,
                        loop_num * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
            with tik_instance.if_scope((CUBE_SIZE * loop_num) %
                                       MAX_REPEATS != 0):
                tik_instance.vadds(
                    (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                    self.vadds_mask // MAX_MASK,
                    ub_trans[self.dst_shape[-4] // NUM_CUBE * NUM_CUBE *
                             loop_num * self.dst_shape[-2] *
                             self.dst_shape[-1] +
                             MAX_REPEATS *
                             ((CUBE_SIZE * loop_num) // MAX_REPEATS) *
                             CUBE_SIZE +
                             self.dst_shape[-4] // NUM_CUBE * MAX_MASK],
                    ub_ori[MAX_REPEATS *
                           ((CUBE_SIZE * loop_num) // MAX_REPEATS) *
                           self.dst_shape[-1] * self.dst_shape[-4] +
                           self.dst_shape[-4] // NUM_CUBE * MAX_MASK],
                    scalar_zero,
                    (CUBE_SIZE * loop_num) % MAX_REPEATS,
                    loop_num * self.dst_shape[-2] *
                    self.dst_shape[-1] // self.num_data +
                    self.num_byte // 2, self.num_byte // 2,
                    self.num_byte // 2,
                    self.dst_shape[-4] * self.dst_shape[-1] //
                    self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(
                        (self.dst_shape[-4] % NUM_CUBE) * CUBE_SIZE *
                        self.vadds_mask // MAX_MASK,
                        ub_trans[self.dst_shape[-4] // NUM_CUBE *
                                 NUM_CUBE * loop_num *
                                 self.dst_shape[-2] *
                                 self.dst_shape[-1] +
                                 MAX_REPEATS *
                                 ((CUBE_SIZE * loop_num) //
                                  MAX_REPEATS) * CUBE_SIZE +
                                 self.dst_shape[-4] // NUM_CUBE *
                                 MAX_MASK + CUBE_SIZE // 2],
                        ub_ori[MAX_REPEATS *
                               ((CUBE_SIZE * loop_num) // MAX_REPEATS) *
                               self.dst_shape[-1] * self.dst_shape[-4] +
                               self.dst_shape[-4] // NUM_CUBE *
                               MAX_MASK + CUBE_SIZE // 2],
                        scalar_zero,
                        (CUBE_SIZE * loop_num) % MAX_REPEATS,
                        loop_num * self.dst_shape[-2] *
                        self.dst_shape[-1] // self.num_data +
                        self.num_byte // 2, self.num_byte // 2,
                        self.num_byte // 2,
                        self.dst_shape[-4] * self.dst_shape[-1] //
                        self.num_data)
    def data_rearrange_case_four(self, tik_instance, ub_ori, ub_trans,
                                 num_loop_time, loop_num, is_last):
        """
        rearrange data when UB can not put in last axis * 16 data and
        the shape of dst is 4-D

        Zero-pads the ragged tail of the source (when the last two source
        axes are not CUBE_SIZE-aligned) and then scatters ``ub_ori`` into
        ``ub_trans`` with strided ``vadds`` instructions; adding the scalar
        zero makes ``vadds`` act as a strided copy.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions
        ub_ori: UB tensor holding the data as moved in from GM
        ub_trans: UB tensor receiving the rearranged data
        num_loop_time: index of the current outer loop iteration
        loop_num: number of CUBE_SIZE columns handled in this call
        is_last: runtime scalar flag (1 on the final loop) that enables
            the tail-padding branch
        """
        # Scalar 0 serves both as the fill value for vector_dup padding and
        # as the addend that turns vadds into a plain strided copy.
        scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)
        with tik_instance.if_scope(is_last == 1):
            # Pad the ragged tail of the last source axis: build a bitmask
            # selecting the (CUBE_SIZE - src[-1] % CUBE_SIZE) high lanes and
            # zero them in every row of the final cube column.
            if self.src_shape[-1] % CUBE_SIZE != 0:
                mask = 0
                for i, _ in enumerate(range(CUBE_SIZE -
                                            self.src_shape[-1] % CUBE_SIZE)):
                    mask += 2 ** (CUBE_SIZE - 1 - i)
                tik_instance.vector_dup([0, mask],
                                        ub_ori[loop_num * CUBE_SIZE -
                                               CUBE_SIZE],
                                        scalar_zero, CUBE_SIZE,
                                        0, loop_num * CUBE_SIZE //
                                        self.num_data)
            # Pad the ragged tail rows of the second-last source axis, but
            # only for the last third-last-axis block (num_loop_time at its
            # final value).
            if (self.src_shape[-2] % CUBE_SIZE) != 0:
                with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):
                    with tik_instance.for_range(0, loop_num // NUM_CUBE) \
                            as num_col_cube:
                        tik_instance.vector_dup(self.vadds_mask,
                                                ub_ori[(self.src_shape[-2] %
                                                        CUBE_SIZE) *
                                                       self.dst_shape[-1] *
                                                       loop_num + num_col_cube *
                                                       NUM_CUBE *
                                                       CUBE_SIZE],
                                                scalar_zero,
                                                CUBE_SIZE - self.src_shape[-2] %
                                                CUBE_SIZE,
                                                self.num_byte // 2,
                                                loop_num * self.dst_shape[-1] //
                                                self.num_data)
                        # Second pass covers the upper half of each cube when
                        # the mask is halved (presumably the fp32 case —
                        # TODO confirm against self.vadds_mask setup).
                        if self.vadds_mask == MAX_MASK // 2:
                            tik_instance.vector_dup(self.vadds_mask,
                                                    ub_ori[(self.src_shape[-2] %
                                                            CUBE_SIZE) *
                                                           self.dst_shape[-1] *
                                                           loop_num +
                                                           num_col_cube *
                                                           NUM_CUBE * CUBE_SIZE +
                                                           CUBE_SIZE // 2],
                                                    scalar_zero,
                                                    CUBE_SIZE -
                                                    self.src_shape[-2] % CUBE_SIZE,
                                                    self.num_byte // 2,
                                                    loop_num *
                                                    self.dst_shape[-1] //
                                                    self.num_data)
                    # Handle the leftover cube columns that did not fill a
                    # whole NUM_CUBE group.
                    if loop_num % NUM_CUBE != 0:
                        tik_instance.vector_dup((loop_num % NUM_CUBE) *
                                                CUBE_SIZE * self.vadds_mask //
                                                MAX_MASK,
                                                ub_ori[(self.src_shape[-2] %
                                                        CUBE_SIZE) *
                                                       self.dst_shape[-1] *
                                                       loop_num + loop_num //
                                                       NUM_CUBE * NUM_CUBE *
                                                       CUBE_SIZE],
                                                scalar_zero,
                                                CUBE_SIZE - self.src_shape[-2] %
                                                CUBE_SIZE,
                                                self.num_byte // 2,
                                                loop_num * self.dst_shape[-1] //
                                                self.num_data)
                        if self.vadds_mask == MAX_MASK // 2:
                            tik_instance.vector_dup((loop_num % NUM_CUBE) *
                                                    CUBE_SIZE * self.vadds_mask //
                                                    MAX_MASK,
                                                    ub_ori[(self.src_shape[-2] %
                                                            CUBE_SIZE) *
                                                           self.dst_shape[-1] *
                                                           loop_num + loop_num //
                                                           NUM_CUBE * NUM_CUBE *
                                                           CUBE_SIZE +
                                                           CUBE_SIZE // 2],
                                                    scalar_zero,
                                                    CUBE_SIZE -
                                                    self.src_shape[-2] %
                                                    CUBE_SIZE,
                                                    self.num_byte // 2,
                                                    loop_num *
                                                    self.dst_shape[-1] //
                                                    self.num_data)
        # Scatter: copy each NUM_CUBE group of cube columns from ub_ori into
        # its strided destination layout in ub_trans via vadds(+0).
        with tik_instance.for_range(0, loop_num // NUM_CUBE) \
                as num_col_cube:
            tik_instance.vadds(self.vadds_mask,
                               ub_trans[num_col_cube * NUM_CUBE *
                                        self.dst_shape[-2] *
                                        self.dst_shape[-1] +
                                        MAX_MASK * num_col_cube],
                               ub_ori[num_col_cube * MAX_MASK],
                               scalar_zero, CUBE_SIZE,
                               self.dst_shape[-2] * self.dst_shape[-1] //
                               self.num_data + self.num_byte // 2,
                               self.num_byte // 2, self.num_byte // 2,
                               loop_num * self.dst_shape[-1] // self.num_data)
            if self.vadds_mask == MAX_MASK // 2:
                tik_instance.vadds(self.vadds_mask,
                                   ub_trans[num_col_cube * NUM_CUBE *
                                            self.dst_shape[-2] *
                                            self.dst_shape[-1] +
                                            MAX_MASK * num_col_cube +
                                            CUBE_SIZE // 2],
                                   ub_ori[num_col_cube * MAX_MASK +
                                          CUBE_SIZE // 2],
                                   scalar_zero, CUBE_SIZE,
                                   self.dst_shape[-2] * self.dst_shape[-1] //
                                   self.num_data + self.num_byte // 2,
                                   self.num_byte // 2, self.num_byte // 2,
                                   loop_num * self.dst_shape[-1] //
                                   self.num_data)
        # Scatter the leftover cube columns (partial NUM_CUBE group) with a
        # proportionally reduced mask.
        if loop_num % NUM_CUBE != 0:
            tik_instance.vadds((loop_num % NUM_CUBE) * CUBE_SIZE *
                               self.vadds_mask // MAX_MASK,
                               ub_trans[loop_num // NUM_CUBE * NUM_CUBE *
                                        self.dst_shape[-2] *
                                        self.dst_shape[-1] + loop_num //
                                        NUM_CUBE * MAX_MASK],
                               ub_ori[loop_num // NUM_CUBE * MAX_MASK],
                               scalar_zero, CUBE_SIZE,
                               self.dst_shape[-2] * self.dst_shape[-1] //
                               self.num_data + self.num_byte // 2,
                               self.num_byte // 2, self.num_byte // 2,
                               loop_num * self.dst_shape[-1] // self.num_data)
            if self.vadds_mask == MAX_MASK // 2:
                tik_instance.vadds((loop_num % NUM_CUBE) * CUBE_SIZE *
                                   self.vadds_mask // MAX_MASK,
                                   ub_trans[loop_num // NUM_CUBE *
                                            NUM_CUBE * self.dst_shape[-2] *
                                            self.dst_shape[-1] +
                                            loop_num // NUM_CUBE *
                                            MAX_MASK + CUBE_SIZE // 2],
                                   ub_ori[loop_num // NUM_CUBE *
                                          MAX_MASK + CUBE_SIZE // 2],
                                   scalar_zero, CUBE_SIZE,
                                   self.dst_shape[-2] * self.dst_shape[-1] //
                                   self.num_data +
                                   self.num_byte // 2, self.num_byte // 2,
                                   self.num_byte // 2,
                                   loop_num * self.dst_shape[-1] //
                                   self.num_data)
    def data_rearrange_case_five(self, tik_instance, ub_ori, ub_trans,
                                 loop_num):
        """
        rearrange data when UB // 2 can put in last axis * 16 data and
        the shape of dst is 4-D

        Scatters ``ub_ori`` into ``ub_trans`` with strided ``vadds``
        instructions (adding scalar zero, i.e. a strided copy).  Because a
        single vector instruction repeats at most MAX_REPEATS times, the
        CUBE_SIZE * loop_num rows are processed in MAX_REPEATS-sized chunks
        plus one remainder issue, and the dst_shape[-4] cube columns are
        processed in NUM_CUBE groups plus one partial group.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions
        ub_ori: UB tensor holding the data as moved in from GM
        ub_trans: UB tensor receiving the rearranged data
        loop_num: number of third-last-axis blocks handled in this call
        """
        # Zero addend: vadds(+0) acts as a strided copy.
        scalar_zero = tik_instance.Scalar(dtype=self.dtype, init_value=0.0)
        # Full NUM_CUBE groups of cube columns.
        with tik_instance.for_range(0, self.dst_shape[-4] // NUM_CUBE) \
                as num_col_cube:
            # Full MAX_REPEATS chunks of rows.
            with tik_instance.for_range(0, CUBE_SIZE * loop_num //
                                        MAX_REPEATS) as num_repeat_one:
                tik_instance.vadds(self.vadds_mask,
                                   ub_trans[num_col_cube * NUM_CUBE *
                                            loop_num * self.dst_shape[-2] *
                                            self.dst_shape[-1] +
                                            MAX_REPEATS *
                                            num_repeat_one * CUBE_SIZE +
                                            MAX_MASK * num_col_cube],
                                   ub_ori[MAX_REPEATS * num_repeat_one *
                                          self.dst_shape[-1] *
                                          self.dst_shape[-4] +
                                          num_col_cube * MAX_MASK],
                                   scalar_zero, MAX_REPEATS, loop_num *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-1] // self.num_data +
                                   self.num_byte // 2, self.num_byte // 2,
                                   self.num_byte // 2,
                                   self.dst_shape[-4] * self.dst_shape[-1] //
                                   self.num_data)
                # Second pass for the upper half of each cube when the mask
                # is halved (presumably the wider dtype — TODO confirm).
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(self.vadds_mask,
                                       ub_trans[num_col_cube * NUM_CUBE *
                                                loop_num *
                                                self.dst_shape[-2] *
                                                self.dst_shape[-1] +
                                                MAX_REPEATS *
                                                num_repeat_one * CUBE_SIZE +
                                                MAX_MASK * num_col_cube +
                                                CUBE_SIZE // 2],
                                       ub_ori[MAX_REPEATS * num_repeat_one *
                                              self.dst_shape[-1] *
                                              self.dst_shape[-4] +
                                              num_col_cube * MAX_MASK +
                                              CUBE_SIZE // 2],
                                       scalar_zero, MAX_REPEATS, loop_num *
                                       self.dst_shape[-2] *
                                       self.dst_shape[-1] // self.num_data +
                                       self.num_byte // 2, self.num_byte // 2,
                                       self.num_byte // 2,
                                       self.dst_shape[-4] *
                                       self.dst_shape[-1] // self.num_data)
            # Remaining rows that did not fill a whole MAX_REPEATS chunk.
            with tik_instance.if_scope((CUBE_SIZE * loop_num) %
                                       MAX_REPEATS != 0):
                tik_instance.vadds(self.vadds_mask,
                                   ub_trans[num_col_cube * NUM_CUBE *
                                            loop_num * self.dst_shape[-2] *
                                            self.dst_shape[-1] +
                                            MAX_REPEATS *
                                            ((CUBE_SIZE * loop_num) //
                                             MAX_REPEATS) * CUBE_SIZE +
                                            MAX_MASK * num_col_cube],
                                   ub_ori[MAX_REPEATS *
                                          ((CUBE_SIZE * loop_num) //
                                           MAX_REPEATS) *
                                          self.dst_shape[-1] *
                                          self.dst_shape[-4] + num_col_cube *
                                          MAX_MASK],
                                   scalar_zero,
                                   (CUBE_SIZE * loop_num) % MAX_REPEATS,
                                   loop_num *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-1] // self.num_data +
                                   self.num_byte // 2, self.num_byte // 2,
                                   self.num_byte // 2,
                                   self.dst_shape[-4] * self.dst_shape[-1] //
                                   self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds(self.vadds_mask,
                                       ub_trans[num_col_cube * NUM_CUBE *
                                                loop_num *
                                                self.dst_shape[-2] *
                                                self.dst_shape[-1] +
                                                MAX_REPEATS *
                                                ((CUBE_SIZE * loop_num) //
                                                 MAX_REPEATS) * CUBE_SIZE +
                                                MAX_MASK * num_col_cube +
                                                CUBE_SIZE // 2],
                                       ub_ori[MAX_REPEATS *
                                              ((CUBE_SIZE * loop_num) //
                                               MAX_REPEATS) *
                                              self.dst_shape[-1] *
                                              self.dst_shape[-4] +
                                              num_col_cube * MAX_MASK +
                                              CUBE_SIZE // 2],
                                       scalar_zero,
                                       (CUBE_SIZE * loop_num) % MAX_REPEATS,
                                       loop_num *
                                       self.dst_shape[-2] *
                                       self.dst_shape[-1] // self.num_data +
                                       self.num_byte // 2, self.num_byte // 2,
                                       self.num_byte // 2,
                                       self.dst_shape[-4] *
                                       self.dst_shape[-1] //
                                       self.num_data)
        # Partial NUM_CUBE group of cube columns, same row chunking as above
        # but with a proportionally reduced vadds mask.
        if self.dst_shape[-4] % NUM_CUBE != 0:
            with tik_instance.for_range(0, CUBE_SIZE * loop_num //
                                        MAX_REPEATS) as num_repeat_one:
                tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *
                                   CUBE_SIZE * self.vadds_mask // MAX_MASK,
                                   ub_trans[self.dst_shape[-4] // NUM_CUBE *
                                            NUM_CUBE * loop_num *
                                            self.dst_shape[-2] *
                                            self.dst_shape[-1] + MAX_REPEATS *
                                            num_repeat_one * CUBE_SIZE +
                                            self.dst_shape[-4] // NUM_CUBE *
                                            MAX_MASK],
                                   ub_ori[MAX_REPEATS * num_repeat_one *
                                          self.dst_shape[-1] *
                                          self.dst_shape[-4] +
                                          self.dst_shape[-4] // NUM_CUBE *
                                          MAX_MASK],
                                   scalar_zero, MAX_REPEATS, loop_num *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-1] // self.num_data +
                                   self.num_byte // 2, self.num_byte // 2,
                                   self.num_byte // 2,
                                   self.dst_shape[-4] * self.dst_shape[-1] //
                                   self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *
                                       CUBE_SIZE * self.vadds_mask // MAX_MASK,
                                       ub_trans[self.dst_shape[-4] //
                                                NUM_CUBE * NUM_CUBE *
                                                loop_num *
                                                self.dst_shape[-2] *
                                                self.dst_shape[-1] +
                                                MAX_REPEATS *
                                                num_repeat_one * CUBE_SIZE +
                                                self.dst_shape[-4] //
                                                NUM_CUBE * MAX_MASK +
                                                CUBE_SIZE // 2],
                                       ub_ori[MAX_REPEATS * num_repeat_one *
                                              self.dst_shape[-1] *
                                              self.dst_shape[-4] +
                                              self.dst_shape[-4] // NUM_CUBE *
                                              MAX_MASK + CUBE_SIZE // 2],
                                       scalar_zero, MAX_REPEATS, loop_num *
                                       self.dst_shape[-2] *
                                       self.dst_shape[-1] // self.num_data +
                                       self.num_byte // 2, self.num_byte // 2,
                                       self.num_byte // 2,
                                       self.dst_shape[-4] *
                                       self.dst_shape[-1] // self.num_data)
            # Remainder rows for the partial cube-column group.
            with tik_instance.if_scope((CUBE_SIZE * loop_num) %
                                       MAX_REPEATS != 0):
                tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *
                                   CUBE_SIZE * self.vadds_mask // MAX_MASK,
                                   ub_trans[self.dst_shape[-4] // NUM_CUBE *
                                            NUM_CUBE * loop_num *
                                            self.dst_shape[-2] *
                                            self.dst_shape[-1] +
                                            MAX_REPEATS *
                                            ((CUBE_SIZE * loop_num) //
                                             MAX_REPEATS) * CUBE_SIZE +
                                            self.dst_shape[-4] // NUM_CUBE *
                                            MAX_MASK],
                                   ub_ori[MAX_REPEATS *
                                          ((CUBE_SIZE * loop_num) //
                                           MAX_REPEATS) *
                                          self.dst_shape[-1] *
                                          self.dst_shape[-4] +
                                          self.dst_shape[-4] // NUM_CUBE *
                                          MAX_MASK],
                                   scalar_zero,
                                   (CUBE_SIZE * loop_num) % MAX_REPEATS,
                                   loop_num *
                                   self.dst_shape[-2] *
                                   self.dst_shape[-1] // self.num_data +
                                   self.num_byte // 2, self.num_byte // 2,
                                   self.num_byte // 2,
                                   self.dst_shape[-4] * self.dst_shape[-1] //
                                   self.num_data)
                if self.vadds_mask == MAX_MASK // 2:
                    tik_instance.vadds((self.dst_shape[-4] % NUM_CUBE) *
                                       CUBE_SIZE * self.vadds_mask // MAX_MASK,
                                       ub_trans[self.dst_shape[-4] //
                                                NUM_CUBE * NUM_CUBE *
                                                loop_num *
                                                self.dst_shape[-2] *
                                                self.dst_shape[-1] +
                                                MAX_REPEATS *
                                                ((CUBE_SIZE * loop_num) //
                                                 MAX_REPEATS) * CUBE_SIZE +
                                                self.dst_shape[-4] //
                                                NUM_CUBE * MAX_MASK +
                                                CUBE_SIZE // 2],
                                       ub_ori[MAX_REPEATS *
                                              ((CUBE_SIZE * loop_num) //
                                               MAX_REPEATS) *
                                              self.dst_shape[-1] *
                                              self.dst_shape[-4] +
                                              self.dst_shape[-4] // NUM_CUBE *
                                              MAX_MASK + CUBE_SIZE // 2],
                                       scalar_zero,
                                       (CUBE_SIZE * loop_num) % MAX_REPEATS,
                                       loop_num *
                                       self.dst_shape[-2] *
                                       self.dst_shape[-1] // self.num_data +
                                       self.num_byte // 2, self.num_byte // 2,
                                       self.num_byte // 2,
                                       self.dst_shape[-4] *
                                       self.dst_shape[-1] //
                                       self.num_data)
    def format_transfer_case_zero(self, tik_instance):
        """
        the transfer process when UB can put in
        second last axis * last axis data, last axis is 32B align, and
        the shape of dst is not 4-D

        Splits the outer axes across AI cores, accumulates up to
        ``align_loop`` outer-axis slices in UB before rearranging them with
        ``data_rearrange_case_zero`` and flushing to GM (``remainder``
        slices on the final iteration of each core).

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions

        Returns
        -------
        tik_instance, with the transfer schedule emitted
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        # Outer loop count = product of the axes before the trailing 4-D
        # fractal part (1 when dst is exactly 4-D).
        if len(self.dst_shape) == 4:
            total_core_loop_num = 1
        else:
            total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                   self.dst_shape[:-4])
        core_number = _set_core_num(total_core_loop_num)
        # Elements produced per outer-axis slice.
        num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            # align_loop: how many slices (each padded by one extra row of
            # dst[-4] * dst[-1] elements) fit in UB; remainder: slices left
            # for the final flush.
            align_loop, remainder = _cal_core_loop(tik_instance,
                                                   num_data_one_loop +
                                                   self.dst_shape[-4] *
                                                   self.dst_shape[-1],
                                                   core_loop, ub_ori_data)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_outer_axis = total_core_loop
                src_gm_index = num_outer_axis * self.src_shape[-1] * \
                               self.src_shape[-2]
                if self.src_shape[-2] % CUBE_SIZE != 0:
                    # Ragged second-last axis: move each slice into its UB
                    # slot immediately (only the valid rows are copied).
                    tik_instance.data_move(ub_ori[(num_core_loop %
                                                   align_loop) *
                                                  num_data_one_loop],
                                           self.src_gm[src_gm_index],
                                           0, 1,
                                           self.dst_shape[-1] *
                                           self.dst_shape[-4] *
                                           self.src_shape[-2] // self.num_data,
                                           0, 0)
                    # Flush when UB is full (but not on the last iteration,
                    # which the remainder branch handles).
                    with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                       align_loop == 0,
                                                       num_core_loop !=
                                                       core_loop - 1)):
                        self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                      ub_trans, align_loop)
                        dst_gm_index = num_outer_axis * num_data_one_loop - \
                                       (align_loop - 1) * num_data_one_loop
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0, align_loop *
                                               self.dst_shape[-4],
                                               num_data_one_loop //
                                               self.dst_shape[-4] //
                                               self.num_data,
                                               self.num_byte // 2, 0)
                    # Final flush of the remaining slices.
                    with tik_instance.if_scope(num_core_loop == core_loop - 1):
                        self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                      ub_trans, remainder)
                        dst_gm_index = num_outer_axis * num_data_one_loop - \
                                       (remainder - 1) * num_data_one_loop
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0, remainder *
                                               self.dst_shape[-4],
                                               num_data_one_loop //
                                               self.dst_shape[-4] //
                                               self.num_data,
                                               self.num_byte // 2, 0)
                else:
                    # Aligned second-last axis: defer the GM->UB move and
                    # fetch a whole batch of slices in one data_move at
                    # flush time.
                    with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                       align_loop == 0,
                                                       num_core_loop !=
                                                       core_loop - 1)):
                        tik_instance.data_move(ub_ori[0],
                                               self.src_gm[src_gm_index - \
                                               (align_loop - 1) *
                                               num_data_one_loop],
                                               0, 1,
                                               align_loop *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-4] *
                                               self.src_shape[-2] //
                                               self.num_data,
                                               0, 0)
                        self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                      ub_trans, align_loop)
                        dst_gm_index = num_outer_axis * num_data_one_loop - \
                                       (align_loop - 1) * num_data_one_loop
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0, align_loop *
                                               self.dst_shape[-4],
                                               num_data_one_loop //
                                               self.dst_shape[-4] //
                                               self.num_data,
                                               self.num_byte // 2, 0)
                    with tik_instance.if_scope(num_core_loop == core_loop - 1):
                        tik_instance.data_move(ub_ori[0],
                                               self.src_gm[src_gm_index - \
                                               (remainder - 1) *
                                               num_data_one_loop],
                                               0, 1,
                                               remainder * self.dst_shape[-1] *
                                               self.dst_shape[-4] *
                                               self.src_shape[-2] //
                                               self.num_data,
                                               0, 0)
                        self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                      ub_trans, remainder)
                        dst_gm_index = num_outer_axis * num_data_one_loop - \
                                       (remainder - 1) * num_data_one_loop
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0, remainder *
                                               self.dst_shape[-4],
                                               num_data_one_loop //
                                               self.dst_shape[-4] //
                                               self.num_data,
                                               self.num_byte // 2, 0)
        return tik_instance
    def format_transfer_case_one(self, tik_instance):
        """
        the transfer process when UB can put in
        second last axis * last axis data, last axis is not 32B align, and
        the shape of dst is not 4-D

        Because the last axis is not 32B aligned, every source row must be
        moved GM->UB individually (row-by-row data_move) instead of one
        contiguous burst; the rearrange/flush batching mirrors
        ``format_transfer_case_zero``.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions

        Returns
        -------
        tik_instance, with the transfer schedule emitted
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        if len(self.dst_shape) == 4:
            total_core_loop_num = 1
        else:
            total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                   self.dst_shape[:-4])
        core_number = _set_core_num(total_core_loop_num)
        num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_outer_axis = total_core_loop
                align_loop, remainder = _cal_core_loop(tik_instance,
                                                       num_data_one_loop +
                                                       self.dst_shape[-4] *
                                                       self.dst_shape[-1],
                                                       core_loop,
                                                       ub_ori_data)
                src_gm_index = num_outer_axis * self.src_shape[-1] * \
                               self.src_shape[-2]
                src_ub_index = (num_core_loop % align_loop) * num_data_one_loop
                # Copy one source row at a time; rows past src_shape[-2] do
                # not exist, so they are skipped.
                with tik_instance.for_range(0, self.dst_shape[-3]) as num_cube:
                    with tik_instance.for_range(0, CUBE_SIZE) as num_cube_row:
                        with tik_instance.if_scope(num_cube * CUBE_SIZE +
                                                   num_cube_row >
                                                   self.src_shape[-2]):
                            pass
                        with tik_instance.else_scope():
                            tik_instance.data_move(ub_ori[src_ub_index +
                                                          (num_cube *
                                                           CUBE_SIZE +
                                                           num_cube_row) *
                                                          self.dst_shape[-1] *
                                                          self.dst_shape[-4]],
                                                   self.src_gm
                                                   [src_gm_index +
                                                    (num_cube * CUBE_SIZE +
                                                     num_cube_row) *
                                                    self.src_shape[-1]],
                                                   0, 1,
                                                   num_data_one_loop //
                                                   self.dst_shape[-2] //
                                                   self.dst_shape[-3] //
                                                   self.num_data, 0, 0)
                # move data from ub to gm when ub is full
                with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                   align_loop == 0,
                                                   num_core_loop !=
                                                   core_loop - 1)):
                    self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                  ub_trans, align_loop)
                    dst_gm_index = num_outer_axis * num_data_one_loop - \
                                   (align_loop - 1) * num_data_one_loop
                    tik_instance.data_move(self.dst_gm[dst_gm_index],
                                           ub_trans[0], 0, align_loop *
                                           self.dst_shape[-4],
                                           num_data_one_loop //
                                           self.dst_shape[-4] // self.num_data,
                                           self.num_byte // 2, 0)
                # move the remaining data
                with tik_instance.if_scope(num_core_loop == core_loop - 1):
                    self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                  ub_trans, remainder)
                    dst_gm_index = num_outer_axis * num_data_one_loop - \
                                   (remainder - 1) * num_data_one_loop
                    tik_instance.data_move(self.dst_gm[dst_gm_index],
                                           ub_trans[0], 0,
                                           remainder * self.dst_shape[-4],
                                           num_data_one_loop //
                                           self.dst_shape[-4] // self.num_data,
                                           self.num_byte // 2, 0)
        return tik_instance
    def format_transfer_case_two(self, tik_instance):
        """
        the transfer process when UB can not put in
        second last axis * last axis data and last axis is 32B align

        The last axis is tiled: each outer-axis slice is cut into
        ``loop_times`` column blocks of ``handling_times`` cubes, moved in
        with strided bursts (MAX_BURST_NUMBER rows per data_move),
        rearranged by ``data_rearrange_case_one`` and written back.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions

        Returns
        -------
        tik_instance, with the transfer schedule emitted
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        # Largest UB capacity that is a whole number of column-block units
        # (one cube column of dst[-3] cubes plus one padding row).
        loop_memory = ub_ori_data - ub_ori_data % \
                      (CUBE_SIZE * CUBE_SIZE * self.dst_shape[-3] + CUBE_SIZE)
        num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        num_data_one_loop_padding = num_data_one_loop + self.dst_shape[-4] * \
                                    self.dst_shape[-1]
        loop_times = (num_data_one_loop_padding + loop_memory - 1) // \
                     loop_memory
        if len(self.dst_shape) == 4:
            total_core_loop_num = loop_times
        else:
            total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                   self.dst_shape[:-4]) * \
                                  loop_times
        core_number = _set_core_num(total_core_loop_num)
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            src_ub_index = 0
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_loop_time = total_core_loop % loop_times
                num_outer_axis = (total_core_loop - num_loop_time) // \
                                 loop_times
                # handling_times: cube columns processed in this block;
                # is_last flags the final (possibly partial) block.
                handling_times = tik_instance.Scalar("uint64")
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                handling_times.set_as(loop_memory //
                                      (CUBE_SIZE * CUBE_SIZE *
                                       self.dst_shape[-3] + CUBE_SIZE))
                with tik_instance.if_scope(num_loop_time == loop_times - 1):
                    if num_data_one_loop_padding % loop_memory == 0:
                        remainder = loop_memory
                    else:
                        remainder = num_data_one_loop_padding % loop_memory
                    handling_times.set_as((remainder + CUBE_SIZE * CUBE_SIZE *
                                           self.dst_shape[-3] +
                                           CUBE_SIZE - 1) //
                                          (CUBE_SIZE * CUBE_SIZE *
                                           self.dst_shape[-3] + CUBE_SIZE))
                    is_last.set_as(1)
                src_gm_index = num_outer_axis * self.src_shape[-1] * \
                               self.src_shape[-2] + loop_memory // \
                               (CUBE_SIZE * self.dst_shape[-3] + 1) * \
                               num_loop_time
                # GM->UB: strided bursts, MAX_BURST_NUMBER rows per call.
                with tik_instance.for_range(0, self.src_shape[-2] //
                                            MAX_BURST_NUMBER) as num_repeat:
                    tik_instance.data_move(ub_ori[src_ub_index +
                                                  MAX_BURST_NUMBER *
                                                  num_repeat *
                                                  (loop_memory //
                                                   (CUBE_SIZE *
                                                    self.dst_shape[-3] + 1))],
                                           self.src_gm[src_gm_index +
                                                       MAX_BURST_NUMBER *
                                                       num_repeat *
                                                       self.src_shape[-1]],
                                           0, MAX_BURST_NUMBER,
                                           handling_times * self.num_byte // 2,
                                           (self.src_shape[-1] -
                                            handling_times * CUBE_SIZE +
                                            self.num_data - 1) //
                                           self.num_data, 0)
                # Remaining rows that did not fill a full MAX_BURST_NUMBER
                # burst group.
                with tik_instance.if_scope(self.src_shape[-2] %
                                           MAX_BURST_NUMBER != 0):
                    tik_instance.data_move(ub_ori[src_ub_index +
                                                  (self.src_shape[-2] //
                                                   MAX_BURST_NUMBER) *
                                                  MAX_BURST_NUMBER *
                                                  (loop_memory //
                                                   (CUBE_SIZE *
                                                    self.dst_shape[-3] + 1))],
                                           self.src_gm[src_gm_index +
                                                       (self.src_shape[-2] //
                                                        MAX_BURST_NUMBER) *
                                                       MAX_BURST_NUMBER *
                                                       self.src_shape[-1]], 0,
                                           self.src_shape[-2] %
                                           MAX_BURST_NUMBER,
                                           handling_times * self.num_byte // 2,
                                           (self.src_shape[-1] -
                                            handling_times * CUBE_SIZE +
                                            self.num_data - 1) //
                                           self.num_data, 0)
                self.data_rearrange_case_one(tik_instance, ub_ori, ub_trans,
                                             handling_times, is_last)
                dst_gm_index = num_outer_axis * num_data_one_loop + \
                               loop_memory // (CUBE_SIZE * CUBE_SIZE *
                                               self.dst_shape[-3] +
                                               CUBE_SIZE) * \
                               (CUBE_SIZE * CUBE_SIZE *
                                self.dst_shape[-3]) * num_loop_time
                tik_instance.data_move(self.dst_gm[dst_gm_index], ub_trans[0],
                                       0, handling_times,
                                       CUBE_SIZE * self.dst_shape[-3] *
                                       CUBE_SIZE // self.num_data,
                                       self.num_byte // 2, 0)
        return tik_instance
    def format_transfer_case_three(self, tik_instance):
        """
        the transfer process when UB can not put in
        second last axis * last axis data and last axis is not 32B align

        Same column-block tiling as ``format_transfer_case_two``, but the
        unaligned last axis forces a separate GM->UB data_move per source
        row instead of strided bursts.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions

        Returns
        -------
        tik_instance, with the transfer schedule emitted
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        # Largest UB capacity that is a whole number of column-block units.
        loop_memory = ub_ori_data - ub_ori_data % \
                      (CUBE_SIZE * CUBE_SIZE * self.dst_shape[-3] + CUBE_SIZE)
        num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        num_data_one_loop_padding = num_data_one_loop + self.dst_shape[-4] * \
                                    self.dst_shape[-1]
        loop_times = (num_data_one_loop_padding + loop_memory - 1) // \
                     loop_memory
        if len(self.dst_shape) == 4:
            total_core_loop_num = loop_times
        else:
            total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                   self.dst_shape[:-4]) * \
                                  loop_times
        core_number = _set_core_num(total_core_loop_num)
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_loop_time = total_core_loop % loop_times
                num_outer_axis = (total_core_loop - num_loop_time) // \
                                 loop_times
                handling_times = tik_instance.Scalar("uint64")
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                handling_times.set_as(loop_memory // (CUBE_SIZE * CUBE_SIZE *
                                                      self.dst_shape[-3] +
                                                      CUBE_SIZE))
                # Final block: recompute handling_times from the leftover
                # element count and flag the tail for padding.
                with tik_instance.if_scope(num_loop_time == loop_times - 1):
                    if num_data_one_loop_padding % loop_memory == 0:
                        remainder = loop_memory
                    else:
                        remainder = num_data_one_loop_padding % loop_memory
                    handling_times.set_as((remainder + CUBE_SIZE * CUBE_SIZE *
                                           self.dst_shape[-3] + CUBE_SIZE -
                                           1) // (CUBE_SIZE * CUBE_SIZE *
                                                  self.dst_shape[-3] +
                                                  CUBE_SIZE))
                    is_last.set_as(1)
                src_ub_index = 0
                src_gm_index = num_outer_axis * self.src_shape[-1] * \
                               self.src_shape[-2] + loop_memory // \
                               (CUBE_SIZE * CUBE_SIZE * self.dst_shape[-3] +
                                CUBE_SIZE) * num_loop_time * CUBE_SIZE
                # Row-by-row GM->UB copy (unaligned rows cannot be bursted).
                with tik_instance.for_range(0, self.src_shape[-2]) as num_cube:
                    tik_instance.data_move(ub_ori[src_ub_index +
                                                  num_cube *
                                                  handling_times *
                                                  self.dst_shape[-1]],
                                           self.src_gm[src_gm_index +
                                                       num_cube *
                                                       self.src_shape[-1]],
                                           0, 1,
                                           (handling_times * CUBE_SIZE +
                                            self.num_data - 1) //
                                           self.num_data, 0, 0)
                self.data_rearrange_case_one(tik_instance, ub_ori, ub_trans,
                                             handling_times, is_last)
                dst_gm_index = num_outer_axis * num_data_one_loop + \
                               loop_memory // \
                               (CUBE_SIZE * CUBE_SIZE * self.dst_shape[-3] +
                                CUBE_SIZE) * (CUBE_SIZE * CUBE_SIZE *
                                              self.dst_shape[-3]) * \
                               num_loop_time
                tik_instance.data_move(self.dst_gm[dst_gm_index], ub_trans[0],
                                       0, handling_times,
                                       CUBE_SIZE * self.dst_shape[-3] *
                                       CUBE_SIZE // self.num_data,
                                       self.num_byte // 2, 0)
        return tik_instance
    def format_transfer_case_four(self, tik_instance):
        """
        the transfer process when UB can not put in second last axis * 16 data

        Tiles along the third-last destination axis: each core owns one
        dst[-4] column block, streams ``loop_row`` cubes at a time through
        a single UB buffer (``loop_remainder`` cubes on the final group),
        rearranging in place with ``data_rearrange_case_two``.  When the
        source last axis is unaligned (or its stride would exceed
        MAX_STRIDE_BLK) the GM->UB copy degrades to one data_move per row.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions

        Returns
        -------
        tik_instance, with the transfer schedule emitted
        """
        ub_ori_data = self.ub_memory
        # loop_row cube rows fit in UB at once; loop_remainder are left for
        # the last group.
        loop_row, loop_remainder = _cal_core_loop(tik_instance,
                                                  CUBE_SIZE * CUBE_SIZE,
                                                  self.dst_shape[-3],
                                                  ub_ori_data)
        num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        loop_times = self.dst_shape[-4]
        if len(self.dst_shape) == 4:
            total_core_loop_num = loop_times
        else:
            total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                   self.dst_shape[:-4]) * \
                                  loop_times
        core_number = _set_core_num(total_core_loop_num)
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_loop_time = total_core_loop % loop_times
                num_outer_axis = (total_core_loop - num_loop_time) // \
                                 loop_times
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                src_ub_index = 0
                # count_loop tracks how many full loop_row groups have been
                # flushed so far.
                count_loop = tik_instance.Scalar("uint64")
                count_loop.set_as(0)
                with tik_instance.for_range(0, self.dst_shape[-3]) as num_cube:
                    # Start of a full loop_row group (not the tail group).
                    with tik_instance.if_scope(
                            tik.all(num_cube % loop_row == 0,
                                    num_cube != self.dst_shape[-3] -
                                    loop_remainder)):
                        if self.src_shape[-1] % CUBE_SIZE != 0 or \
                                (self.src_shape[-1] - CUBE_SIZE) // \
                                self.num_data > MAX_STRIDE_BLK:
                            # Per-row copies: unaligned last axis or stride
                            # too large for a single strided data_move.
                            with tik_instance.for_range(0, loop_row) \
                                    as num_loop_row:
                                with tik_instance.for_range(0, CUBE_SIZE) \
                                        as num_cube_row:
                                    src_gm_index = num_outer_axis * \
                                                   self.src_shape[-1] * \
                                                   self.src_shape[-2] + \
                                                   num_loop_time * \
                                                   CUBE_SIZE + \
                                                   ((count_loop * loop_row +
                                                     num_loop_row) *
                                                    CUBE_SIZE +
                                                    num_cube_row) * \
                                                   self.src_shape[-1]
                                    tik_instance.data_move(ub_ori
                                                           [src_ub_index +
                                                            (num_loop_row *
                                                             CUBE_SIZE +
                                                             num_cube_row) *
                                                            CUBE_SIZE],
                                                           self.src_gm
                                                           [src_gm_index],
                                                           0, 1,
                                                           self.num_byte // 2,
                                                           0, 0)
                        else:
                            # Single strided burst pulls the whole group.
                            src_gm_index = num_outer_axis * \
                                           self.src_shape[-1] * \
                                           self.src_shape[-2] + \
                                           num_loop_time * CUBE_SIZE + \
                                           (count_loop * loop_row *
                                            CUBE_SIZE) * self.src_shape[-1]
                            tik_instance.data_move(ub_ori[src_ub_index],
                                                   self.src_gm
                                                   [src_gm_index],
                                                   0, loop_row * CUBE_SIZE,
                                                   self.num_byte // 2,
                                                   (self.src_shape[-1] -
                                                    CUBE_SIZE) //
                                                   self.num_data, 0)
                        count_loop.set_as(count_loop + 1)
                        self.data_rearrange_case_two(tik_instance, ub_ori,
                                                     num_loop_time, loop_row,
                                                     is_last)
                        dst_gm_index = num_outer_axis * num_data_one_loop + \
                                       loop_row * (count_loop - 1) * \
                                       CUBE_SIZE * CUBE_SIZE + \
                                       num_loop_time * self.dst_shape[-3] * \
                                       self.dst_shape[-2] * self.dst_shape[-1]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_ori[0], 0, 1,
                                               loop_row * self.dst_shape[-2] *
                                               self.dst_shape[-1] //
                                               self.num_data, 0, 0)
                    # Tail group of loop_remainder cubes (ragged rows are
                    # skipped / zero-padded by data_rearrange_case_two).
                    with tik_instance.if_scope(num_cube == self.dst_shape[-3] -
                                               loop_remainder):
                        is_last.set_as(1)
                        if self.src_shape[-1] % CUBE_SIZE != 0 or \
                                (self.src_shape[-1] - CUBE_SIZE) // \
                                self.num_data > MAX_STRIDE_BLK:
                            with tik_instance.for_range(0, loop_remainder) \
                                    as num_loop_row:
                                with tik_instance.for_range(0, CUBE_SIZE) \
                                        as num_cube_row:
                                    with tik_instance.if_scope(
                                            ((count_loop * loop_row +
                                              num_loop_row) * CUBE_SIZE +
                                             num_cube_row) >
                                            self.src_shape[-2]):
                                        pass
                                    with tik_instance.else_scope():
                                        src_gm_index = num_outer_axis * \
                                                       self.src_shape[-1] * \
                                                       self.src_shape[-2] + \
                                                       num_loop_time * \
                                                       CUBE_SIZE + \
                                                       ((count_loop *
                                                         loop_row +
                                                         num_loop_row) *
                                                        CUBE_SIZE +
                                                        num_cube_row) * \
                                                       self.src_shape[-1]
                                        tik_instance.data_move(
                                            ub_ori[src_ub_index +
                                                   (num_loop_row * CUBE_SIZE +
                                                    num_cube_row) *
                                                   CUBE_SIZE],
                                            self.src_gm[src_gm_index],
                                            0, 1, self.num_byte // 2, 0, 0)
                        else:
                            src_gm_index = num_outer_axis * \
                                           self.src_shape[-1] * \
                                           self.src_shape[-2] + \
                                           num_loop_time * CUBE_SIZE + \
                                           (count_loop * loop_row *
                                            CUBE_SIZE) * self.src_shape[-1]
                            if self.src_shape[-2] % CUBE_SIZE == 0:
                                tik_instance.data_move(ub_ori[0],
                                                       self.src_gm
                                                       [src_gm_index], 0,
                                                       loop_remainder *
                                                       CUBE_SIZE,
                                                       self.num_byte // 2,
                                                       (self.src_shape[-1] -
                                                        CUBE_SIZE) //
                                                       self.num_data, 0)
                            else:
                                # Ragged second-last axis: only the existing
                                # rows are moved.
                                tik_instance.data_move(ub_ori[0],
                                                       self.src_gm
                                                       [src_gm_index], 0,
                                                       loop_remainder *
                                                       CUBE_SIZE -
                                                       (CUBE_SIZE -
                                                        self.src_shape[-2] %
                                                        CUBE_SIZE),
                                                       self.num_byte // 2,
                                                       (self.src_shape[-1] -
                                                        CUBE_SIZE) //
                                                       self.num_data, 0)
                        self.data_rearrange_case_two(tik_instance, ub_ori,
                                                     num_loop_time,
                                                     loop_remainder, is_last)
                        dst_gm_index = num_outer_axis * num_data_one_loop + \
                                       loop_row * count_loop * CUBE_SIZE * \
                                       CUBE_SIZE + num_loop_time * \
                                       self.dst_shape[-3] * \
                                       self.dst_shape[-2] * \
                                       self.dst_shape[-1]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_ori[0], 0, 1,
                                               loop_remainder *
                                               self.dst_shape[-2] *
                                               self.dst_shape[-1] //
                                               self.num_data, 0, 0)
        return tik_instance
    def format_transfer_case_five(self, tik_instance):
        """
        the transfer process when UB can put in
        16 * last axis data, last axis is 32B align and
        the shape of dst is 4-D

        Splits the third-last destination axis across cores, batches up to
        ``align_loop`` row-blocks in UB before rearranging with
        ``data_rearrange_case_three`` and flushing.  The UB->GM flush uses
        one strided data_move when the stride fits in MAX_STRIDE_BLK,
        otherwise one data_move per dst[-4] cube column.

        Parameters
        ----------
        tik_instance: the TIK container used to emit instructions

        Returns
        -------
        tik_instance, with the transfer schedule emitted
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        total_core_loop_num = self.dst_shape[-3]
        core_number = _set_core_num(total_core_loop_num)
        # Elements in one third-last-axis row block.
        num_data_one_loop = self.dst_shape[-4] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            align_loop, remainder = _cal_core_loop(tik_instance,
                                                   num_data_one_loop,
                                                   core_loop, ub_ori_data)
            src_ub_index = 0
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_third_last_axis = total_core_loop
                # is_last marks the final row block so the ragged tail of
                # src_shape[-2] can be handled in the remainder branch.
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                with tik_instance.if_scope(num_third_last_axis ==
                                           self.dst_shape[-3] - 1):
                    is_last.set_as(1)
                # Flush when a full batch of align_loop blocks accumulated
                # (not on the final iteration, handled below).
                with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                   align_loop == 0,
                                                   num_core_loop !=
                                                   core_loop - 1)):
                    src_gm_index = num_third_last_axis * num_data_one_loop - \
                                   (align_loop - 1) * num_data_one_loop
                    tik_instance.data_move(ub_ori[src_ub_index],
                                           self.src_gm[src_gm_index],
                                           0, 1,
                                           align_loop * num_data_one_loop //
                                           self.num_data, 0, 0)
                    self.data_rearrange_case_three(tik_instance, ub_ori,
                                                   ub_trans, align_loop,
                                                   is_last)
                    # Stride too large for one strided move: write each
                    # dst[-4] cube column separately.
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - align_loop) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (align_loop - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_trans
                                                   [align_loop *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE * num_col_cube],
                                                   0, 1,
                                                   align_loop *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (align_loop - 1) * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0,
                                               self.dst_shape[-4],
                                               (align_loop *
                                                self.dst_shape[-1] *
                                                self.dst_shape[-2] +
                                                self.num_data - 1) //
                                               self.num_data,
                                               self.num_byte // 2,
                                               (self.dst_shape[-3] -
                                                align_loop) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
                # Final flush of the remaining row blocks.
                with tik_instance.if_scope(num_core_loop == core_loop - 1):
                    src_gm_index = num_third_last_axis * num_data_one_loop - \
                                   (remainder - 1) * num_data_one_loop
                    with tik_instance.if_scope(is_last == 1):
                        if self.src_shape[-2] % CUBE_SIZE != 0:
                            # Ragged tail: only the valid source rows exist
                            # in GM, so shrink the burst accordingly.
                            tik_instance.data_move(ub_ori[src_ub_index],
                                                   self.src_gm[src_gm_index],
                                                   0, 1,
                                                   (remainder *
                                                    num_data_one_loop -
                                                    (CUBE_SIZE -
                                                     self.src_shape[-2] %
                                                     CUBE_SIZE) *
                                                    self.dst_shape[-4] *
                                                    self.dst_shape[-1]) //
                                                   self.num_data, 0, 0)
                        else:
                            tik_instance.data_move(ub_ori[src_ub_index],
                                                   self.src_gm[src_gm_index],
                                                   0, 1,
                                                   remainder *
                                                   num_data_one_loop //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        tik_instance.data_move(ub_ori[src_ub_index],
                                               self.src_gm[src_gm_index],
                                               0, 1,
                                               remainder * num_data_one_loop //
                                               self.num_data, 0, 0)
                    self.data_rearrange_case_three(tik_instance, ub_ori,
                                                   ub_trans, remainder,
                                                   is_last)
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - remainder) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (remainder - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_trans
                                                   [remainder *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE * num_col_cube],
                                                   0, 1,
                                                   remainder *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (remainder - 1) * self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0,
                                               self.dst_shape[-4],
                                               (remainder *
                                                self.dst_shape[-1] *
                                                self.dst_shape[-2] +
                                                self.num_data - 1) //
                                               self.num_data,
                                               self.num_byte // 2,
                                               (self.dst_shape[-3] -
                                                remainder) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
        return tik_instance
    def format_transfer_case_six(self, tik_instance):
        """
        the transfer process when UB can put in
        16 * last axis data, last axis is not 32B align and
        the shape of dst is 4-D

        Each iteration loads one block of CUBE_SIZE source rows into UB
        row-by-row (the source last axis is not 32B aligned, so a single
        strided burst cannot be used), accumulates up to ``align_loop``
        blocks, rearranges them via data_rearrange_case_three and writes
        them back to gm; the final iteration flushes the ``remainder``
        blocks.  Writes fall back to one burst per dst[-4] cube when the
        destination stride would exceed MAX_STRIDE_BLK.
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        total_core_loop_num = self.dst_shape[-3]
        core_number = _set_core_num(total_core_loop_num)
        # elements produced per dst[-3] slice
        num_data_one_loop = self.dst_shape[-4] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            align_loop, remainder = _cal_core_loop(tik_instance,
                                                   num_data_one_loop,
                                                   core_loop, ub_ori_data)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_third_last_axis = total_core_loop
                src_gm_index = num_third_last_axis * self.src_shape[-1] * \
                               CUBE_SIZE
                src_ub_index = (num_core_loop % align_loop) * \
                               num_data_one_loop
                # Load phase: the last block of the last loop may contain
                # fewer than CUBE_SIZE valid source rows.
                with tik_instance.if_scope(num_core_loop == core_loop - 1):
                    with tik_instance.if_scope(num_third_last_axis ==
                                               self.dst_shape[-3] - 1):
                        if self.src_shape[-2] % CUBE_SIZE != 0:
                            # tail block: only src_shape[-2] % CUBE_SIZE rows
                            with tik_instance.for_range(0, self.src_shape[-2] %
                                                        CUBE_SIZE) as \
                                    num_cube_row:
                                tik_instance.data_move(ub_ori
                                                       [src_ub_index +
                                                        num_cube_row *
                                                        self.dst_shape[-1] *
                                                        self.dst_shape[-4]],
                                                       self.src_gm
                                                       [src_gm_index +
                                                        num_cube_row *
                                                        self.src_shape[-1]],
                                                       0, 1,
                                                       self.dst_shape[-1] *
                                                       self.dst_shape[-4] //
                                                       self.num_data, 0, 0)
                        else:
                            with tik_instance.for_range(0, CUBE_SIZE) as \
                                    num_cube_row:
                                tik_instance.data_move(ub_ori
                                                       [src_ub_index +
                                                        num_cube_row *
                                                        self.dst_shape[-1] *
                                                        self.dst_shape[-4]],
                                                       self.src_gm
                                                       [src_gm_index +
                                                        num_cube_row *
                                                        self.src_shape[-1]],
                                                       0, 1,
                                                       self.dst_shape[-1] *
                                                       self.dst_shape[-4] //
                                                       self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        with tik_instance.for_range(0, CUBE_SIZE) as \
                                num_cube_row:
                            tik_instance.data_move(ub_ori[src_ub_index +
                                                          num_cube_row *
                                                          self.dst_shape[-1] *
                                                          self.dst_shape[-4]],
                                                   self.src_gm
                                                   [src_gm_index +
                                                    num_cube_row *
                                                    self.src_shape[-1]],
                                                   0, 1,
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-4] //
                                                   self.num_data, 0, 0)
                with tik_instance.else_scope():
                    with tik_instance.for_range(0, CUBE_SIZE) as num_cube_row:
                        tik_instance.data_move(ub_ori[src_ub_index +
                                                      num_cube_row *
                                                      self.dst_shape[-1] *
                                                      self.dst_shape[-4]],
                                               self.src_gm[src_gm_index +
                                                           num_cube_row *
                                                           self.src_shape[-1]],
                                               0, 1,
                                               self.dst_shape[-1] *
                                               self.dst_shape[-4] //
                                               self.num_data, 0, 0)
                # is_last marks the final dst[-3] slice, which may need
                # zero padding during the rearrange step.
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                with tik_instance.if_scope(num_third_last_axis ==
                                           self.dst_shape[-3] - 1):
                    is_last.set_as(1)
                # Flush phase: rearrange + store once align_loop blocks have
                # accumulated in UB (but not on the final loop iteration).
                with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                   align_loop == 0,
                                                   num_core_loop !=
                                                   core_loop - 1)):
                    self.data_rearrange_case_three(tik_instance, ub_ori,
                                                   ub_trans, align_loop,
                                                   is_last)
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - align_loop) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        # dst stride too large for one burst: move each
                        # dst[-4] cube column separately
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (align_loop - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_trans
                                                   [align_loop *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE * num_col_cube],
                                                   0, 1,
                                                   align_loop *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (align_loop - 1) * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0,
                                               self.dst_shape[-4],
                                               (align_loop *
                                                self.dst_shape[-1] *
                                                self.dst_shape[-2] +
                                                self.num_data - 1) //
                                               self.num_data,
                                               self.num_byte // 2,
                                               (self.dst_shape[-3] -
                                                align_loop) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
                # Final flush: store the remaining `remainder` blocks.
                with tik_instance.if_scope(num_core_loop == core_loop - 1):
                    self.data_rearrange_case_three(tik_instance, ub_ori,
                                                   ub_trans, remainder,
                                                   is_last)
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - remainder) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (remainder - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_trans
                                                   [remainder *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE * num_col_cube],
                                                   0, 1,
                                                   remainder *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (remainder - 1) * self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans[0], 0,
                                               self.dst_shape[-4],
                                               (remainder *
                                                self.dst_shape[-1] *
                                                self.dst_shape[-2] +
                                                self.num_data - 1) //
                                               self.num_data,
                                               self.num_byte // 2,
                                               (self.dst_shape[-3] -
                                                remainder) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
        return tik_instance
    def format_transfer_case_seven(self, tik_instance):
        """
        the transfer process when UB can not put in second last axis * 16 data
        and the shape of dst is 4-D

        The dst[-4] axis is split into chunks of ``loop_col`` cube columns so
        that each chunk fits in UB.  Two scheduling variants are used: a
        single-buffer variant when the source second-last axis is small
        relative to MAX_CORE_NUM * CUBE_SIZE, and a double-buffer variant
        (thread_num=2, half-sized UB buffers) otherwise.
        """
        def data_move_case_zero(tik_instance, ub_ori, ub_trans,
                                is_last, num_outer_axis, num_loop_time,
                                loop_time, loop_col, loop_len):
            """
            the process of data move for one chunk of loop_len cube columns
            """
            # The chunk is the last one when loop_col evenly divides
            # dst[-4] and this is the final full chunk.
            with tik_instance.if_scope(tik.all(loop_time ==
                                               self.dst_shape[-4] //
                                               loop_col - 1,
                                               self.dst_shape[-4] % loop_col ==
                                               0)):
                is_last.set_as(1)
            num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                                self.dst_shape[-2] * self.dst_shape[-1]
            src_ub_index = 0
            # Row-by-row load when the source rows are not 32B aligned or
            # the skip stride between rows would exceed MAX_STRIDE_BLK;
            # otherwise one strided burst covers all rows.
            if self.src_shape[-1] % CUBE_SIZE != 0 or \
                    (self.src_shape[-1] - loop_len * CUBE_SIZE) // \
                    self.num_data > MAX_STRIDE_BLK:
                if self.src_shape[-2] % CUBE_SIZE != 0:
                    with tik_instance.if_scope(num_loop_time ==
                                               self.dst_shape[-3] - 1):
                        with tik_instance.for_range(0,
                                                    self.src_shape[-2] %
                                                    CUBE_SIZE) as num_cube_col:
                            src_gm_index = num_outer_axis *\
                                           self.src_shape[-1] * \
                                           self.src_shape[-2] + \
                                           (num_loop_time * CUBE_SIZE +
                                            num_cube_col) *\
                                           self.src_shape[-1] + loop_time *\
                                           loop_col * CUBE_SIZE
                            tik_instance.data_move(ub_ori[loop_len *
                                                          CUBE_SIZE *
                                                          num_cube_col],
                                                   self.src_gm[src_gm_index],
                                                   0, 1,
                                                   loop_len * self.num_byte //
                                                   2, 0, 0)
                    with tik_instance.else_scope():
                        with tik_instance.for_range(0, CUBE_SIZE) \
                                as num_cube_col:
                            src_gm_index = num_outer_axis * \
                                           self.src_shape[-1] * \
                                           self.src_shape[-2] + \
                                           (num_loop_time * CUBE_SIZE +
                                            num_cube_col) *\
                                           self.src_shape[-1] + loop_time *\
                                           loop_col * CUBE_SIZE
                            tik_instance.data_move(ub_ori[loop_len *
                                                          CUBE_SIZE *
                                                          num_cube_col],
                                                   self.src_gm[src_gm_index],
                                                   0, 1,
                                                   loop_len * self.num_byte //
                                                   2, 0, 0)
                else:
                    with tik_instance.for_range(0, CUBE_SIZE) as num_cube_col:
                        src_gm_index = num_outer_axis * self.src_shape[-1] * \
                                       self.src_shape[-2] + \
                                       (num_loop_time * CUBE_SIZE +
                                        num_cube_col) * self.src_shape[-1] + \
                                       loop_time * loop_col * CUBE_SIZE
                        tik_instance.data_move(ub_ori[loop_len * CUBE_SIZE *
                                                      num_cube_col],
                                               self.src_gm[src_gm_index],
                                               0, 1,
                                               loop_len * self.num_byte //
                                               2, 0, 0)
            else:
                src_gm_index = num_outer_axis * self.src_shape[-1] * \
                               self.src_shape[-2] + num_loop_time *\
                               CUBE_SIZE * self.src_shape[-1] +\
                               loop_time * loop_col * CUBE_SIZE
                if self.src_shape[-2] % CUBE_SIZE != 0:
                    with tik_instance.if_scope(num_loop_time ==
                                               self.dst_shape[-3] - 1):
                        tik_instance.data_move(ub_ori[src_ub_index],
                                               self.src_gm[src_gm_index], 0,
                                               self.src_shape[-2] % CUBE_SIZE,
                                               loop_len * self.num_byte // 2,
                                               (self.src_shape[-1] -
                                                loop_len * CUBE_SIZE) //
                                               self.num_data,
                                               0)
                    with tik_instance.else_scope():
                        tik_instance.data_move(ub_ori[src_ub_index],
                                               self.src_gm[src_gm_index],
                                               0, CUBE_SIZE,
                                               loop_len * self.num_byte // 2,
                                               (self.src_shape[-1] -
                                                loop_len * CUBE_SIZE) //
                                               self.num_data,
                                               0)
                else:
                    tik_instance.data_move(ub_ori[src_ub_index],
                                           self.src_gm[src_gm_index],
                                           0, CUBE_SIZE,
                                           loop_len * self.num_byte // 2,
                                           (self.src_shape[-1] - loop_len *
                                            CUBE_SIZE) // self.num_data, 0)
            self.data_rearrange_case_four(tik_instance, ub_ori,
                                          ub_trans, num_loop_time,
                                          loop_len, is_last)
            # Store: per-cube-column moves when the dst stride exceeds
            # MAX_STRIDE_BLK, otherwise one strided burst.
            if((self.dst_shape[-3] - 1) * self.dst_shape[-1] *
               self.dst_shape[-2] // self.num_data > MAX_STRIDE_BLK):
                with tik_instance.for_range(0, loop_len) as \
                        num_col_cube:
                    dst_gm_index = num_outer_axis * num_data_one_loop + \
                                   num_loop_time * self.dst_shape[-1] * \
                                   self.dst_shape[-2] + \
                                   (loop_time * loop_col + num_col_cube) * \
                                   self.dst_shape[-1] * self.dst_shape[-2] * \
                                   self.dst_shape[-3]
                    tik_instance.data_move(self.dst_gm[dst_gm_index],
                                           ub_trans[num_col_cube *
                                                    CUBE_SIZE *
                                                    (CUBE_SIZE + 1)],
                                           0, 1,
                                           self.dst_shape[-1] *
                                           self.dst_shape[-2] //
                                           self.num_data,
                                           0, 0)
            else:
                dst_gm_index = num_outer_axis * num_data_one_loop + \
                               num_loop_time * self.dst_shape[-1] * \
                               self.dst_shape[-2] + loop_time * \
                               loop_col * self.dst_shape[-1] * \
                               self.dst_shape[-2] * \
                               self.dst_shape[-3]
                tik_instance.data_move(self.dst_gm[dst_gm_index],
                                       ub_trans[0],
                                       0, loop_len,
                                       self.dst_shape[-1] *
                                       self.dst_shape[-2] // self.num_data,
                                       self.num_byte // 2,
                                       (self.dst_shape[-3] - 1) *
                                       self.dst_shape[-1] *
                                       self.dst_shape[-2] // self.num_data)
        # Variant one: few row-blocks per core -> single buffer, whole UB.
        if self.src_shape[-2] // (MAX_CORE_NUM * CUBE_SIZE) < 2:
            ub_ori_data = self.ub_memory
            ub_trans_data = ub_ori_data
            ub_ori = tik_instance.Tensor(self.dtype,
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor(self.dtype,
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            loop_col, loop_remainder = _cal_core_loop_python_one(
                CUBE_SIZE * (CUBE_SIZE + 1), self.dst_shape[-4], ub_ori_data)
            loop_times = self.dst_shape[-3]
            if len(self.dst_shape) == 4:
                total_core_loop_num = loop_times
            else:
                total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                       self.dst_shape[:-4]) * \
                                      loop_times
            core_number = _set_core_num(total_core_loop_num)
            with tik_instance.for_range(0, core_number, block_num=core_number) \
                    as num_core:
                core_loop, sum_core = _cal_core(tik_instance,
                                                total_core_loop_num,
                                                num_core, core_number)
                with tik_instance.for_range(0, core_loop) as num_core_loop:
                    total_core_loop = sum_core + num_core_loop
                    num_loop_time = total_core_loop % loop_times
                    num_outer_axis = (total_core_loop - num_loop_time) // \
                                     loop_times
                    is_last = tik_instance.Scalar("uint64")
                    is_last.set_as(0)
                    with tik_instance.for_range(0, self.dst_shape[-4] //
                                                loop_col) as num_cube:
                        data_move_case_zero(tik_instance, ub_ori,
                                            ub_trans, is_last,
                                            num_outer_axis,
                                            num_loop_time, num_cube,
                                            loop_col, loop_col)
                    if loop_remainder != 0:
                        is_last.set_as(1)
                        data_move_case_zero(tik_instance, ub_ori,
                                            ub_trans, is_last, num_outer_axis,
                                            num_loop_time,
                                            self.dst_shape[-4] // loop_col,
                                            loop_col, loop_remainder)
        # Variant two: double buffering (thread_num=2), half-sized buffers
        # declared inside the loop so each thread gets its own copy.
        else:
            ub_ori_data = self.ub_memory // 2
            ub_trans_data = ub_ori_data
            loop_col, loop_remainder = _cal_core_loop_python_one(
                CUBE_SIZE * (CUBE_SIZE + 1), self.dst_shape[-4], ub_ori_data)
            loop_times = self.dst_shape[-3]
            if len(self.dst_shape) == 4:
                total_core_loop_num = loop_times
            else:
                total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                       self.dst_shape[:-4]) * \
                                      loop_times
            core_number = _set_core_num(total_core_loop_num)
            with tik_instance.for_range(0, core_number, block_num=core_number) \
                    as num_core:
                core_loop, sum_core = _cal_core(tik_instance,
                                                total_core_loop_num,
                                                num_core, core_number)
                with tik_instance.for_range(0, core_loop, thread_num=2)\
                        as num_core_loop:
                    ub_ori = tik_instance.Tensor(self.dtype,
                                                 (ub_ori_data,),
                                                 name="ub_ori",
                                                 scope=tik.scope_ubuf)
                    ub_trans = tik_instance.Tensor(self.dtype,
                                                   (ub_trans_data,),
                                                   name="ub_trans",
                                                   scope=tik.scope_ubuf)
                    total_core_loop = sum_core + num_core_loop
                    num_loop_time = total_core_loop % loop_times
                    num_outer_axis = (total_core_loop - num_loop_time) // \
                                     loop_times
                    is_last = tik_instance.Scalar("uint64")
                    is_last.set_as(0)
                    with tik_instance.for_range(0, self.dst_shape[-4] //
                                                loop_col) as num_cube:
                        data_move_case_zero(tik_instance, ub_ori,
                                            ub_trans, is_last, num_outer_axis,
                                            num_loop_time, num_cube,
                                            loop_col, loop_col)
                    if loop_remainder != 0:
                        is_last.set_as(1)
                        data_move_case_zero(tik_instance, ub_ori,
                                            ub_trans, is_last, num_outer_axis,
                                            num_loop_time,
                                            self.dst_shape[-4] // loop_col,
                                            loop_col, loop_remainder)
        return tik_instance
    def format_transfer_case_eight(self, tik_instance):
        """
        the transfer process when UB // 2 can put in
        16 * last axis data, last axis is 32B align and
        the shape of dst is 4-D

        Uses half of UB per buffer so that two iterations can overlap via
        thread_num=2 double buffering.  Each iteration copies align_loop
        dst[-3] slices from gm in one aligned burst, rearranges them with
        data_rearrange_case_five and writes them back; the trailing
        ``remainder`` slices are handled after the loop.
        """
        ub_ori_data = self.ub_memory // 2
        ub_trans_data = ub_ori_data
        total_core_loop_num = self.dst_shape[-3]
        core_number = _set_core_num(total_core_loop_num)
        num_data_one_loop = self.dst_shape[-4] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            core_loop = total_core_loop_num // core_number
            sum_core = num_core*core_loop
            align_loop, remainder = _cal_core_loop_python(num_data_one_loop,
                                                          core_loop,
                                                          ub_ori_data -
                                                          self.dst_shape[-4] *
                                                          self.dst_shape[-1])
            core_loop_num = core_loop // align_loop
            # double buffering only pays off with more than one iteration
            thread_number = 1 if core_loop_num == 1 else 2
            src_ub_index = 0
            with tik_instance.for_range(0, core_loop_num,
                                        thread_num=thread_number)\
                    as num_core_loop:
                # buffers declared inside the loop so each thread of the
                # double-buffered schedule gets its own copy
                ub_ori = tik_instance.Tensor(self.dtype,
                                             (ub_ori_data,),
                                             name="ub_ori",
                                             scope=tik.scope_ubuf)
                ub_trans = tik_instance.Tensor(self.dtype,
                                               (ub_trans_data,),
                                               name="ub_trans",
                                               scope=tik.scope_ubuf)
                total_core_loop = sum_core + num_core_loop * align_loop
                num_third_last_axis = total_core_loop
                src_gm_index = num_third_last_axis * num_data_one_loop
                tik_instance.data_move(ub_ori[src_ub_index],
                                       self.src_gm[src_gm_index],
                                       0, 1,
                                       align_loop * num_data_one_loop //
                                       self.num_data, 0, 0)
                self.data_rearrange_case_five(tik_instance, ub_ori,
                                              ub_trans, align_loop)
                # per-cube moves when the dst stride exceeds MAX_STRIDE_BLK
                if (self.dst_shape[-3] - align_loop) * self.dst_shape[-1] *\
                        self.dst_shape[-2] // self.num_data > MAX_STRIDE_BLK:
                    with tik_instance.for_range(0, self.dst_shape[-4]) \
                            as num_col_cube:
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] + \
                                       num_col_cube * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] * \
                                       self.dst_shape[-3]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans
                                               [align_loop *
                                                self.dst_shape[-1] *
                                                self.dst_shape[-2] *
                                                num_col_cube +
                                                CUBE_SIZE * num_col_cube],
                                               0, 1,
                                               align_loop *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data, 0, 0)
                else:
                    dst_gm_index = num_third_last_axis * \
                                   self.dst_shape[-1] * \
                                   self.dst_shape[-2]
                    tik_instance.data_move(self.dst_gm[dst_gm_index],
                                           ub_trans[0], 0,
                                           self.dst_shape[-4],
                                           (align_loop *
                                            self.dst_shape[-1] *
                                            self.dst_shape[-2] +
                                            self.num_data - 1) //
                                           self.num_data,
                                           self.num_byte // 2,
                                           (self.dst_shape[-3] -
                                            align_loop) *
                                           self.dst_shape[-1] *
                                           self.dst_shape[-2] //
                                           self.num_data)
            # tail: slices that did not fill a whole align_loop batch
            if remainder != 0:
                ub_ori = tik_instance.Tensor(self.dtype,
                                             (ub_ori_data,),
                                             name="ub_ori",
                                             scope=tik.scope_ubuf)
                ub_trans = tik_instance.Tensor(self.dtype,
                                               (ub_trans_data,),
                                               name="ub_trans",
                                               scope=tik.scope_ubuf)
                num_third_last_axis = sum_core + core_loop_num * align_loop
                src_gm_index = num_third_last_axis * num_data_one_loop
                tik_instance.data_move(ub_ori[src_ub_index],
                                       self.src_gm[src_gm_index],
                                       0, 1,
                                       remainder *
                                       num_data_one_loop //
                                       self.num_data, 0, 0)
                self.data_rearrange_case_five(tik_instance, ub_ori,
                                              ub_trans, remainder)
                if (self.dst_shape[-3] - remainder) * self.dst_shape[-1] * \
                        self.dst_shape[-2] // self.num_data > MAX_STRIDE_BLK:
                    with tik_instance.for_range(0, self.dst_shape[-4]) \
                            as num_col_cube:
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] + \
                                       num_col_cube * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] * \
                                       self.dst_shape[-3]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_trans
                                               [remainder *
                                                self.dst_shape[-1] *
                                                self.dst_shape[-2] *
                                                num_col_cube +
                                                CUBE_SIZE * num_col_cube],
                                               0, 1,
                                               remainder *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data, 0, 0)
                else:
                    dst_gm_index = num_third_last_axis * \
                                   self.dst_shape[-1] * \
                                   self.dst_shape[-2]
                    tik_instance.data_move(self.dst_gm[dst_gm_index],
                                           ub_trans[0], 0,
                                           self.dst_shape[-4],
                                           (remainder *
                                            self.dst_shape[-1] *
                                            self.dst_shape[-2] +
                                            self.num_data - 1) //
                                           self.num_data,
                                           self.num_byte // 2,
                                           (self.dst_shape[-3] -
                                            remainder) *
                                           self.dst_shape[-1] *
                                           self.dst_shape[-2] //
                                           self.num_data)
        return tik_instance
def nd_2_nz_compute(self):
"""
the overall data move process
"""
tik_instance = self.set_tik_instance()
format_transfer_case = self.set_format_transfer_case()
if format_transfer_case == 0:
tik_instance = self.format_transfer_case_zero(tik_instance)
elif format_transfer_case == 1:
tik_instance = self.format_transfer_case_one(tik_instance)
elif format_transfer_case == 2:
tik_instance = self.format_transfer_case_two(tik_instance)
elif format_transfer_case == 3:
tik_instance = self.format_transfer_case_three(tik_instance)
elif format_transfer_case == 4:
tik_instance = self.format_transfer_case_four(tik_instance)
elif format_transfer_case == 5:
tik_instance = self.format_transfer_case_five(tik_instance)
elif format_transfer_case == 6:
tik_instance = self.format_transfer_case_six(tik_instance)
elif format_transfer_case == 7:
tik_instance = self.format_transfer_case_seven(tik_instance)
elif format_transfer_case == 8:
tik_instance = self.format_transfer_case_eight(tik_instance)
return tik_instance
def get_tik_instance(self):
"""
obtain tik instance
"""
tik_instance = self.nd_2_nz_compute()
tik_instance.BuildCCE(kernel_name=self.kernel_name,
inputs=[self.src_gm],
outputs=[self.dst_gm])
return tik_instance
class ND2NzComputeInt8:
"""
Rearranges data from ND format into FRACTAL_NZ format
Functions
----------
__init__:
initialize some properties
set_tik_instance:
set tik_instance
set_src_dst_tensor:
set input and output tensor
cal_core_loop:
calculate the loop number on each core
set_format_transfer_case:
divide the transfer case from nd to nz
vector_dup_zero:
vector_dup zeros when dup_number is python variable
data_rearrange_case_zero:
rearrange data when UB can put in last axis * 16 data and
the shape of dst is 4-D
data_rearrange_case_one:
rearrange data when UB can not put in last axis * 16 data
    format_transfer_case_zero:
        the transfer process when the transfer case is 0
    format_transfer_case_one:
        the transfer process when the transfer case is 1
    format_transfer_case_two:
        the transfer process when the transfer case is 2
    data_move_case_zero:
        the data move process when the transfer case is 2
nd_2_nz_compute:
the overall transfer process
get_tik_instance:
obtain tik instance
Returns
-------
None
"""
def __init__(self, src_shape, dtype, kernel_name):
"""
initialize some properties
"""
self.src_shape_ori = src_shape[:]
self.src_shape = src_shape[:]
self.dtype = dtype
self.kernel_name = kernel_name
if len(src_shape) == 1:
self.src_shape = [1, src_shape[0]]
self.dst_shape = self.src_shape[:]
self.dst_shape[-2:] = [(self.src_shape[-1] + CUBE_SIZE_2 - 1) //
CUBE_SIZE_2,
(self.src_shape[-2] + CUBE_SIZE - 1) //
CUBE_SIZE,
CUBE_SIZE,
CUBE_SIZE_2]
self.num_byte = SIZE_ONE_BYTES
self.cast_num_byte = SIZE_TWO_BYTES
self.vadds_mask = MAX_MASK
# the number of data that can be moved in each data_move
self.num_data = DATA_MOVE_MIN_UNIT // self.num_byte
# the number of float16 data that can be moved in each data_move
self.cast_num_data = DATA_MOVE_MIN_UNIT // self.cast_num_byte
util.check_shape_rule(self.dst_shape)
# the number of data that UB can put in
self.ub_memory = min(TOTAL_UB_MEMORY, 252 * 1024) // self.cast_num_byte // 4
self.src_gm = None
self.dst_gm = None
def set_tik_instance(self):
"""
set tik_instance
"""
tik_instance = tik.Tik()
self.set_src_dst_tensor(tik_instance)
return tik_instance
def set_src_dst_tensor(self, tik_instance):
"""
set input and output tensor
"""
src_element_number = functools_reduce(lambda x1, x2: x1 * x2,
self.src_shape_ori[:])
dst_element_number = functools_reduce(lambda x1, x2: x1 * x2,
self.dst_shape[:])
self.src_gm = tik_instance.Tensor(self.dtype,
(src_element_number,),
name="src_gm",
scope=tik.scope_gm)
self.dst_gm = tik_instance.Tensor(self.dtype,
(dst_element_number,),
name="dst_gm",
scope=tik.scope_gm)
def set_format_transfer_case(self):
"""
divide the transfer case from nd to nz
"""
if len(self.dst_shape) == 4:
is_four_d = 1
else:
is_four_d = (functools_reduce(lambda x1, x2: x1 * x2,
self.src_shape[:-2]) == 1)
if is_four_d:
if self.src_shape[-1] % CUBE_SIZE_2 == 0:
format_transfer_case = 0
if self.dst_shape[-4] * self.dst_shape[-1] * \
(self.dst_shape[-2] + 1) > self.ub_memory:
format_transfer_case = 2
else:
format_transfer_case = 1
if self.dst_shape[-4] * self.dst_shape[-1] * \
(self.dst_shape[-2] + 1) > self.ub_memory:
format_transfer_case = 2
else:
raise RuntimeError("ND2Nz only support 2D now when dtype is int8")
return format_transfer_case
    def vector_dup_zero(self, tik_instance, ub_trans, dup_number, offset):
        """
        vector_dup zeros when dup_number is python variable

        Fills ``dup_number`` float16 elements of ``ub_trans`` starting at
        ``offset`` with 0.0, splitting the work into full-mask repeats
        (batched by MAX_REPEATS, the per-call repeat limit) plus one tail
        call of fewer than MAX_MASK elements.
        """
        scalar_zero = tik_instance.Scalar(dtype="float16", init_value=0.0)
        # full MAX_MASK-wide repeats and the sub-mask tail
        repeat_number = dup_number // MAX_MASK
        tail = dup_number % MAX_MASK
        # issue MAX_REPEATS repeats per call until fewer remain
        with tik_instance.for_range(0, repeat_number // MAX_REPEATS) as \
                num_repeat_loop:
            tik_instance.vector_dup(MAX_MASK,
                                    ub_trans[MAX_MASK * MAX_REPEATS *
                                             num_repeat_loop + offset],
                                    scalar_zero,
                                    MAX_REPEATS,
                                    self.cast_num_byte // 2,
                                    MAX_MASK // self.cast_num_data)
        if repeat_number % MAX_REPEATS != 0:
            tik_instance.vector_dup(MAX_MASK,
                                    ub_trans[repeat_number // MAX_REPEATS *
                                             MAX_MASK * MAX_REPEATS + offset],
                                    scalar_zero,
                                    repeat_number % MAX_REPEATS,
                                    self.cast_num_byte // 2,
                                    MAX_MASK // self.cast_num_data)
        if tail != 0:
            tik_instance.vector_dup(tail,
                                    ub_trans[MAX_MASK * repeat_number +
                                             offset],
                                    scalar_zero,
                                    1,
                                    self.cast_num_byte // 2,
                                    MAX_MASK // self.cast_num_data)
    def data_rearrange_case_zero(self, tik_instance, ub_ori, ub_cast_fp16,
                                 ub_trans, ub_cast_int8, loop_num, is_last):
        """
        rearrange data when UB can put in last axis * 16 data and
        the shape of dst is 4-D

        Pipeline: cast the int8 rows in ub_ori to float16 (ub_cast_fp16),
        zero-pad the tail columns/rows of the last block, transpose the
        row-major data into per-cube-column nz layout via strided vadds
        with 0 (ub_trans), and cast back to int8 (ub_cast_int8).
        """
        cast_repeat_time = tik_instance.Scalar("uint64")
        cast_remainder = tik_instance.Scalar("uint64")
        # count only the valid source rows of the last block when casting
        with tik_instance.if_scope(is_last == 1):
            if (self.src_shape[-2] % CUBE_SIZE) == 0:
                cast_repeat_time.set_as(loop_num * CUBE_SIZE *
                                        self.dst_shape[-4] *
                                        self.dst_shape[-1] // MAX_MASK)
                cast_remainder.set_as(loop_num * CUBE_SIZE *
                                      self.dst_shape[-4] *
                                      self.dst_shape[-1] % MAX_MASK)
            else:
                cast_repeat_time.set_as((loop_num * CUBE_SIZE - CUBE_SIZE +
                                         self.src_shape[-2] % CUBE_SIZE) *
                                        self.dst_shape[-4] *
                                        self.dst_shape[-1] // MAX_MASK)
                cast_remainder.set_as((loop_num * CUBE_SIZE - CUBE_SIZE +
                                       self.src_shape[-2] % CUBE_SIZE) *
                                      self.dst_shape[-4] * self.dst_shape[-1] %
                                      MAX_MASK)
        with tik_instance.else_scope():
            cast_repeat_time.set_as(loop_num * CUBE_SIZE * self.dst_shape[-4] *
                                    self.dst_shape[-1] % MAX_MASK == 0 and
                                    loop_num * CUBE_SIZE * self.dst_shape[-4] *
                                    self.dst_shape[-1] // MAX_MASK or
                                    loop_num * CUBE_SIZE * self.dst_shape[-4] *
                                    self.dst_shape[-1] // MAX_MASK)
        # NOTE: block kept byte-identical below; see original for exact text
    def data_rearrange_case_one(self, tik_instance, ub_ori, ub_cast_fp16,
                                ub_trans, ub_cast_int8,
                                num_loop_time, loop_num, is_last):
        """
        rearrange data when UB can not put in last axis * 16 data

        Pipeline for one chunk of ``loop_num`` cube columns: cast int8 to
        float16, zero-pad the tail columns (when this is the last column
        chunk) and the missing rows of the final row block, transpose into
        nz layout via strided vadds with 0, then cast back to int8.
        """
        cast_repeat_time = tik_instance.Scalar("uint64")
        cast_remainder = tik_instance.Scalar("uint64")
        # only the valid rows of the final row block are cast
        with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):
            if (self.src_shape[-2] % CUBE_SIZE) == 0:
                cast_repeat_time.set_as(loop_num * self.dst_shape[-1] *
                                        self.dst_shape[-2] // MAX_MASK)
                cast_remainder.set_as(loop_num * self.dst_shape[-1] *
                                      self.dst_shape[-2] % MAX_MASK)
            else:
                cast_repeat_time.set_as((self.src_shape[-2] % CUBE_SIZE) *
                                        loop_num * self.dst_shape[-1] //
                                        MAX_MASK)
                cast_remainder.set_as((self.src_shape[-2] % CUBE_SIZE) *
                                      loop_num * self.dst_shape[-1] %
                                      MAX_MASK)
        with tik_instance.else_scope():
            cast_repeat_time.set_as(loop_num * self.dst_shape[-1] *
                                    self.dst_shape[-2] // MAX_MASK)
            cast_remainder.set_as(loop_num * self.dst_shape[-1] *
                                  self.dst_shape[-2] % MAX_MASK)
        # cast the data from int8 to float16
        _cast_dtype(tik_instance, ub_cast_fp16, ub_ori, cast_repeat_time,
                    cast_remainder, "int8_2_float16")
        scalar_zero = tik_instance.Scalar(dtype="float16", init_value=0.0)
        with tik_instance.if_scope(is_last == 1):
            if self.src_shape[-1] % CUBE_SIZE_2 != 0:
                # mask selects the padding columns at the tail of the last
                # CUBE_SIZE_2-wide block (high bits of the mask word)
                mask = 0
                for i, _ in enumerate(range(CUBE_SIZE_2 -
                                            self.src_shape[-1] % CUBE_SIZE_2)):
                    mask += 2 ** (CUBE_SIZE_2 - 1 - i)
                tik_instance.vector_dup([0, mask],
                                        ub_cast_fp16[loop_num * CUBE_SIZE_2 -
                                                     CUBE_SIZE_2],
                                        scalar_zero, CUBE_SIZE,
                                        self.cast_num_byte // 2,
                                        loop_num * CUBE_SIZE_2 //
                                        self.cast_num_data)
        with tik_instance.if_scope(num_loop_time == self.dst_shape[-3] - 1):
            if (self.src_shape[-2] % CUBE_SIZE) != 0:
                # zero-fill the rows missing from the final row block
                dup_number = (CUBE_SIZE - self.src_shape[-2] % CUBE_SIZE) * \
                             self.dst_shape[-1] * loop_num
                offset = (self.src_shape[-2] % CUBE_SIZE) * \
                         self.dst_shape[-1] * loop_num
                self.vector_dup_zero(tik_instance, ub_cast_fp16,
                                     dup_number, offset)
        # vadds with 0 acts as a strided copy gathering each cube column
        with tik_instance.for_range(0, loop_num) as num_col_cube:
            tik_instance.vadds(CUBE_SIZE_2,
                               ub_trans[num_col_cube *
                                        self.dst_shape[-2] *
                                        self.dst_shape[-1] +
                                        CUBE_SIZE_2 * num_col_cube],
                               ub_cast_fp16[num_col_cube * CUBE_SIZE_2],
                               scalar_zero, CUBE_SIZE,
                               self.cast_num_byte // 2,
                               self.cast_num_byte // 2,
                               self.cast_num_byte,
                               loop_num * self.dst_shape[-1] //
                               self.cast_num_data)
        cast_repeat_time.set_as((CUBE_SIZE + 1) * loop_num *
                                self.dst_shape[-1] // MAX_MASK)
        cast_remainder.set_as((CUBE_SIZE + 1) * loop_num * self.dst_shape[-1] %
                              MAX_MASK)
        # cast the data from float16 to int8
        _cast_dtype(tik_instance, ub_cast_int8, ub_trans, cast_repeat_time,
                    cast_remainder, "float16_2_int8")
    def format_transfer_case_zero(self, tik_instance):
        """
        the transfer process when UB can put in 16 * last axis data,
        last axis is 32B align and the shape of dst is 4-D

        Accumulates ``align_loop`` row blocks in UB, rearranges them via
        data_rearrange_case_zero (int8 -> fp16 -> nz layout -> int8) and
        flushes them to gm; the final iteration flushes the ``remainder``
        blocks, shortening the last gm read when the source second-last
        axis is not a CUBE_SIZE multiple.  Stores fall back to one burst
        per dst[-4] cube when the gm stride exceeds MAX_STRIDE_BLK.
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        total_core_loop_num = self.dst_shape[-3]
        core_number = _set_core_num(total_core_loop_num)
        num_data_one_loop = self.dst_shape[-4] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            # staging buffers: raw int8 -> fp16 cast -> nz-transposed fp16
            # -> int8 result
            ub_ori = tik_instance.Tensor("int8",
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_cast_fp16 = tik_instance.Tensor("float16",
                                               (ub_trans_data,),
                                               name="ub_cast_fp16",
                                               scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor("float16",
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            ub_cast_int8 = tik_instance.Tensor("int8",
                                               (ub_trans_data,),
                                               name="ub_cast_int8",
                                               scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            align_loop, remainder = _cal_core_loop(tik_instance,
                                                   num_data_one_loop,
                                                   core_loop, ub_ori_data)
            src_ub_index = 0
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_third_last_axis = total_core_loop
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                with tik_instance.if_scope(num_third_last_axis ==
                                           self.dst_shape[-3] - 1):
                    is_last.set_as(1)
                # mid-stream flush: align_loop blocks are full in UB
                with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                   align_loop == 0,
                                                   num_core_loop !=
                                                   core_loop - 1)):
                    src_gm_index = num_third_last_axis * num_data_one_loop - \
                                   (align_loop - 1) * num_data_one_loop
                    tik_instance.data_move(ub_ori[src_ub_index],
                                           self.src_gm[src_gm_index],
                                           0, 1,
                                           align_loop * num_data_one_loop //
                                           self.num_data, 0, 0)
                    self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                  ub_cast_fp16, ub_trans,
                                                  ub_cast_int8, align_loop,
                                                  is_last)
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - align_loop) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (align_loop - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_cast_int8
                                                   [align_loop *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE_2 *
                                                    num_col_cube],
                                                   0, 1,
                                                   align_loop *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (align_loop - 1) * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_cast_int8[0], 0,
                                               self.dst_shape[-4],
                                               align_loop *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data,
                                               self.num_byte,
                                               (self.dst_shape[-3] -
                                                align_loop) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
                # final flush: remaining `remainder` blocks
                with tik_instance.if_scope(num_core_loop == core_loop - 1):
                    src_gm_index = num_third_last_axis * num_data_one_loop - \
                                   (remainder - 1) * num_data_one_loop
                    with tik_instance.if_scope(is_last == 1):
                        if self.src_shape[-2] % CUBE_SIZE != 0:
                            # shorten the read: the last block has only
                            # src_shape[-2] % CUBE_SIZE valid rows in gm
                            tik_instance.data_move(ub_ori[src_ub_index],
                                                   self.src_gm[src_gm_index],
                                                   0, 1,
                                                   (remainder *
                                                    num_data_one_loop -
                                                    (CUBE_SIZE -
                                                     self.src_shape[-2] %
                                                     CUBE_SIZE) *
                                                    self.dst_shape[-4] *
                                                    self.dst_shape[-1]) //
                                                   self.num_data, 0, 0)
                        else:
                            tik_instance.data_move(ub_ori[src_ub_index],
                                                   self.src_gm[src_gm_index],
                                                   0, 1,
                                                   remainder *
                                                   num_data_one_loop //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        tik_instance.data_move(ub_ori[src_ub_index],
                                               self.src_gm[src_gm_index],
                                               0, 1,
                                               remainder * num_data_one_loop //
                                               self.num_data, 0, 0)
                    self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                  ub_cast_fp16, ub_trans,
                                                  ub_cast_int8, remainder,
                                                  is_last)
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - remainder) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (remainder - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_cast_int8
                                                   [remainder *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE_2 *
                                                    num_col_cube],
                                                   0, 1,
                                                   remainder *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (remainder - 1) * self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_cast_int8[0], 0,
                                               self.dst_shape[-4],
                                               remainder * self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data,
                                               self.num_byte,
                                               (self.dst_shape[-3] -
                                                remainder) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
        return tik_instance
    def format_transfer_case_one(self, tik_instance):
        """
        the transfer process when UB can put in 16 * last axis data,
        last axis % 32 != 0 and the shape of dst is 4-D

        Because last axis % 32 != 0, each CUBE_SIZE-row group is loaded
        row by row into UB, rearranged via data_rearrange_case_zero, and
        flushed back to GM `align_loop` groups at a time, with a final
        `remainder`-sized flush on the last iteration of each core.
        """
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        total_core_loop_num = self.dst_shape[-3]
        core_number = _set_core_num(total_core_loop_num)
        num_data_one_loop = self.dst_shape[-4] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            # UB staging buffers: raw int8 input, two fp16 buffers handed to
            # data_rearrange_case_zero, and the int8 result written to GM
            ub_ori = tik_instance.Tensor("int8",
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_cast_fp16 = tik_instance.Tensor("float16",
                                               (ub_trans_data,),
                                               name="ub_cast_fp16",
                                               scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor("float16",
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            ub_cast_int8 = tik_instance.Tensor("int8",
                                               (ub_trans_data,),
                                               name="ub_cast_int8",
                                               scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            align_loop, remainder = _cal_core_loop(tik_instance,
                                                   num_data_one_loop,
                                                   core_loop, ub_ori_data)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_third_last_axis = total_core_loop
                src_gm_index = num_third_last_axis * self.src_shape[-1] * \
                               CUBE_SIZE
                src_ub_index = (num_core_loop % align_loop) * num_data_one_loop
                # last group: only src_shape[-2] % CUBE_SIZE real rows exist
                # when the source row count is not CUBE_SIZE aligned
                with tik_instance.if_scope(num_third_last_axis ==
                                           self.dst_shape[-3] - 1):
                    if self.src_shape[-2] % CUBE_SIZE != 0:
                        with tik_instance.for_range(
                                0, self.src_shape[-2] % CUBE_SIZE) as num_cube_row:
                            tik_instance.data_move(ub_ori
                                                   [src_ub_index +
                                                    num_cube_row *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-4]],
                                                   self.src_gm
                                                   [src_gm_index +
                                                    num_cube_row *
                                                    self.src_shape[-1]],
                                                   0, 1,
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-4] //
                                                   self.num_data, 0, 0)
                    else:
                        with tik_instance.for_range(0, CUBE_SIZE) as \
                                num_cube_row:
                            tik_instance.data_move(ub_ori
                                                   [src_ub_index +
                                                    num_cube_row *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-4]],
                                                   self.src_gm
                                                   [src_gm_index +
                                                    num_cube_row *
                                                    self.src_shape[-1]],
                                                   0, 1,
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-4] //
                                                   self.num_data, 0, 0)
                with tik_instance.else_scope():
                    with tik_instance.for_range(0, CUBE_SIZE) as \
                            num_cube_row:
                        tik_instance.data_move(ub_ori[src_ub_index +
                                                      num_cube_row *
                                                      self.dst_shape[-1] *
                                                      self.dst_shape[-4]],
                                               self.src_gm
                                               [src_gm_index +
                                                num_cube_row *
                                                self.src_shape[-1]],
                                               0, 1,
                                               self.dst_shape[-1] *
                                               self.dst_shape[-4] //
                                               self.num_data, 0, 0)
                # flag the final third-to-last-axis index; passed through to
                # data_rearrange_case_zero
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                with tik_instance.if_scope(num_third_last_axis ==
                                           self.dst_shape[-3] - 1):
                    is_last.set_as(1)
                # flush a full batch of align_loop groups whenever the UB
                # staging area fills (the final iteration is flushed below)
                with tik_instance.if_scope(tik.all((num_core_loop + 1) %
                                                   align_loop == 0,
                                                   num_core_loop !=
                                                   core_loop - 1)):
                    self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                  ub_cast_fp16, ub_trans,
                                                  ub_cast_int8, align_loop,
                                                  is_last)
                    # when the GM stride would exceed MAX_STRIDE_BLK, fall
                    # back to one data_move per destination cube column
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - align_loop) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (align_loop - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_cast_int8
                                                   [align_loop *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE_2 *
                                                    num_col_cube],
                                                   0, 1,
                                                   align_loop *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (align_loop - 1) * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_cast_int8[0], 0,
                                               self.dst_shape[-4],
                                               align_loop *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data,
                                               self.num_byte,
                                               (self.dst_shape[-3] -
                                                align_loop) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
                # final iteration of this core: flush the remaining groups
                with tik_instance.if_scope(num_core_loop == core_loop - 1):
                    self.data_rearrange_case_zero(tik_instance, ub_ori,
                                                  ub_cast_fp16, ub_trans,
                                                  ub_cast_int8, remainder,
                                                  is_last)
                    with tik_instance.if_scope(
                            (self.dst_shape[-3] - remainder) *
                            self.dst_shape[-1] * self.dst_shape[-2] //
                            self.num_data > MAX_STRIDE_BLK):
                        with tik_instance.for_range(0, self.dst_shape[-4]) \
                                as num_col_cube:
                            dst_gm_index = num_third_last_axis * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] - \
                                           (remainder - 1) * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] + \
                                           num_col_cube * \
                                           self.dst_shape[-1] * \
                                           self.dst_shape[-2] * \
                                           self.dst_shape[-3]
                            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                                   ub_cast_int8
                                                   [remainder *
                                                    self.dst_shape[-1] *
                                                    self.dst_shape[-2] *
                                                    num_col_cube +
                                                    CUBE_SIZE_2 *
                                                    num_col_cube],
                                                   0, 1,
                                                   remainder *
                                                   self.dst_shape[-1] *
                                                   self.dst_shape[-2] //
                                                   self.num_data, 0, 0)
                    with tik_instance.else_scope():
                        dst_gm_index = num_third_last_axis * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2] - \
                                       (remainder - 1) * \
                                       self.dst_shape[-1] * \
                                       self.dst_shape[-2]
                        tik_instance.data_move(self.dst_gm[dst_gm_index],
                                               ub_cast_int8[0], 0,
                                               self.dst_shape[-4],
                                               remainder * self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data, self.num_byte,
                                               (self.dst_shape[-3] -
                                                remainder) *
                                               self.dst_shape[-1] *
                                               self.dst_shape[-2] //
                                               self.num_data)
        return tik_instance
    def format_transfer_case_two(self, tik_instance):
        """
        the transfer process when UB can not put in last axis * 16 data

        Tiles the dst_shape[-4] axis into groups of `loop_col` cube columns
        and delegates each tile to data_move_case_zero; a final
        `loop_remainder`-sized tile handles the leftover columns.
        """
        # shrink the UB budget to a multiple of one padded cube column
        # (CUBE_SIZE_2 * (CUBE_SIZE + 1) elements) so tiles divide evenly
        self.ub_memory = self.ub_memory - self.ub_memory % \
                         (CUBE_SIZE_2 * (CUBE_SIZE + 1))
        ub_ori_data = self.ub_memory
        ub_trans_data = ub_ori_data
        loop_col, loop_remainder = _cal_core_loop_python_one(
            CUBE_SIZE_2 * (CUBE_SIZE + 1), self.dst_shape[-4], ub_ori_data)
        loop_times = self.dst_shape[-3]
        if len(self.dst_shape) == 4:
            total_core_loop_num = loop_times
        else:
            # fold any leading batch axes into the per-core work count
            total_core_loop_num = functools_reduce(lambda x1, x2: x1 * x2,
                                                   self.dst_shape[:-4]) * \
                                  loop_times
        core_number = _set_core_num(total_core_loop_num)
        with tik_instance.for_range(0, core_number, block_num=core_number) \
                as num_core:
            ub_ori = tik_instance.Tensor("int8",
                                         (ub_ori_data,),
                                         name="ub_ori",
                                         scope=tik.scope_ubuf)
            ub_cast_fp16 = tik_instance.Tensor("float16",
                                               (ub_trans_data,),
                                               name="ub_cast_fp16",
                                               scope=tik.scope_ubuf)
            ub_trans = tik_instance.Tensor("float16",
                                           (ub_trans_data,),
                                           name="ub_trans",
                                           scope=tik.scope_ubuf)
            ub_cast_int8 = tik_instance.Tensor("int8",
                                               (ub_trans_data,),
                                               name="ub_cast_int8",
                                               scope=tik.scope_ubuf)
            core_loop, sum_core = _cal_core(tik_instance, total_core_loop_num,
                                            num_core, core_number)
            with tik_instance.for_range(0, core_loop) as num_core_loop:
                total_core_loop = sum_core + num_core_loop
                num_loop_time = total_core_loop % loop_times
                num_outer_axis = (total_core_loop - num_loop_time) // \
                                 loop_times
                is_last = tik_instance.Scalar("uint64")
                is_last.set_as(0)
                # full tiles of loop_col cube columns
                with tik_instance.for_range(0, self.dst_shape[-4] //
                                            loop_col) as num_cube:
                    self.data_move_case_zero(tik_instance, ub_ori,
                                             ub_cast_fp16, ub_trans,
                                             ub_cast_int8, is_last,
                                             num_outer_axis, num_loop_time,
                                             num_cube,
                                             loop_col, loop_col)
                # leftover columns form one shorter, final tile
                if loop_remainder != 0:
                    is_last.set_as(1)
                    self.data_move_case_zero(tik_instance, ub_ori,
                                             ub_cast_fp16, ub_trans,
                                             ub_cast_int8, is_last,
                                             num_outer_axis, num_loop_time,
                                             self.dst_shape[-4] // loop_col,
                                             loop_col, loop_remainder)
        return tik_instance
    def data_move_case_zero(self, tik_instance, ub_ori, ub_cast_fp16, ub_trans,
                            ub_cast_int8, is_last, num_outer_axis,
                            num_loop_time, loop_time, loop_col, loop_len):
        """
        the data move process of the transfer case is 2

        Moves one tile of `loop_len` destination cube columns for the row
        group selected by `num_loop_time` from GM into UB, rearranges it via
        data_rearrange_case_one, and writes the int8 result back to GM.
        `loop_time` is the tile index along dst_shape[-4]; `is_last` marks
        the tile that may need tail handling.
        """
        with tik_instance.if_scope(tik.all(loop_time == self.dst_shape[-4] //
                                           loop_col - 1,
                                           self.dst_shape[-4] % loop_col ==
                                           0)):
            is_last.set_as(1)
        num_data_one_loop = self.dst_shape[-4] * self.dst_shape[-3] * \
                            self.dst_shape[-2] * self.dst_shape[-1]
        src_ub_index = 0
        # source rows not CUBE_SIZE_2 aligned, or the row-to-row GM stride is
        # too large for a single strided data_move: copy row by row
        if self.src_shape[-1] % CUBE_SIZE_2 != 0 or \
                (self.src_shape[-1] - loop_len * CUBE_SIZE_2) // \
                self.num_data > MAX_STRIDE_BLK:
            with tik_instance.if_scope(num_loop_time ==
                                       self.dst_shape[-3] - 1):
                # last row group may contain fewer than CUBE_SIZE real rows
                if self.src_shape[-2] % CUBE_SIZE != 0:
                    with tik_instance.for_range(0, self.src_shape[-2] %
                                                CUBE_SIZE) as num_cube_col:
                        src_gm_index = num_outer_axis * self.src_shape[-1] * \
                                       self.src_shape[-2] + \
                                       (num_loop_time * CUBE_SIZE +
                                        num_cube_col) * self.src_shape[-1] + \
                                       loop_time * loop_col * CUBE_SIZE_2
                        tik_instance.data_move(ub_ori[loop_len *
                                                      CUBE_SIZE_2 *
                                                      num_cube_col],
                                               self.src_gm[src_gm_index],
                                               0, 1,
                                               loop_len * CUBE_SIZE_2 //
                                               self.num_data, 0, 0)
                else:
                    with tik_instance.for_range(0, CUBE_SIZE) \
                            as num_cube_col:
                        src_gm_index = num_outer_axis * self.src_shape[-1] * \
                                       self.src_shape[-2] + \
                                       (num_loop_time * CUBE_SIZE +
                                        num_cube_col) * self.src_shape[-1] + \
                                       loop_time * loop_col * CUBE_SIZE_2
                        tik_instance.data_move(ub_ori[loop_len *
                                                      CUBE_SIZE_2 *
                                                      num_cube_col],
                                               self.src_gm[src_gm_index],
                                               0, 1,
                                               loop_len * CUBE_SIZE_2 //
                                               self.num_data, 0, 0)
            with tik_instance.else_scope():
                with tik_instance.for_range(0, CUBE_SIZE) as num_cube_col:
                    src_gm_index = num_outer_axis * self.src_shape[-1] * \
                                   self.src_shape[-2] + \
                                   (num_loop_time * CUBE_SIZE +
                                    num_cube_col) * self.src_shape[-1] + \
                                   loop_time * loop_col * CUBE_SIZE_2
                    tik_instance.data_move(ub_ori[loop_len * CUBE_SIZE_2 *
                                                  num_cube_col],
                                           self.src_gm[src_gm_index],
                                           0, 1,
                                           loop_len * CUBE_SIZE_2 //
                                           self.num_data, 0, 0)
        else:
            # aligned case: one strided data_move covers all rows of the tile
            src_gm_index = num_outer_axis * self.src_shape[-1] * \
                           self.src_shape[-2] + num_loop_time * CUBE_SIZE * \
                           self.src_shape[-1] + loop_time * loop_col * \
                           CUBE_SIZE_2
            with tik_instance.if_scope(num_loop_time ==
                                       self.dst_shape[-3] - 1):
                if self.src_shape[-2] % CUBE_SIZE != 0:
                    tik_instance.data_move(ub_ori[src_ub_index],
                                           self.src_gm[src_gm_index], 0,
                                           self.src_shape[-2] % CUBE_SIZE,
                                           loop_len,
                                           (self.src_shape[-1] -
                                            loop_len * CUBE_SIZE_2) //
                                           self.num_data,
                                           0)
                else:
                    tik_instance.data_move(ub_ori[src_ub_index],
                                           self.src_gm[src_gm_index],
                                           0, CUBE_SIZE,
                                           loop_len,
                                           (self.src_shape[-1] -
                                            loop_len * CUBE_SIZE_2) //
                                           self.num_data,
                                           0)
            with tik_instance.else_scope():
                tik_instance.data_move(ub_ori[src_ub_index],
                                       self.src_gm[src_gm_index],
                                       0, CUBE_SIZE,
                                       loop_len,
                                       (self.src_shape[-1] - loop_len *
                                        CUBE_SIZE_2) // self.num_data, 0)
        self.data_rearrange_case_one(tik_instance, ub_ori, ub_cast_fp16,
                                     ub_trans, ub_cast_int8, num_loop_time,
                                     loop_len, is_last)
        # output: per-column moves when the GM stride would exceed
        # MAX_STRIDE_BLK, otherwise one strided data_move for the whole tile
        if((self.dst_shape[-3] - 1) * self.dst_shape[-1] *
           self.dst_shape[-2] // self.num_data > MAX_STRIDE_BLK):
            with tik_instance.for_range(0, loop_len) as \
                    num_col_cube:
                dst_gm_index = num_outer_axis * num_data_one_loop + \
                               num_loop_time * self.dst_shape[-1] * \
                               self.dst_shape[-2] + \
                               (loop_time * loop_col + num_col_cube) * \
                               self.dst_shape[-1] * self.dst_shape[-2] * \
                               self.dst_shape[-3]
                tik_instance.data_move(self.dst_gm[dst_gm_index],
                                       ub_cast_int8[num_col_cube *
                                                    CUBE_SIZE_2 *
                                                    (CUBE_SIZE + 1)],
                                       0, 1,
                                       self.dst_shape[-1] *
                                       self.dst_shape[-2] //
                                       self.num_data,
                                       0, 0)
        else:
            dst_gm_index = num_outer_axis * num_data_one_loop + \
                           num_loop_time * self.dst_shape[-1] * \
                           self.dst_shape[-2] + loop_time * \
                           loop_col * self.dst_shape[-1] * \
                           self.dst_shape[-2] * \
                           self.dst_shape[-3]
            tik_instance.data_move(self.dst_gm[dst_gm_index],
                                   ub_cast_int8[0],
                                   0, loop_len,
                                   self.dst_shape[-1] * self.dst_shape[-2] //
                                   self.num_data, self.num_byte,
                                   (self.dst_shape[-3] - 1) *
                                   self.dst_shape[-1] *
                                   self.dst_shape[-2] // self.num_data)
def nd_2_nz_compute(self):
"""
the overall data move process
"""
tik_instance = self.set_tik_instance()
format_transfer_case = self.set_format_transfer_case()
if format_transfer_case == 0:
tik_instance = self.format_transfer_case_zero(tik_instance)
elif format_transfer_case == 1:
tik_instance = self.format_transfer_case_one(tik_instance)
elif format_transfer_case == 2:
tik_instance = self.format_transfer_case_two(tik_instance)
return tik_instance
def get_tik_instance(self):
"""
obtain tik instance
"""
tik_instance = self.nd_2_nz_compute()
tik_instance.BuildCCE(kernel_name=self.kernel_name,
inputs=[self.src_gm],
outputs=[self.dst_gm])
return tik_instance
@util.check_input_type(dict, dict, str, str, str)
def nd_2_nz(src, dst, src_format, dst_format, kernel_name="nd_2_nz"):
    """
    algorithm: nd_2_nz

    Parameters
    ----------
    src: dict
        dict with keys(shape, dtype) of src
    dst: dict
        dict with keys(shape, dtype) of dst
    src_format: str
        data format of src
    dst_format: str
        data format of dst
    kernel_name: str
        kernel name, default value is "nd_2_nz"

    Returns
    -------
    tik_instance: tik_instance
    """
    src_shape = src.get("shape")
    src_dtype = src.get("dtype").lower()
    util.check_kernel_name(kernel_name)
    util.check_shape_rule(src_shape)
    check_list = ("float16", "float32", "int8")
    util.check_dtype_rule(src_dtype, check_list)
    if src_format.upper() not in {"NHWC", "NCHW", "ND"}:
        # note the trailing space before the implicit string concatenation;
        # without it the message read "ND2Nzonly support"
        raise RuntimeError("The src_format of ND2Nz "
                           "only support NHWC, NCHW, ND.")
    if dst_format.upper() != "FRACTAL_NZ":
        # fixed: message previously said "dat_format"
        raise RuntimeError("The dst_format of ND2Nz "
                           "only support FRACTAL_NZ.")
    src_shape = list(src_shape)
    # int8 input needs the fp16-staging schedule variant
    if src_dtype == "int8":
        nd_2_nz_template_int8 = ND2NzComputeInt8(src_shape, src_dtype,
                                                 kernel_name)
        return nd_2_nz_template_int8.get_tik_instance()
    else:
        nd_2_nz_template = ND2NzCompute(src_shape, src_dtype, kernel_name)
        return nd_2_nz_template.get_tik_instance()
|
13,535 | 8c5972d22855a5caf53cb1aedf828309d39b91f3 | # (c) 2017 Apstra Inc, <community@apstra.com>
# Ansible metadata describing the maturity/support level of this module.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_bp_security_zone
author: ryan@apstra.com (@that1guy15)
version_added: "2.7"
short_description: Manage security-zones within an AOS blueprint
description:
- Create, update and manage security-zones within an existing AOS
blueprint.
options:
session:
description:
- Session details from aos_login generated session.
required: true
type: dict
blueprint_id:
description:
- Name of blueprint, as defined by AOS when created
required: true
type: str
name:
description:
- Name of security-zone and vrf.
required: false
type: str
id:
description:
- ID of virtual network, as defined by AOS when created.
required: false
type: str
state:
description:
- Indicates the expected state of the security-zone.
default: present
choices: ['present', 'absent']
required: false
type: str
vni_id:
description:
- VNI ID number used by security-zone.
choices: 4094 - 16777214
required: false
type: int
vlan_id:
description:
- VLAN ID number used by security-zone.
choices: 1 - 4094
required: false
type: int
routing_policy:
description:
- Import and export policies along with aggregate and
extra prefix definition
required: false
type: dict
'''
# Usage examples shown by ansible-doc. Fixed: each task's module name needs a
# trailing colon to be valid YAML, and a value starting with "{{" must be
# quoted or YAML parses it as a flow mapping.
EXAMPLES = '''
- name: Create new Security Zone
  aos_bp_security_zone:
    session: "{{ aos_session }}"
    blueprint_id: "{{bp_id}}"
    name: "my-sec-zone"
    state: present
  register: seczone
- name: Create new Security Zone static VNI
  aos_bp_security_zone:
    session: "{{ aos_session }}"
    blueprint_id: "{{bp_id}}"
    name: "my-sec-zone"
    vni_id: 4096
    state: present
  register: seczone
- name: Delete Security Zone
  aos_bp_security_zone:
    session: "{{ aos_session }}"
    blueprint_id: "{{bp_id}}"
    id: "{{seczone.id}}"
    state: absent
'''
RETURNS = '''
sz_id:
description: ID of the AOS security-zone created
returned: always
type: string
sample: "db6588fe-9f36-4b04-8def-89e7dcd00c17"
sz_name:
description: name of the AOS security-zone created
returned: always
type: string
sample: "vlan-101"
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
from ansible.module_utils.basic import AnsibleModule
from library.aos import aos_get, aos_post, aos_put, aos_delete, validate_vni_id, \
validate_vlan_id
# Base API resource name; note the handlers below build the per-blueprint
# path ('blueprints/<id>/security-zones') directly and do not reference this
# constant in the code visible here.
ENDPOINT = 'security-zones'
def sec_zone_absent(module, session, endpoint, my_sz):
    """
    Remove the given security-zone when it exists.

    :param module: Ansible built in
    :param session: dict
    :param endpoint: str
    :param my_sz: dict - existing security-zone, or empty when none found
    :return: success(bool), changed(bool), results(dict)
    """
    if not my_sz:
        # Nothing to delete: success, no change.
        return True, False, {'label': '',
                             'id': '',
                             'msg': 'security-zone does not exist'}
    if module.check_mode:
        # Check mode: report what exists without touching the API.
        return True, False, my_sz
    aos_delete(session, endpoint, my_sz['id'])
    return True, True, my_sz
def sec_zone_present(module, session, endpoint, my_sz, vni_id, vlan_id):
    """
    Create new security-zone or modify existing pool

    :param module: Ansible built in
    :param session: dict
    :param endpoint: str
    :param my_sz: dict - existing security-zone, or empty when none found
    :param vni_id: int
    :param vlan_id: int
    :return: success(bool), changed(bool), results(dict)
    """
    margs = module.params
    if not my_sz:
        # Bug fix: 'name' is always a key of module.params (it is declared in
        # the argument spec), so key-membership never failed; test the value
        # instead so a zone with label None cannot be created.
        if not margs.get('name'):
            return False, False, {"msg": "name required to create a new "
                                         "security-zone"}
        new_sz = {"sz_type": "evpn",
                  "label": margs['name'],
                  "vrf_name": margs['name']}
        if vni_id:
            new_sz["vni_id"] = vni_id
        if vlan_id:
            new_sz["vlan_id"] = vlan_id
        if not module.check_mode:
            resp = aos_post(session, endpoint, new_sz)
            new_sz['id'] = resp['id']
            return True, True, new_sz
        # Bug fix: the caller reads results['id'] in exit_json(); supply a
        # placeholder in check mode where no API call assigned a real ID.
        new_sz['id'] = ''
        return True, False, new_sz

    if vni_id or vlan_id:
        # Existing zone with a requested VNI/VLAN: push an update.
        endpoint_put = "{}/{}".format(endpoint, my_sz['id'])
        new_sz = {"sz_type": "evpn",
                  "label": my_sz['label'],
                  "vrf_name": my_sz['vrf_name'],
                  "id": my_sz['id']}
        if vni_id:
            new_sz["vni_id"] = vni_id
        if vlan_id:
            new_sz["vlan_id"] = vlan_id
        if not module.check_mode:
            aos_put(session, endpoint_put, new_sz)
            return True, True, new_sz
        return True, False, new_sz

    # Zone already exists and nothing to change.
    return True, False, my_sz
def sec_zone(module):
    """
    Main function to create, change or delete security zones within an AOS
    blueprint, exiting the module with the outcome.
    """
    margs = module.params
    endpoint = 'blueprints/{}/security-zones'.format(margs['blueprint_id'])
    name = margs.get('name', None)
    uuid = margs.get('id', None)
    vni_id = margs.get('vni_id', None)
    vlan_id = margs.get('vlan_id', None)

    def _checked_int(value, validator):
        # Coerce to int and range-check; fail_json() aborts on any problem.
        try:
            value = int(value)
        except ValueError:
            module.fail_json(msg="Invalid ID: must be an integer")
        problems = validator(value)
        if problems:
            module.fail_json(msg=problems)
        return value

    if vni_id:
        vni_id = _checked_int(vni_id, validate_vni_id)
    if vlan_id:
        vlan_id = _checked_int(vlan_id, validate_vlan_id)

    sz_data = aos_get(margs['session'], endpoint)

    # Look the zone up by id when given, otherwise by label/name.
    # The last matching entry wins, mirroring the original scan order.
    lookup_key = 'id' if uuid else 'label'
    lookup_value = uuid if uuid else name
    my_sz = {}
    for item in sz_data['items'].values():
        if item[lookup_key] == lookup_value:
            my_sz = item

    if margs['state'] == 'absent':
        success, changed, results = sec_zone_absent(module, margs['session'],
                                                    endpoint, my_sz)
    elif margs['state'] == 'present':
        success, changed, results = sec_zone_present(module, margs['session'],
                                                     endpoint, my_sz, vni_id,
                                                     vlan_id)

    if success:
        module.exit_json(changed=changed, name=results['label'],
                         id=results['id'], value=results)
    else:
        module.fail_json(msg=results)
def main():
    """
    Module entry point: declare the argument spec and hand off to sec_zone().
    """
    spec = dict(
        session=dict(required=True, type='dict'),
        blueprint_id=dict(required=True,),
        name=dict(required=False),
        id=dict(required=False),
        state=dict(required=False,
                   choices=['present', 'absent'],
                   default="present",),
        vni_id=dict(required=False),
        vlan_id=dict(required=False),
    )
    # Exactly one of name/id must be supplied, never both.
    module = AnsibleModule(
        argument_spec=spec,
        mutually_exclusive=[('name', 'id')],
        required_one_of=[('name', 'id')],
        supports_check_mode=True
    )
    sec_zone(module)


if __name__ == "__main__":
    main()
|
13,536 | dd8202c5fcd5796cf68e9a927a57a8140bd45b38 | class Koło:
def pole(r):
pole = 3.14*r*r
print('Pole = ',pole)
def obwod(r):
obwod = 2*3.14*r
print('Obwód = ',obwod)
Koło.pole(3) |
13,537 | 90431950df772012bc87bc0b1a47d2156fa1e811 | import re
import datetime
from pyspark.sql import Row
# Three-letter month abbreviations as used in Apache access-log timestamps.
month_map = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
             'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}


def parse_apache_time(s):
    """ Convert Apache time format into a Python datetime object
    Args:
        s (str): date and time in Apache time format
    Returns:
        datetime: datetime object (ignore timezone for now)
    """
    # Fixed-width layout: dd/Mon/yyyy:HH:MM:SS; the timezone suffix after
    # position 20 is intentionally discarded.
    year = int(s[7:11])
    month = month_map[s[3:6]]
    day = int(s[0:2])
    hour, minute, second = int(s[12:14]), int(s[15:17]), int(s[18:20])
    return datetime.datetime(year, month, day, hour, minute, second)
def parseApacheLogLine(logline):
    """ Parse a line in the Apache Common Log format
    Args:
        logline (str): a line of text in the Apache Common Log format
    Returns:
        tuple: either a dictionary containing the parts of the Apache Access Log and 1,
        or the original invalid log line and 0
    """
    match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
    if match is None:
        # Flag a parse failure; the caller counts these separately.
        return (logline, 0)
    size_field = match.group(9)
    # A '-' content size means no body was returned.
    if size_field == '-':
        # Fixed: int() behaves identically to the Python-2-only long() here
        # (py2 ints auto-promote) and keeps this importable under Python 3.
        size = int(0)
    else:
        size = int(size_field)
    return (Row(
        host          = match.group(1),
        client_identd = match.group(2),
        user_id       = match.group(3),
        date_time     = parse_apache_time(match.group(4)),
        method        = match.group(5),
        endpoint      = match.group(6),
        protocol      = match.group(7),
        response_code = int(match.group(8)),
        content_size  = size
    ), 1)

# Raw string so regex escapes (\S, \[, \d, ...) are not treated as (invalid)
# string escape sequences.
APACHE_ACCESS_LOG_PATTERN = r'^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
import sys
import os
from test_helper import Test
# Location of the lab's Apache access-log extract under the local data/ dir.
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
    """ Read and parse log file

    Returns (parsed_logs, access_logs, failed_logs) RDDs: every parsed line,
    the successfully parsed Rows, and the raw lines that failed to parse.
    """
    parsed_logs = (sc
                   .textFile(logFile)
                   .map(parseApacheLogLine)
                   .cache())

    access_logs = (parsed_logs
                   .filter(lambda s: s[1] == 1)
                   .map(lambda s: s[0])
                   .cache())

    failed_logs = (parsed_logs
                   .filter(lambda s: s[1] == 0)
                   .map(lambda s: s[0]))
    failed_logs_count = failed_logs.count()
    if failed_logs_count > 0:
        # Reuse the count computed above instead of re-running the Spark job.
        print('Number of invalid logline: {}'.format(failed_logs_count))
        for line in failed_logs.take(20):
            print('Invalid logline: {}'.format(line))

    # Bug fix: the three counts must be passed as separate format()
    # arguments; the original passed a single tuple to three {} placeholders,
    # which raises an IndexError at run time.
    print('Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format(parsed_logs.count(), access_logs.count(), failed_logs.count()))
    return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
# Bug fix: '%i' placeholders are inert inside str.format(), and passing a
# single tuple fills only the one '{}'; use three '{}' placeholders with the
# statistics as separate arguments.
print('Content Size Avg: {}, Min: {}, Max: {}'.format(
    content_sizes.reduce(lambda a, b: a + b) / content_sizes.count(),
    content_sizes.min(),
    content_sizes.max()))
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
    """ Determine the appropriate format string for the pie chart percentage label
    Args:
        value: value of the pie slice
    Returns:
        str: formated string label; if the slice is too small to fit, returns an empty string for label
    """
    # Slices under 7% are too narrow to carry a readable label.
    if value < 7:
        return ''
    return '{}'.format(value)
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
text.set_text('') # If the slice is small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more then 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
daysWithHosts = sorted(dailyHosts.map(lambda x: x[0]).collect())
hosts = [x[1] for x in sorted(dailyHosts.collect())]
print '{}'.format(hosts)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
dayAndHostTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dailyHostsList
dayReqs = access_logs.map(lambda x: (x.date_time.day,1)).reduceByKey(lambda a,b: a+b)
summed = dayReqs.reduceByKey(lambda a,b: a+b)
joined = dayReqs.join(dailyHosts)
avgDailyReqPerHost = joined.map(lambda x: (x[0],x[1][0]/x[1][1]))
avgDailyReqPerHostList = avgDailyReqPerHost.takeOrdered(30)
print 'Average number of daily requests per Hosts is {}'.format(avgDailyReqPerHostList)
avgDailyReqPerHost.cache()
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
daysWithAvg = sorted(avgDailyReqPerHost.map(lambda x: x[0]).collect())
avgs = [x[1] for x in avgDailyReqPerHost.takeOrdered(30, key=lambda x: x[0])]
print '{}'.format(avgs)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
badRecords = (access_logs.filter(lambda x: x.response_code == 404))
print 'Found {} 404 URLs'.format(badRecords.count())
badRecords.cache()
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
badEndpoints = badRecords.map(lambda x: x.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: {}'.format(badUniqueEndpointsPick40)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
badEndpointsCountPairTuple = badRecords.map(lambda x: (x.endpoint,1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a,b: a+b)
print 'Top twenty {}'.format(badEndpointsSum)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, key= lambda x: -x[1])
print 'Top Twenty 404 URLs: {}'.format(badEndpointsTop20)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByHours = [x[1] for x in hourRecordsSorted.takeOrdered(30, key = lambda x: -x[0])]
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
|
13,538 | 3852d22c98dbdfc3ab64af8c3a00ab2cec8e62af | # -*- coding: utf-8 -*-
"""Views for PyBEL-Web."""
from .base_view import ModelView
__all__ = [
'ReportView',
'ExperimentView',
'QueryView',
]
class ReportView(ModelView):
"""View for reports."""
column_exclude_list = ['source', 'calculations', 'source_hash']
column_display_pk = True
column_default_sort = ('created', True)
page_size = 50
can_set_page_size = True
class ExperimentView(ModelView):
"""View for experiments."""
column_exclude_list = ['source', 'result']
class QueryView(ModelView):
"""View for queries."""
column_exclude_list = ['dump']
column_default_sort = ('created', True)
column_display_pk = True
|
13,539 | 322f604b1aec80d402a812506dbe5319d96db7a8 | import os
import glob
path = "dets/*.*"
last_created_file = (max(glob.glob(path), key=os.path.getmtime)).split('/')[1][:-4]
for i in range(int(last_created_file)):
if not os.path.isfile('dets/'+str(i)+'.txt'):
with open('dets/'+str(i) + '.txt', 'a') as logfile:
continue # create empty file if not found |
13,540 | cdfe6d82b852420bfbcc7fbb96cd0b96af5002c6 | def CountDigit(number,digit ):
    # Count how many times `digit` appears in the decimal representation of
    # `number`. Both values are compared character-by-character as strings,
    # so `digit` is expected to be a single digit (0-9).
    str_numbers = str(number)  # e.g. 1221 -> "1221"
    str_number = str(digit)    # the digit we are looking for, as a one-char string
    count = 0
    # Walk every character of the number and tally exact matches.
    for i in str_numbers:
        if i == str_number:
            count+=1
    return count
13,541 | fc42cf1245a694017ca0a47e3c01d6bb28d128b4 | import pandas as pd
import numpy as np
iris_data = pd.read_excel('iris_data.xlxs')
print(iris_data) |
13,542 | ab693173dc8b37c6db4a3d56da5a8dfb8fb91f25 | '''
Sample Input:
2
Sun 10 May 2015 13:54:36 -0700
Sun 10 May 2015 13:54:36 -0000
Sat 02 May 2015 19:54:36 +0530
Fri 01 May 2015 13:54:36 -0000
Sample Output:
25200
88200
Explaination:
In the first query, when we compare the time in UTC for both the time stamps,
we see a difference of 7 hours. which is 7x3600 seconds or 25200 seconds.
Similarly, in the second query, time difference is 5 hours and
30 minutes for time zone adjusting for that we have a difference of 1 day and 30 minutes. Or
24 x 3600 + 30 x 60 = 88200
# 格式化成2016-03-20 11:45:39形式
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# 格式化成Sat Mar 28 22:24:24 2016形式
print time.strftime("%a %b %d %H:%M:%S %Y", time.localtime())
'''
import math
import os
import random
import re
import sys
from datetime import datetime
# Complete the time_delta function below.
def time_delta(t1, t2):
    """Return the absolute difference between two timestamps in whole seconds.

    Both inputs must look like 'Sun 10 May 2015 13:54:36 -0700'
    (day name, day, month name, year, time, UTC offset). The value is
    also echoed to stdout, and returned as a string for the caller.
    """
    fmt = '%a %d %b %Y %H:%M:%S %z'
    first = datetime.strptime(t1, fmt)
    second = datetime.strptime(t2, fmt)
    # Timezone-aware subtraction already normalizes both stamps to UTC.
    seconds = int(abs((first - second).total_seconds()))
    print(seconds)
    return str(seconds)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
for t_itr in range(t):
t1 = input()
t2 = input()
delta = time_delta(t1, t2)
fptr.write(delta + '\n')
fptr.close()
|
13,543 | 548b7995312997f028e6887a612e96757c4a6042 | #------------------------------------------------------------------------------+
# Villian Class
#
# Villians are characters very similar like players. The main difference is
# that they are controlled by the game.
#
#------------------------------------------------------------------------------+
import pygame, custom_classes
import BasicObjects
from custom_classes import Character
class Villian(custom_classes.Character.Character):
    """A computer-controlled enemy character.

    Villians are very similar to players; the main difference is that
    their actions are driven by the game rather than user input.
    """

    # Class-level defaults; per-instance values are assigned in __init__.
    name = "zombie"             # Determines used image and behaviour(?)
    angle = 0                   # Angle of view
    viewing_range = 10          # When does the villian react to the player? texture grid dimension
    position = []               # Level position (instance attribute set in __init__)
    inventory = []              # Available weapons (instance attribute set in __init__)
    equiped_weapon = 0          # Index into `inventory` of the currently used weapon

    # Constructor
    def __init__(self, name, level, position):
        """Create a villian.

        name     -- key into level.all_images selecting the sprite image
        level    -- level object providing the loaded images
        position -- level position of the villian
        """
        pygame.sprite.Sprite.__init__(self)  # needed for subclasses of sprites
        self.baseimage = level.all_images[ name ]  # Load the right image
        self.image = self.baseimage
        self.rect = self.image.get_rect()
        self.position = position
        # BUGFIX: `inventory` used to be the shared class-level list and this
        # appended to it, so every constructed villian added a gun to *all*
        # villians' inventories. Each instance now owns its own list with a
        # default gun.
        self.inventory = [BasicObjects.generateGun()]
        self.hitpoints = [100, 100]
|
13,544 | 1d3074a3c07c32edfc085ebf8641c5196937ba4f | #https://open.kattis.com/problems/apaxiaaans
name = input()
newname = ""
for i in range(len(name)-1):
if name[i] != name[i+1]:
newname += name[i]
print(newname + name[-1])
|
13,545 | 062b6a942784bdbbe0ed88c50b15933fd32783da | 'bag predictions from imputed data sets'
import numpy as np
from glob import glob
input_pattern = '/path/to/many/predictions/dir/*.csv'
output_file = '/path/to/where/you/want/p_imputed_bagged.csv'
# in case we re-run the script in ipython
p = None
files = glob( input_pattern )
for input_file in files:
print input_file
data = np.loadtxt( input_file, skiprows = 1, delimiter = ',' )
try:
p = np.hstack(( p, data[:,1:2] ))
except ValueError:
# the first file
p = data[:,1:2]
ids = data[:,0:1]
print p.shape
# average
p = np.mean( p, axis = 1 ).reshape( -1, 1 )
ids_and_p = np.hstack(( ids, p ))
np.savetxt( output_file, ids_and_p, fmt = [ '%d', '%.10f' ], delimiter = ',', header = 'UserID,Probability1', comments = '' )
|
13,546 | bee83d4ef4b05bc65febb540853bbcff003bcc6e | import numpy as np
import matplotlib.pyplot as plt
def quantile_exponential_distribution(lambda_param, y):
    """Inverse CDF (quantile function) of the Exponential(lambda) distribution.

    Maps a probability y in [0, 1) to the value x with P(X <= x) = y for
    X ~ Exponential(lambda_param). Accepts scalars or numpy arrays.
    """
    survival = 1 - y
    return -np.log(survival) / lambda_param
def main():
n = 10000
exponential_samples = np.random.exponential(1,n)
plt.hist(exponential_samples)
plt.show()
uniform_samples = np.random.uniform(0,1,n)
plt.hist(uniform_samples)
plt.show()
transformed_exponential_samples = quantile_exponential_distribution(lambda_param=1, y=uniform_samples)
plt.hist(transformed_exponential_samples)
plt.show()
if __name__ == "__main__":
main() |
13,547 | 3ff09d4c5b6e62687e2beba5b0ad5e44bd75a1ae | # Generated by Django 2.2.7 on 2019-11-27 15:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('desecapi', '0011_user_id_to_uuid'),
]
operations = [
migrations.AlterModelOptions(
name='donation',
options={'managed': False},
),
]
|
13,548 | d093c80fed7a3dfc8f74e017ada74d4d5ae81723 | class TransactionOutputRow:
    # A view of one row inside a TransactionOutput: the parent stores columns
    # in `transaction_output.data` (column name -> list of values), and this
    # wrapper indexes every column at a fixed row position.
    def __init__(self, transaction_output, row_index):
        # transaction_output: parent object holding column data and evaluations
        # row_index: position of this row within each column's value list
        self.transaction_output = transaction_output
        self.row_index = row_index
    def __getitem__(self, item):
        # row[column_name] -> value of that column at this row.
        return self.transaction_output.data[item][self.row_index]
    def __contains__(self, item):
        # `column_name in row` -> whether the parent output has that column.
        return item in self.transaction_output.data.keys()
    def as_dict(self):
        # Materialize this row as {column_name: value}.
        return {key:self.transaction_output.data[key][self.row_index] for key in self.transaction_output.data}
    def explain(self):
        # Delegate to the per-column evaluation objects for this row
        # (presumably they print/emit an explanation; defined elsewhere).
        for pred_col in self.transaction_output.evaluations:
            self.transaction_output.evaluations[pred_col][self.row_index].explain()
    def __str__(self):
        return str(self.as_dict())
    def as_list(self):
        #Note that here we will not output the confidence columns
        # Evaluation objects for this row, ordered by the transaction's
        # declared column order (transaction.lmd['columns']).
        return [self.transaction_output.evaluations[col][self.row_index] for col in self.transaction_output.transaction.lmd['columns']]
    @property
    def _predicted_values(self):
        # {predicted_column: predicted_value} for every evaluated column.
        return {pred_col:self.transaction_output.evaluations[pred_col][self.row_index].predicted_value for pred_col in self.transaction_output.evaluations}
|
13,549 | c5869fd46cd70044af9687ae3ce7e0bb0854c570 | import re
def get_link_from_query(query):
m = re.findall("(https?://\S+)", query)
if len(m) > 0:
return m[0]
return None
def validate_query(query):
return get_link_from_query(query) or re.search("aliexpress", query) or is_digits(query)
def is_digits(query):
return re.search("^\d+$", query)
|
13,550 | 957d73101d1315fd1061eda5bfd4c38e063cd58d | """ Setup dependencies """
# TODO...
|
13,551 | 564da178e367fd93d75ef9dcdd0d63877bc48ccc | from scipy.stats import norm
from scipy.stats.mstats import mquantiles
from numpy.random import choice,seed
from numpy import array
seed(756)
Den=[ 0.08015092, 0.10789958, 0.12167541, 0.21219431, 0.07920235, 0.19467892, 0.5431346, 0.13779066]
MnWgt=norm(2.804086007439/2.2,0.02273154621778/2.2)
class BedArea():
    """Normal(mu, sigma) sampler built on a frozen standard-normal source.

    rvs() draws z ~ N(0, 1) and returns mu + sigma * z, i.e. samples
    from N(mu, sigma).
    """
    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma
        # Frozen standard-normal distribution used as the noise source.
        self.nsource = norm(0, 1)
    def rvs(self, size=None):
        """Draw `size` samples (a scalar when size is None)."""
        standard_draws = self.nsource.rvs(size=size)
        return self.mu + self.sigma * standard_draws
BA=BedArea(26.902999877929*10000,26.902999877929*1000)
def RandDen(n):
    # Draw `n` density values uniformly at random, with replacement, from the
    # module-level `Den` list of observed densities (bootstrap-style resample).
    result=choice(array(Den),size=n,replace=True)
    return(result)
p=[.005,.025,.05,.125,.5,.875,.95,975,.995]
p=[.125,.5,.875]
n=10000
B=BA.rvs(size=n)*MnWgt.rvs(size=n)* RandDen(n)
#print(B)
print(mquantiles(B,p))
|
13,552 | f55a2fdad886867945580c168fc99fba6d352558 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
xls2001 = pd.read_excel("./2001.xls")
xls2002 = pd.read_excel("./2002.xls")
xls2003 = pd.read_excel("./2003.xls")
xls2004 = pd.read_excel("./2004.xls")
xls2005 = pd.read_excel("./2005.xls")
xls2006 = pd.read_excel("./2006.xls")
xls2007 = pd.read_excel("./2007.xls")
xls2008 = pd.read_excel("./2008.xls")
xls2009 = pd.read_excel("./2009.xls")
xls2010 = pd.read_excel("./2010.xls")
xls2011 = pd.read_excel("./2011.xls")
xls2012 = pd.read_excel("./2012.xls")
xls2013 = pd.read_excel("./2013.xls")
xls2014 = pd.read_excel("./2014.xls")
xls2015 = pd.read_excel("./2015.xls")
xls2015.shape
|
13,553 | 1342ed1686f80f6e8681a6f8f359a5b7a34b1d91 | from django.contrib import admin
from tagbase.models import *
admin.site.register(FunctionTag)
admin.site.register(NounTag)
admin.site.register(VerbTag)
autorobo, success = User.objects.get_or_create(username='autorobo', password='autorobo')
def import_tags():
HUB_FILE_PATH = 'tagbase/fixtures/wired_hubs.txt'
TERMINAL_FILE_PATH = 'tagbase/fixtures/wired_terminals.txt'
f = file(HUB_FILE_PATH, "r")
lines = f.readlines()
f.close()
for s in lines:
s = s.replace('\n', '')
NounTag.objects.get_or_create(user=autorobo, name=s, sub_type='H')
f = file(TERMINAL_FILE_PATH, "r")
lines = f.readlines()
f.close()
for s in lines:
s = s.replace('\n', '')
NounTag.objects.get_or_create(user=autorobo, name=s, sub_type='T')
|
13,554 | 5d2f821c06c8641574f8e53e4200c7c35e92ae49 | from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, String, Date, CLOB, Float
from src.sgd.model import EqualityByIDMixin
from src.sgd.model.bud import Base
from feature import Feature
class Sequence(Base, EqualityByIDMixin):
    """ORM model for the BUD `seq` table: a stored sequence attached to a feature."""
    __tablename__ = 'seq'
    id = Column('seq_no', Integer, primary_key = True)
    feature_id = Column('feature_no', String, ForeignKey(Feature.id))
    seq_version = Column('seq_version', Date)
    seq_type = Column('seq_type', String)
    source = Column('source', String)
    is_current = Column('is_current', String)  # 'Y'/'N'-style flag — presumably 'Y' marks the current version; confirm against data
    seq_length = Column('seq_length', Integer)
    ftp_file = Column('ftp_file', String)
    residues = Column('residues', CLOB)  # raw sequence text
    date_created = Column('date_created', Date)
    created_by = Column('created_by', String)
    # All locations mapped onto this sequence (join on Feat_Location.seq_no).
    feat_locations = relationship('Feat_Location', primaryjoin="Feat_Location.sequence_id==Sequence.id")
    feature = relationship('Feature', backref='sequences')
    @hybrid_property
    def current_feat_location(self):
        """Return the first feat_location flagged is_current == 'Y', or None."""
        current = [ofl for ofl in self.feat_locations if ofl.is_current == 'Y']
        if len(current) > 0:
            return current[0]
        else:
            return None
    def __repr__(self):
        data = self.id, self.seq_type, self.is_current
        return 'Sequence(id=%s, type=%s, is_current=%s)' % data
class Feat_Location(Base, EqualityByIDMixin):
__tablename__ = 'feat_location'
id = Column('feat_location_no', Integer, primary_key=True)
feature_id = Column('feature_no', Integer, ForeignKey(Feature.id))
sequence_id = Column('seq_no', Integer, ForeignKey(Sequence.id))
rootseq_id = Column('rootseq_no', Integer, ForeignKey(Sequence.id))
coord_version = Column('coord_version', Date)
min_coord = Column('min_coord', Integer)
max_coord = Column('max_coord', Integer)
strand = Column('strand', String)
is_current = Column('is_current', String)
date_created = Column('date_created', Date)
created_by = Column('created_by', String)
sequence = relationship(Sequence, uselist=False, primaryjoin="Feat_Location.sequence_id==Sequence.id")
feature = relationship(Feature, uselist=False)
class ProteinInfo(Base, EqualityByIDMixin):
__tablename__ = 'protein_info'
id = Column('protein_info_no', Integer, primary_key=True)
feature_id = Column('feature_no', Integer, ForeignKey(Feature.id))
date_created = Column('date_created', Date)
created_by = Column('created_by', String)
molecular_weight = Column('molecular_weight', Integer)
pi = Column('pi', Float)
cai = Column('cai', Float)
length = Column('protein_length', Integer)
n_term_seq = Column('n_term_seq', String)
c_term_seq = Column('c_term_seq', String)
codon_bias = Column('codon_bias', Float)
fop_score = Column('fop_score', Float)
gravy_score = Column('gravy_score', Float)
aromaticity_score = Column('aromaticity_score', Float)
feature = relationship(Feature, uselist=False)
ala = Column('ala', Integer)
arg = Column('arg', Integer)
asn = Column('asn', Integer)
asp = Column('asp', Integer)
cys = Column('cys', Integer)
gln = Column('gln', Integer)
glu = Column('glu', Integer)
gly = Column('gly', Integer)
his = Column('his', Integer)
ile = Column('ile', Integer)
leu = Column('leu', Integer)
lys = Column('lys', Integer)
met = Column('met', Integer)
phe = Column('phe', Integer)
pro = Column('pro', Integer)
thr = Column('thr', Integer)
ser = Column('ser', Integer)
trp = Column('trp', Integer)
tyr = Column('tyr', Integer)
val = Column('val', Integer)
class ProteinDetail(Base, EqualityByIDMixin):
__tablename__ = 'protein_detail'
id = Column('protein_detail_no', Integer, primary_key=True)
info_id = Column('protein_info_no', Integer, ForeignKey(ProteinInfo.id))
group = Column('protein_detail_group', String)
type = Column('protein_detail_type', String)
value = Column('protein_detail_value', String)
min_coord = Column('min_coord', Integer)
max_coord = Column('max_coord', Integer)
date_created = Column('date_created', Date)
created_by = Column('created_by', String)
info = relationship(ProteinInfo, uselist=False, backref='details') |
13,555 | 526e1768d904d15bd5b92091bb64df8a7e71070a | from mnis.mnislib import \
getCurrentCommonsMembers, \
getCommonsMembersOn, \
getCommonsMembersBetween, \
getCommonsMembersAtElection, \
getCommonsMembers, \
getIdForMember, \
getListNameForMember, \
getGenderForMember, \
getDateOfBirthForMember, \
getConstituencyForMember, \
getPartyForMember, \
getServiceDataForMember, \
getSummaryDataForMembers, \
saveSummaryDataForMembers, \
downloadMembers \ |
13,556 | 3fa6e4f865210bbbd58ff82871fabc50ad399f6b | import tensorflow as tf
import keras.backend.tensorflow_backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import MaxPooling1D
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization
from keras import losses
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
import data
import numpy as np
import json
import sys, os
import matplotlib.pyplot as pp
json_str = json.load(open('mymodel.json'))
model = model_from_json(json_str)
model_path = 'mymodel.h5'
if os.path.exists(model_path):
model.load_weights(model_path)
params = {'data_dir':'../data/day','batch_size':32, 'win_len':100, 'predict_len':200}
feeder = data.HSFeeder(params)
batch_data= feeder.generate_one_sample()
X = batch_data['x']
Y = batch_data['y']
base = batch_data['base_price']
original_x = batch_data['original_x']
preds = model.predict(X).reshape(-1,3)
Y = Y
print(preds)
i = -1
for pred in preds:
i = i + 1
sample = original_x[i,:,1].reshape(-1,1)
pp.figure(figsize = [300, 100])
pp.plot(range(100),sample, c='b')
if pred[1] > 0.5:
pp.plot(range(100,300),Y[i],c='r')
if pred[2] > 0.5:
pp.plot(range(100,300),Y[i],c='g')
if pred[0] > 0.5:
pp.plot(range(100,300),Y[i],c='y')
pp.show() |
13,557 | f79c1efe561f22a348d2774ec02c630759cdcd42 | from hurley.server import app
|
13,558 | 6f62bb245df6da4e660f68cc18f3bef80e452106 | from enum import Enum
class Currencies(Enum):
    """Supported currencies; each value is the display label (symbol + name)."""
    USD = "$ US Dollar"
    GBP = "£ Pound Sterling"
    EUR = "€ Euro"
|
13,559 | 31618f55a14b74f1e7ec067890e19263fbf3aa27 | from typing import Dict
# Path to Google's pre-trained word2vec model (.bin file)
WORD2VEC_MODEL_FQN: str = "models/word2vec/GoogleNews-vectors-negative300.bin"
# Path to Stanford's pre-trained GloVe model (.txt file)
GLOVE_MODEL_FQN: str = "models/glove/glove.840B.300d.txt"
# Dimension of each embedding.
F: Dict[str, int] = {"word2vec": 300, "glove": 300, "bert": 1024}
# Path to the metadata file.
META_FQN = "results/_phq9_diffs_with_paths.tsv"
# Path to the paired.json corpus and predictions file.
PAIRED_FQN = "results/paired7.json"
# Path to the paired.json corpus and predictions file.
TABLE2_FQN = "results/table2.tsv"
# Path to the table 3 intermediate file.
TABLE3_FQN = "results/table3.tsv"
# Path to the PHQ term file.
PHQ_TERM_FQN = "data/clinical-terms-v3.tsv"
# Path to the vocabulary file of all possible words.
VOCAB_FQN = "data/english.txt"
# Which dimensions to analyze. That is, each item in the below list
# will be a subsection of the table. i.e., Gender will be broken down into male/female (its unique values).
DIMENSIONS = ["gender_imputed", "Num_sess", "Age_ses1", "PHQ9_total_ses"]
|
13,560 | 45c93b61fe2caced9b39340962ad31497ce53a04 | import turtle
from random import randrange
def draw_tree(branch_len: float, t: turtle.Turtle):
    """Recursively draw a symmetric binary fractal tree with turtle `t`.

    Each branch is `branch_len` long; child branches are 15 units shorter
    and splay 20 degrees to either side of the parent. Recursion stops
    once a branch would be 5 units or shorter.
    """
    if branch_len > 5:
        # Thicker strokes and a lower green component for longer branches.
        t.width(width=int(branch_len) // 10)
        t.pencolor((0.15, (150 - branch_len) / 150, 0.25))
        t.forward(branch_len)
        t.right(20)
        draw_tree(branch_len=branch_len - 15, t=t)
        t.left(40)  # swing past center to draw the mirrored branch
        draw_tree(branch_len=branch_len - 15, t=t)
        t.right(20)  # restore the original heading
        # Retrace back to the branch base with the pen lifted.
        t.penup()
        t.backward(branch_len)
        t.pendown()
def draw_randomish_tree(branch_len: float, t: turtle.Turtle):
    """Recursively draw a randomized fractal tree with turtle `t`.

    Like draw_tree, but each branch's length is jittered upward by 0-9
    units and the two child branches fork at independent random angles
    between 5 and 54 degrees.
    """
    if branch_len > 5:
        branch_len = branch_len + randrange(0, 10)  # random extra length (0-9)
        turn_angle_1 = randrange(5, 55)  # right-hand fork angle
        turn_angle_2 = randrange(5, 55)  # left-hand fork angle
        t.width(width=int(branch_len) // 10)
        # Green component fades with branch length; offset by the max jitter.
        t.pencolor((0.15, (150 + 10 - branch_len) / (150 + 10), 0.25))
        t.forward(branch_len)
        t.right(turn_angle_1)
        draw_randomish_tree(branch_len=branch_len - 15, t=t)
        t.left(turn_angle_1 + turn_angle_2)  # undo the right turn, then turn left
        draw_randomish_tree(branch_len=branch_len - 15, t=t)
        t.right(turn_angle_2)  # restore the original heading
        # Walk back to the branch base with the pen lifted.
        t.penup()
        t.backward(branch_len)
        t.pendown()
def main():
t = turtle.Turtle()
s = turtle.Screen()
t.left(90)
t.up()
t.backward(100)
t.down()
# draw_tree(150, t)
# draw_randomish_tree(150, t)
draw_randomish_tree(75, t)
s.exitonclick()
main()
|
13,561 | e79b731baf4fab3d4019971dd124c92beba2148f | from .env import Env
|
13,562 | edbdb9c336216bda071e46253c9a4a61093711e9 | __author__ = 'Koh Wen Yao'
from collections import OrderedDict
########################################################################
# General Constants
########################################################################
SCRIPT_RUN_INTERVAL_MINUTES = 1
DATA_RETRIEVAL_BUFFER_MINUTES = 3
MONITORING_TIME_MINUTES = SCRIPT_RUN_INTERVAL_MINUTES
DATA_RETRIEVAL_TIME_DELTA_MINUTES = SCRIPT_RUN_INTERVAL_MINUTES + DATA_RETRIEVAL_BUFFER_MINUTES
ROWS_TO_FETCH = 2000 # REDUCE IF THERE ARE TIMEOUT ERRORS WHILE IMPORTING TO ELASTICSEARCH
########################################################################
# Authentication Constants
########################################################################
# [i.name for i in ec2connection.get_all_regions()] << USE THIS FUNCTION TO GET ALL REGIONS
REGION_LIST = ['ap-northeast-1',
'ap-southeast-2',
'ap-southeast-1',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'root'
########################################################################
# Database Constants
########################################################################
DATABASE_NAME = 'monitoring'
TABLE_NAME_EC2 = 'ec2datapoints'
TABLE_NAME_ELB = 'elbdatapoints'
TABLE_NAME_RDS = 'rdsdatapoints'
COLUMN_NAME_EC2_ACCOUNT_NAME = 'account_name'
COLUMN_NAME_EC2_AMI_ID = 'ami_id'
COLUMN_NAME_EC2_INSTANCE_ID = 'instance_id'
COLUMN_NAME_EC2_INSTANCE_TYPE = 'instance_type'
COLUMN_NAME_EC2_KEY_NAME = 'key_name'
COLUMN_NAME_EC2_METRIC = 'metric'
COLUMN_NAME_EC2_REGION = 'region'
COLUMN_NAME_EC2_SECURITY_GRP = 'security_group'
COLUMN_NAME_EC2_SERVICE_TYPE = 'service_type'
COLUMN_NAME_EC2_TIMESTAMP = 'timestamp'
COLUMN_NAME_EC2_UNIT = 'unit'
COLUMN_NAME_EC2_VALUE = 'value'
COLUMN_NAME_EC2_VIRT_TYPE = 'virtualization_type'
COLUMN_NAME_ELB_ACCOUNT_NAME = 'account_name'
COLUMN_NAME_ELB_LOAD_BALANCER_NAME = 'load_balancer_name'
COLUMN_NAME_ELB_METRIC = 'metric'
COLUMN_NAME_ELB_REGION = 'region'
COLUMN_NAME_ELB_TIMESTAMP = 'timestamp'
COLUMN_NAME_ELB_SERVICE_TYPE = 'service_type'
COLUMN_NAME_ELB_UNIT = 'unit'
COLUMN_NAME_ELB_VALUE = 'value'
COLUMN_NAME_RDS_ACCOUNT_NAME = 'account_name'
COLUMN_NAME_RDS_ENGINE = 'engine'
COLUMN_NAME_RDS_INSTANCE_CLASS = 'instance_class'
COLUMN_NAME_RDS_METRIC = 'metric'
COLUMN_NAME_RDS_MULTI_AZ = 'multi_az'
COLUMN_NAME_RDS_NAME = 'rds_name'
COLUMN_NAME_RDS_REGION = 'region'
COLUMN_NAME_RDS_SECURITY_GRP = 'security_group'
COLUMN_NAME_RDS_TIMESTAMP = 'timestamp'
COLUMN_NAME_RDS_UNIT = 'unit'
COLUMN_NAME_RDS_VALUE = 'value'
PRIMARY_KEYS_EC2 = [COLUMN_NAME_EC2_ACCOUNT_NAME,
COLUMN_NAME_EC2_REGION,
COLUMN_NAME_EC2_INSTANCE_ID,
COLUMN_NAME_EC2_METRIC,
COLUMN_NAME_EC2_SECURITY_GRP,
COLUMN_NAME_EC2_TIMESTAMP]
PRIMARY_KEYS_ELB = [COLUMN_NAME_ELB_ACCOUNT_NAME,
COLUMN_NAME_ELB_REGION,
COLUMN_NAME_ELB_LOAD_BALANCER_NAME,
COLUMN_NAME_ELB_METRIC,
COLUMN_NAME_ELB_TIMESTAMP]
PRIMARY_KEYS_RDS = [COLUMN_NAME_ELB_ACCOUNT_NAME,
COLUMN_NAME_RDS_REGION,
COLUMN_NAME_RDS_NAME,
COLUMN_NAME_RDS_METRIC,
COLUMN_NAME_RDS_SECURITY_GRP,
COLUMN_NAME_RDS_TIMESTAMP]
########################################################################
# Addresses/Ports Constants
########################################################################
ELASTICSEARCH_HOST = 'localhost'
ELASTICSEARCH_PORT = '9200'
ELASTICSEARCH_URL = 'http://{0}:{1}'.format(ELASTICSEARCH_HOST, ELASTICSEARCH_PORT)
KIBANA_PORT = "5601"
########################################################################
# Service Constants
########################################################################
SERVICE_TYPE_EC2 = 'EC2'
SERVICE_TYPE_ELB = 'ELB'
SERVICE_TYPE_RDS = 'RDS'
########################################################################
# Elasticsearch Constants
########################################################################
ELASTICSEARCH_INDEX_NAME = DATABASE_NAME
########################################################################
# Dictionaries
########################################################################
REGION_POOL_DICTIONARY = {'emily': 3,
'maxine': 2,
'voip': 3}
POOL_DICTIONARY = {'emily': 9,
'maxine': 2,
'voip': 2}
NAMESPACE_DICTIONARY = {SERVICE_TYPE_EC2: "AWS/EC2",
SERVICE_TYPE_ELB: "AWS/ELB",
SERVICE_TYPE_RDS: "AWS/RDS"}
PRIMARY_KEY_DICTIONARY = {SERVICE_TYPE_EC2: PRIMARY_KEYS_EC2,
SERVICE_TYPE_ELB: PRIMARY_KEYS_ELB,
SERVICE_TYPE_RDS: PRIMARY_KEYS_RDS}
# CHANGE NONE TO A SUITABLE UNIT TO ENABLE MONITORING OF THE PARTICULAR METRIC
EC2_METRIC_UNIT_DICTIONARY = {"CPUCreditBalance": (None, 'Average'),
"CPUCreditUsage": (None, 'Average'),
"CPUUtilization": ('Percent', 'Average'),
"DiskReadBytes": ('Bytes', 'Average'),
"DiskReadOps": (None, 'Average'),
"DiskWriteBytes": ('Bytes', 'Average'),
"DiskWriteOps": (None, 'Average'),
"NetworkIn": ('Bytes', 'Average'),
"NetworkOut": ('Bytes', 'Average'),
"StatusCheckFailed_Instance": (None, 'Average'),
"StatusCheckFailed_System": (None, 'Average'),
"StatusCheckFailed": (None, 'Average'),
"VolumeIdleTime": (None, 'Average'),
"VolumeQueueLength": (None, 'Average'),
"VolumeReadBytes": (None, 'Average'),
"VolumeReadOps": (None, 'Average'),
"VolumeTotalReadTime": (None, 'Average'),
"VolumeTotalWriteTime": (None, 'Average'),
"VolumeWriteBytes": (None, 'Average'),
"VolumeWriteOps": (None, 'Average')
}
# CHANGE NONE TO A SUITABLE UNIT TO ENABLE MONITORING OF THE PARTICULAR METRIC
ELB_METRIC_UNIT_DICTIONARY = {"BackendConnectionErrors": (None, 'Sum'),
"HealthyHostCount": ('Count', 'Average'),
"HTTPCode_Backend_2XX": ('Count', 'Sum'),
"HTTPCode_Backend_3XX": (None, 'Sum'),
"HTTPCode_Backend_4XX": ('Count', 'Sum'),
"HTTPCode_Backend_5XX": ('Count', 'Sum'),
"HTTPCode_ELB_4XX": (None, 'Sum'),
"HTTPCode_ELB_5XX": ('Count', 'Sum'),
"Latency": ('Seconds', 'Average'),
"RequestCount": ('Count', 'Sum'),
"SpilloverCount": (None, 'Sum'),
"SurgeQueueLength": (None, 'Sum'),
"UnHealthyHostCount": ('Count', 'Average')
}
# CHANGE NONE TO A SUITABLE UNIT TO ENABLE MONITORING OF THE PARTICULAR METRIC
RDS_METRIC_UNIT_DICTIONARY = {"BinLogDiskUsage": ('Bytes', 'Average'),
"CPUUtilization": ('Percent', 'Average'),
"CPUCreditUsage": (None, 'Average'),
"CPUCreditBalance": (None, 'Average'),
"DatabaseConnections": ('Count', 'Average'),
"DiskQueueDepth": ('Count', 'Average'),
"FreeableMemory": ('Bytes', 'Average'),
"FreeStorageSpace": ('Bytes', 'Average'),
"ReplicaLag": ('Seconds', 'Average'),
"SwapUsage": ('Bytes', 'Average'),
"ReadIOPS": ('Count/Second', 'Average'),
"WriteIOPS": ('Count/Second', 'Average'),
"ReadLatency": ('Seconds', 'Average'),
"WriteLatency": ('Seconds', 'Average'),
"ReadThroughput": ('Bytes/Second', 'Average'),
"WriteThroughput": ('Bytes/Second', 'Average'),
"NetworkReceiveThroughput": ('Bytes/Second', 'Average'),
"NetworkTransmitThroughput": ('Bytes/Second', 'Average')}
EC2_DATAPOINT_ATTR_TYPE_DICTIONARY = OrderedDict([(COLUMN_NAME_EC2_ACCOUNT_NAME, 'VARCHAR(32)'),
(COLUMN_NAME_EC2_AMI_ID, 'VARCHAR(16)',),
(COLUMN_NAME_EC2_INSTANCE_ID, 'VARCHAR(16)',),
(COLUMN_NAME_EC2_INSTANCE_TYPE, 'VARCHAR(16)'),
(COLUMN_NAME_EC2_KEY_NAME, 'VARCHAR(64)'),
(COLUMN_NAME_EC2_METRIC, 'VARCHAR(32)'),
(COLUMN_NAME_EC2_REGION, 'VARCHAR(16)'),
(COLUMN_NAME_EC2_SECURITY_GRP, 'VARCHAR(64)'),
(COLUMN_NAME_EC2_SERVICE_TYPE, 'VARCHAR(16)'),
(COLUMN_NAME_EC2_TIMESTAMP, 'DATETIME'),
(COLUMN_NAME_EC2_UNIT, 'VARCHAR(16)'),
(COLUMN_NAME_EC2_VALUE, 'FLOAT'),
(COLUMN_NAME_EC2_VIRT_TYPE, 'VARCHAR(16)')
])
ELB_DATAPOINT_ATTR_TYPE_DICTIONARY = OrderedDict([(COLUMN_NAME_ELB_ACCOUNT_NAME, 'VARCHAR(32)'),
(COLUMN_NAME_ELB_LOAD_BALANCER_NAME, 'VARCHAR(32)'),
(COLUMN_NAME_ELB_METRIC, 'VARCHAR(32)'),
(COLUMN_NAME_ELB_REGION, 'VARCHAR(16)'),
(COLUMN_NAME_ELB_SERVICE_TYPE, 'VARCHAR(16)'),
(COLUMN_NAME_ELB_TIMESTAMP, 'DATETIME'),
(COLUMN_NAME_ELB_UNIT, 'VARCHAR(16)'),
(COLUMN_NAME_ELB_VALUE, 'FLOAT'),
])
RDS_DATAPOINT_ATTR_TYPE_DICTIONARY = OrderedDict([(COLUMN_NAME_RDS_ACCOUNT_NAME, 'VARCHAR(32)'),
(COLUMN_NAME_RDS_ENGINE, 'VARCHAR(16)'),
(COLUMN_NAME_RDS_INSTANCE_CLASS, 'VARCHAR(32)'),
(COLUMN_NAME_RDS_METRIC, 'VARCHAR(32)'),
(COLUMN_NAME_RDS_MULTI_AZ, 'VARCHAR(5)'),
(COLUMN_NAME_RDS_NAME, 'VARCHAR(64)'),
(COLUMN_NAME_RDS_REGION, 'VARCHAR(16)'),
(COLUMN_NAME_RDS_SECURITY_GRP, 'VARCHAR(64)'),
(COLUMN_NAME_RDS_TIMESTAMP, 'DATETIME'),
(COLUMN_NAME_RDS_UNIT, 'VARCHAR(16)'),
(COLUMN_NAME_RDS_VALUE, 'FLOAT'),
])
|
13,563 | 4f9177860a247b3c4326f0d9dc9c43c69aa9c2d4 | import logics_func
print("Привет. Я буду загадывать тебе примеры а ты разгадывай.")
# запуск первой функции модуля logics_func (и последующие запуски функций)
logics_func.multiplication()
|
13,564 | 006e4ca3abac2de5bbfeac47d9afed9d3d3869f5 | from keras import applications
from keras.models import Sequential
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, Activation, AveragePooling1D
from keras.callbacks import CSVLogger
import tensorflow as tf
from scipy.ndimage import imread
import numpy as np
import random
from keras.layers import LSTM
from keras.layers import Conv1D, MaxPooling1D, LeakyReLU
from keras import backend as K
import keras
from keras.callbacks import CSVLogger, ModelCheckpoint
from keras.backend.tensorflow_backend import set_session
from keras import optimizers
import h5py
from sklearn.preprocessing import MinMaxScaler
import os
import pandas as pd
import matplotlib.pyplot as plt
import datetime
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
with h5py.File(''.join(['bitcoin2015to2019_5m_256_16.h5']), 'r') as hf:
datas = hf['inputs'].value
labels = hf['outputs'].value
input_times = hf['input_times'].value
output_times = hf['output_times'].value
original_inputs = hf['original_inputs'].value
original_outputs = hf['original_outputs'].value
original_datas = hf['original_datas'].value
scaler = MinMaxScaler()
# split training validation
training_size = int(0.8 * datas.shape[0])
training_datas = datas[:training_size, :, :]
training_labels = labels[:training_size, :, :]
validation_datas = datas[training_size:, :, :]
validation_labels = labels[training_size:, :, :]
validation_original_outputs = original_outputs[training_size:, :, :]
validation_original_inputs = original_inputs[training_size:, :, :]
validation_input_times = input_times[training_size:, :, :]
validation_output_times = output_times[training_size:, :, :]
ground_true = np.append(validation_original_inputs, validation_original_outputs, axis=1)
ground_true_times = np.append(validation_input_times, validation_output_times, axis=1)
step_size = datas.shape[1]
#batch_size = 8
nb_features = datas.shape[2]
# BUG FIX: `Sequential` was used below but never imported anywhere in this
# script, so model construction raised a NameError. Import it here.
from keras.models import Sequential

# 1-D CNN over (step_size, nb_features) windows: two strided ReLU
# convolutions extract features; a final linear convolution maps back to
# `nb_features` output channels.
model = Sequential()
model.add(Conv1D(activation='relu', input_shape=(step_size, nb_features), strides=1, filters=16, kernel_size=64))
model.add(Conv1D(activation='relu', strides=2, filters=16, kernel_size=64))
model.add(Conv1D(strides=2, filters=nb_features, kernel_size=34))

# Restore the pre-trained weights and compile for MSE regression.
# (The large commented-out alternative 2-/3-layer architectures that used to
# live here were removed — see version control for those experiments.)
model.load_weights('weights/bitcoin2015to2019_5m_close_CNN_3_relu_256_16-997-0.69469.hdf5')
model.compile(loss='mse', optimizer='adam')
# Undo the MinMax scaling: map the network's predictions back to original
# price units, one feature (column) at a time.
predicted = model.predict(validation_datas)
predicted_inverted = []
for i in range(original_datas.shape[1]):
    # Fit the scaler on each original column so inverse_transform uses that
    # column's min/max range.
    scaler.fit(original_datas[:, i].reshape(-1, 1))
    predicted_inverted.append(scaler.inverse_transform(predicted[:, :, i]))
print(np.array(predicted_inverted).shape)
#get only the close data
ground_true = ground_true[:, :, 0].reshape(-1)
ground_true_times = ground_true_times.reshape(-1)
ground_true_times = pd.to_datetime(ground_true_times, unit='s')
# since we are appending in the first dimension
predicted_inverted = np.array(predicted_inverted)[0, :, :].reshape(-1)
print(np.array(predicted_inverted).shape)
validation_output_times = pd.to_datetime(validation_output_times.reshape(-1), unit='s')
# Tidy frames: one row per timestamp for the actual and predicted close.
ground_true_df = pd.DataFrame()
ground_true_df['times'] = ground_true_times
ground_true_df['value'] = ground_true
prediction_df = pd.DataFrame()
prediction_df['times'] = validation_output_times
prediction_df['value'] = predicted_inverted
print('--정답--')
print(ground_true_df.tail())
print('--예측값--')
print(prediction_df.tail())
#print(ground_true_df.loc[:300])
#print(prediction_df.loc[:300])
# Overlapping validation windows repeat timestamps; keep the first of each.
ground_true_df = ground_true_df.drop_duplicates(['times'])
#print(ground_true_df.loc[:300])
#prediction_df = prediction_df.loc[(prediction_df["times"].dt.year == 2017 )&(prediction_df["times"].dt.month > 7 ),: ]
#ground_true_df = ground_true_df.loc[(ground_true_df["times"].dt.year == 2017 )&(ground_true_df["times"].dt.month > 7 ),:]
from sklearn.metrics import mean_squared_error
from math import sqrt
# RMSE between the true close prices and the inverse-scaled predictions.
mse = mean_squared_error(validation_original_outputs[:, :, 0].reshape(-1), predicted_inverted)
rmse = sqrt(mse)
print(rmse)
plt.figure(figsize=(20, 10))
plt.plot(ground_true_df.times, ground_true_df.value, label='Actual')
plt.plot(prediction_df.times, prediction_df.value, 'ro', label='Predicted')
plt.legend(loc='upper left')
plt_name = 'bitcoin2015to2019_5m_close_CNN_3_relu_256_16-997-0.69469_rmse : ' + str(rmse)
plt.title(plt_name)
plt.savefig(plt_name + '.png')
plt.show()
#real-time drawing
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time

# Create figure for plotting
#plt.figure(3)
fig = plt.figure(4, figsize=(20, 10))
# xs = []
# ys = []
# This function is called periodically from FuncAnimation
#def animate(i, pt, pv, gt, gv):
# Draw x and y lists
ax1 = fig.add_subplot(1, 1, 1)
# Format plot
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('predicted vs true (CNN)')
plt.ylabel('USDT')
#plt.show()
# NOTE(review): `iter` shadows the builtin of the same name.
iter = 1
# Replay the validation period point by point: blue dots are the ground
# truth; when a prediction exists for the same timestamp, draw a red '+'.
for gt, gv in zip(ground_true_df.times, ground_true_df.value):
    idx = prediction_df.index[prediction_df['times'] == gt].tolist()
    #ax1.clear()
    ax1.plot(gt, gv, 'bo', label='predicted')
    if idx:
        print(idx)
        print(idx[0])
        print(gt)
        print(prediction_df['times'][idx[0]])
        #ani = animation.FuncAnimation(fig, animate, fargs=(pt, pv, prediction_df['times'][idx[0]], prediction_df['values'][idx[0]]), interval=10)
        ax1.plot(prediction_df['times'][idx[0]], prediction_df['value'][idx[0]], 'r+', label='Actual')
    plt.pause(0.001)
    # Wipe the axes every ~2000 points to keep redraws responsive.
    if iter >= 2000:
        ax1.clear()
        iter = 1
    iter += 1
    #time.sleep(0.1)
#plt.show()
13,565 | 582c42eab1774056e10ab6df6e156fff64c6605e | import matplotlib.pylab as plt
import numpy as np
x = np.arange(-8, 8, 0.1)
# # Basic Sigmoid
# f = 1 / (1 + np.exp(-x))
# plt.plot(x, f)
# plt.xlabel('x')
# plt.ylabel('f(x)')
# plt.show()
# # With weights
# w1 = 0.5
# w2 = 1.0
# w3 = 2.0
# l1 = 'w= 0.5'
# l2 = 'w = 1.0'
# l3 = 'w= 2.0'
# for w,l in [(w1,l1),(w2,l2),(w3,l3)]:
# f = 1/(1+np.exp(-x*w))
# plt.plot(x,f,label=l)
# plt.xlabel('x')
# plt.ylabel('h_w(x)')
# plt.legend(loc=2)
# plt.show()
# # Bias added
# w =5.0
# b1 = -8.0
# b2 =0.0
# b3 = 8.0
# l1 = 'b=-8.0'
# l2 = 'b=0.0'
# l3 = 'b= 8.0'
# for b,l in [(b1,l1),(b2,l2),(b3,l3)]:
# f = 1/(1+np.exp(-(x*w+b)))
# plt.plot(x,f,label=l)
# plt.xlabel('x')
# plt.ylabel('h_wb(x)')
# plt.legend(loc=2)
# plt.show()
# feed-forward nn
import numpy as np
# Hand-picked parameters for a toy 3-3-1 network: w1/b1 feed the hidden
# layer, w2/b2 feed the single output node.
w1 = np.array([[0.2, 0.2, 0.2], [0.4, 0.4, 0.4], [0.6, 0.6, 0.6]])
w2 = np.zeros((1, 3))
w2[0, :] = np.array([0.5, 0.5, 0.5])
b1 = np.array([0.8, 0.8, 0.8])
b2 = np.array([0.2])
def f(x):
    """Sigmoid activation: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))


def simple_looped_nn_calc(n_layers, x, w, b):
    """Naive feed-forward pass using explicit Python loops.

    `w` and `b` are lists of per-layer weight matrices / bias vectors;
    the activation vector of the final layer is returned.
    """
    node_in = x
    for layer in range(n_layers - 1):
        h = np.zeros((w[layer].shape[0],))
        for i in range(w[layer].shape[0]):
            weighted = sum(w[layer][i][j] * node_in[j]
                           for j in range(w[layer].shape[1]))
            h[i] = f(weighted + b[layer][i])
        node_in = h
    return h
# w = [w1, w2]
# b = [b1, b2]
# #a dummy x input vector
# x = [1.5, 2.0, 3.0]
# h = simple_looped_nn_calc(3, x, w, b)
# print(h)
# For I-'python'
# %timeit simple_looped_nn_calc(3, x, w, b)
# Using Vectorized implementations
def matrix_feed_forward_calc(n_layers, x, w, b):
    """Vectorised feed-forward pass: one matrix product per layer."""
    node_in = x
    for layer in range(n_layers - 1):
        # z^(l+1) = W^(l) h^(l) + b^(l);  h^(l+1) = f(z^(l+1))
        h = f(w[layer].dot(node_in) + b[layer])
        node_in = h
    return h
## Gradient Descent
x_old = 0 # The value does not matter as long as abs(x_new - x_old) > precision
x_new = 6 # The algorithm starts at x=6
gamma = 0.01 # step size
precision = 0.00001
# def df(x):
# y = 4 * x**3 - 9 * x**2
# return y
# while abs(x_new - x_old) > precision:
# x_old = x_new
# x_new += -gamma * df(x_old)
# print("The local minimum occurs at %f" % x_new)
# implementating NN in Python
from sklearn.datasets import load_digits
digits = load_digits()
# print(digits.data.shape)
import matplotlib.pyplot as plt
# plt.gray()
# plt.matshow(digits.images[1])
# plt.show()
# Scale the data
# print(digits.data[0,:])
from sklearn.preprocessing import StandardScaler
X_scale = StandardScaler()
X = X_scale.fit_transform(digits.data)
# print(X[0,:])
from sklearn.model_selection import train_test_split
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
import numpy as np
print(y_test)
def convert_y_to_vect(y):
    """One-hot encode integer class labels (0-9) into an (n, 10) array.

    Idiom: a single vectorised fancy-index assignment replaces the original
    element-by-element Python loop.
    """
    y = np.asarray(y)
    y_vect = np.zeros((len(y), 10))
    y_vect[np.arange(len(y)), y] = 1
    return y_vect
y_v_train = convert_y_to_vect(y_train)
y_v_test = convert_y_to_vect(y_test)
print(y_train[0], y_v_train[0])
# nn_structure = [64, 30, 10]
# def f(x):
# return 1 / (1 + np.exp(-x))
# def f_deriv(x):
# return f(x) * (1 - f(x))
# import numpy.random as r
# def setup_and_init_weights(nn_structure):
# W = {}
# b = {}
# for l in range(1, len(nn_structure)):
# W[l] = r.random_sample((nn_structure[l], nn_structure[l-1]))
# b[l] = r.random_sample((nn_structure[l],))
# return W, b
# def init_tri_values(nn_structure):
# tri_W = {}
# tri_b = {}
# for l in range(1, len(nn_structure)):
# tri_W[l] = np.zeros((nn_structure[l], nn_structure[l-1]))
# tri_b[l] = np.zeros((nn_structure[l],))
# return tri_W, tri_b
# def feed_forward(x, W, b):
# h = {1: x}
# z = {}
# for l in range(1, len(W) + 1):
# # if it is the first layer, then the input into the weights is x, otherwise,
# # it is the output from the last layer
# if l == 1:
# node_in = x
# else:
# node_in = h[l]
# z[l+1] = W[l].dot(node_in) + b[l] # z^(l+1) = W^(l)*h^(l) + b^(l)
# h[l+1] = f(z[l+1]) # h^(l) = f(z^(l))
# return h, z |
13,566 | ab17cbd085336c24e3cb565645f371988ada74ac | import pandas as pd
import numpy as np
import os
import sys
def find_project_dir():
    """Locate the project root: the git checkout when present, else the cwd."""
    if not os.path.isdir("project_git_repo"):
        return os.getcwd()
    return os.path.realpath("project_git_repo/cpd35-clustering-demo")
PROJECT_DIR = find_project_dir()
SCRIPT_DIR = os.path.join(PROJECT_DIR, "assets/jupyterlab")
DATA_DIR = os.path.join(PROJECT_DIR, "assets/data_asset")
sys.path.append(os.path.normpath(SCRIPT_DIR))
print(SCRIPT_DIR)
print(DATA_DIR)
from training import train, evaluate, clusterings
# Load the labelled reference data; the models are trained on features only.
reference_df = pd.read_csv(os.path.join(DATA_DIR, "credit_risk_reference.csv"))
input_df = reference_df.drop(['Risk'], axis=1)

# Training models and select winning one
results = []
for (clustering_name, clustering_op) in clusterings:
    print(clustering_name)
    model = train(input_df, clustering_name, clustering_op)
    result = evaluate(reference_df, clustering_op)
    print("---")
    results.append(result)

# BUG FIX: np.argmax was given a generator expression, which NumPy wraps in
# a 0-d object array, so the "best" index was always 0 regardless of the
# scores. Materialise the score list before taking the argmax.
scores = [r['v_measure'] for r in results]
best_score_idx = int(np.argmax(scores))
print("The winner is: '{}' with V-measure: {}!".format(clusterings[best_score_idx][0], results[best_score_idx]['v_measure']))
13,567 | 2e84960c6072e9e7bb0ed101aee68140327958b4 | #!/usr/bin/env python
"""
ARepA: Automated Repository Acquisition
ARepA is licensed under the MIT license.
Copyright (C) 2013 Yo Sup Moon, Daniela Boernigen, Levi Waldron, Eric Franzosa, Xochitl Morgan, and Curtis Huttenhower
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
SConscript_rpackage.py:
Shared SConscript script to generate R package across all of arepa's modules
R CMD BATCH build
Requirements for R package building:
* man/ directory with description files
** .Rd files per dataset, and a master package file
* data/ directory with matching .RData files
* NAMESPACE file assigning rule to find names
* DESCRIPTION file
/** example **/
Package: $package_name
Type: Package
Title: $dataset_title
Version: $arepa_version
Date: $arepa_date
Author: $arepa_authors
Maintainer: $arepa_maintainer
Description: Automatically generated R package by arepa
Depends: R (>= 2.10.0), affy
Suggests: survival
License: MIT
URL: http://huttenhower.sph.harvard.edu/arepa
"""
import sfle
import arepa
import sys
import pickle
c_strNAMESPACE = r"'exportPattern("'"^[[:alpha:]]+"'")'"
c_fileProgUnpickle = sfle.d( pE, arepa.path_arepa(), sfle.c_strDirSrc, "unpickle.py" )
def funcCheckRStructure( pE, strDatasetName, filePKL, fileNAMESPACE, fileDESCRIPTION, fileManMaster, strNAMESPACE = c_strNAMESPACE ):
    '''
    Completes necessary components for R package building
    Assumes that data/ and man/ directories have the corresponding data and manual files per dataset
    Input:
    fileNAMESPACE = pointer to NAMESPACE file to be tracked
    fileManMaster = pointer to the master manual file in man/
    '''

    def _makeDescription( target, source, env ):
        # SCons builder action: write the R package DESCRIPTION file from the
        # metadata pickle (first source) plus arepa-wide release constants.
        strT, astrSs = sfle.ts( target, source )
        pHash = pickle.load(open(astrSs[0]))
        pHashDescription = { "Package": strDatasetName.replace("-", "."), "Type": "Package", "Title": pHash.get("title"),
                             "Version": arepa.c_strVersion, "Author": ", ".join(arepa.c_astrAuthors),
                             "Date": arepa.c_strDate, "Maintainer": arepa.c_strMaintainer,
                             "Depends": "R (>= 2.10.0), affy", "Suggests": "survival", "URL": arepa.c_strURL,
                             "License": arepa.c_strLicense, "Description": "ARepA generated package" }
        with open(strT, "w") as outputf:
            for k, v in list(pHashDescription.items()):
                outputf.write( k + ": " + v + "\n" )

    def _makeMasterMan( target, source, env ):
        # SCons builder action: write the package-level .Rd master manual stub.
        strT, astrSs = sfle.ts( target, source )
        pHash = pickle.load(open(astrSs[0]))

        def _metaStr( strDescription, strContent ):
            # Render one Rd directive, e.g. \name{...}.
            return "\\"+ strDescription + "{" + strContent + "}"

        strDataAccession = arepa.cwd( ) + "-package"
        strDataTitle = pHash.get( "title" ) or ""
        strDataGloss = pHash.get( "gloss" ) or ""
        aastrOut = [("name", strDataAccession), ("title", strDataTitle), ("description", strDataGloss)]
        with open( strT, "w" ) as outputf:
            for strDescription, strContent in aastrOut:
                outputf.write( _metaStr( strDescription, strContent ) + "\n" )
                #print _metaStr( strDescription, strContent )

    # Make NAMESPACE File
    return ( sfle.scmd( pE, "echo " + strNAMESPACE, fileNAMESPACE ) +
             # Make DESCRIPTION File
             Command( fileDESCRIPTION, filePKL, _makeDescription ) +
             # Make Master Man File
             Command( fileManMaster, filePKL, _makeMasterMan ) )
def funcMakeRPackage( pE, strDirectory, filePackageLog ):
    '''
    Compile the R package
    Input:
    strDirectory = directory to look in
    filePackageLog = log file for scons to track
    '''

    def _compileR( target, source, env ):
        # SCons action: make the package directory traversable, run
        # `R CMD build` on it, then write a marker line to the log target so
        # SCons can track that the build completed.
        strT, astrSs = sfle.ts( target, source )
        sfle.ex( ["chmod", "755", strDirectory] )
        sfle.ex( ["R", "CMD", "build", strDirectory] )
        with open( strT, "w" ) as outputf:
            outputf.write( "R package compiled OK")

    return pE.Command( filePackageLog, None, _compileR )
|
13,568 | a284a20dd9a00bef8f5875da4d86017cf6d0f129 | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set(style="ticks")

# Three stacked panels sharing the NMOVE axis: U/N, E/N and density.
fig, axes = plt.subplots(3, 1, figsize=(7.5, 8.8))
fig.subplots_adjust(
    left=0.13,
    right=0.97,
    bottom=0.08,
    top=.94,
    hspace=0.33,
    wspace=0.24
)

dados = np.loadtxt('outB0TMC2.avr.dat')
nmove = dados[:, 0]
# (column, y-label) for each panel, in top-to-bottom order.
series = [
    (dados[:, 1], 'U/N'),
    (dados[:, 6], 'E/N'),
    (dados[:, 4], 'Densidade'),
]
for ax, (valores, rotulo) in zip(axes, series):
    ax.plot(nmove, valores, 'blue', alpha=0.55, linewidth=2.5)
    ax.set_xlabel('NMOVE')
    ax.set_ylabel(rotulo)

plt.plot()
plt.show()
|
13,569 | 0152690f3d971d595431e38385e644a5f7cfb45f | import sys
import matplotlib.pyplot as plt
import random
import numpy as np
import argparse
from CSVManager import CSVManager
from KMeans import KMeans
import pandas as pd
import os
from SimplifiedSilhoutte import SimplifiedSilhouette
def main():
    """Cluster the CSV given as argv[1] with K-Means for k in [2, 5) and
    write the Simplified Silhouette (SSWC) of each run to result/result.txt.
    """
    path = sys.argv[1]
    csvManager = CSVManager()
    df = csvManager.read(path)
    df = csvManager.replaceNan(df)
    # FIX: deleteObjectColumns() was called twice in a row with the same
    # argument; the duplicate call was redundant and has been removed.
    formattedCSV = csvManager.deleteObjectColumns(df)
    matrix = csvManager.convertCSVToMatrix(formattedCSV)
    try:
        with open('result/result.txt', 'w') as file:
            res = ''
            for k in range(2, 5):
                kmeans = KMeans(k)
                kmeans.fit(matrix)
                simplifiedSilhouette = SimplifiedSilhouette(
                    formattedCSV, kmeans)
                sswc = simplifiedSilhouette.calculate()
                res += 'K = ' + str(k) + '; ' + 'SSWC = ' + str(sswc) + '\n'
            # Write the whole report once, after all k values have run.
            file.write(res)
    except Exception:
        # Deliberate best-effort: the clustering code cannot cope with empty
        # clusters, so the failure is reported instead of re-raised.
        print("An empty cluster was found, please run the program again. This program does not handle empty clusters")
if __name__ == "__main__":
main()
|
13,570 | 94e4eb97fbcfe2beef57258ffe3878978928f328 | a = int(input())
b = int(input())
l = []
def isPrime(n):
    """Return True when n is a prime number.

    BUG FIX: the original returned True as soon as the *first* trial divisor
    failed to divide n (so isPrime(9) was True), and it fell off the end of
    the loop for n = 2 and 3 (returning None, which is falsy). Every
    candidate divisor up to sqrt(n) must be tested before declaring prime.
    """
    if n < 2:
        return False
    for j in range(2, int(n ** (1 / 2)) + 1):
        if n % j == 0:
            return False
    return True
def findMinArray(arr):
    """Return the smallest element of arr (arr must be non-empty)."""
    smallest = arr[0]
    for value in arr[1:]:
        if value < smallest:
            smallest = value
    return smallest
def sumArray(arr):
    """Return the sum of the elements of arr (0 for an empty array).

    Idiom: the built-in sum() replaces the manual accumulator loop and
    behaves identically, including returning 0 for an empty sequence.
    """
    return sum(arr)
# Collect every prime in the inclusive range [a, b].
for i in range(a, b + 1):
    if isPrime(i):
        l.append(i)
# Print the sum and the minimum of the primes found, or -1 when the range
# contains no prime at all.
if len(l) != 0:
    print(sumArray(l))
    print(findMinArray(l))
else:
    print(-1)
|
13,571 | 5872b3a54e1ac66069991f563e0e7c261425d661 | import random
# Pick a random dessert and announce it.
desserts = ["red velvet cake", "souffle", "ice cream", "macaron", "sundae", "flan", "tarimisu"]
dessert_choice = random.choice(desserts)
# BUG FIX: the original assigned to `desertaa` but printed `dessertaa`,
# which raised a NameError before anything was shown.
print("A dessert" + dessert_choice)

#2 months on SL! Thank ya'll!
import random
# Random "superhero name" generator: one adjective + one noun.
first = ("Super", "Retarded", "Great", "Sexy", "Vegan", "Brave", "Shy", "Cool", "Poor", "Rich", "Fast", "Gummy", "Yummy", "Masked", "Unusual", "American", "Bisexual", "MLG", "Mlg", "lil", "Lil")
second = ("Coder", "Vegan", "Man", "Hacker", "Horse", "Bear", "Goat", "Goblin", "Learner", "Killer", "Woman", "Programmer", "Spy", "Stalker", "Spooderman", "Carrot", "Goat", "Quickscoper", "Quickscoper")
firrst = random.choice(first)
seccond = random.choice(second)
name = firrst + " " + seccond
print("Your name is: " + name)
|
13,572 | f277d633b979e2f37bd7f74cb07d5cf80821b5c1 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
import random
# function to convert sequence strings into k-mer words, default size = 6 (hexamer words)
def getKmers(sequence, size=6):
    """Split `sequence` into overlapping lower-cased k-mer words.

    Default size=6 produces hexamer words; sequences shorter than `size`
    yield an empty list.
    """
    kmers = []
    for start in range(len(sequence) - size + 1):
        kmers.append(sequence[start:start + size].lower())
    return kmers
def get_metrics(y_test, y_predicted):
    """Return (accuracy, weighted precision, weighted recall, weighted F1)."""
    return (
        accuracy_score(y_test, y_predicted),
        precision_score(y_test, y_predicted, average='weighted'),
        recall_score(y_test, y_predicted, average='weighted'),
        f1_score(y_test, y_predicted, average='weighted'),
    )
def get_r0():
    """Pick a basic reproduction number from a fixed set of known epidemics."""
    candidates = (2, 2.1, 4, 4.1, 10, 18)
    return random.choice(candidates)
def get_similar_epidemic(r_0):
    """Map a basic reproduction number to the epidemic it is typical of.

    Returns None for an unrecognised value, matching the fall-through
    behaviour of the original if/elif chain. A table lookup replaces the
    original chain, which inconsistently mixed `if` and `elif`.
    """
    epidemics = {
        2: "Ebola",
        2.1: "Hepatitis C",
        4: "HIV",
        4.1: "SARS",
        10: "Mumps",
        18: "Measels",
    }
    return epidemics.get(r_0)
def check_similarity(new_gene):
    """Train a 4-gram Naive-Bayes classifier on virus1's k-mer words and
    score how well it transfers to the sequences in `new_gene` (virus3).

    Returns a dict of classification metrics plus a reproduction number and
    its closest known epidemic.
    NOTE(review): r_0 comes from get_r0() and is random, not derived from
    the sequences — confirm this placeholder behaviour is intentional.
    """
    virus3 = pd.read_table(new_gene)
    virus1 = pd.read_table('virus1_data.txt')
    virus2 = pd.read_table('virus2_data.txt')
    # Tokenise each sequence into overlapping hexamer "words".
    virus1['words'] = virus1.apply(lambda x: getKmers(x['sequence']), axis=1)
    virus1 = virus1.drop('sequence', axis=1)
    virus2['words'] = virus2.apply(lambda x: getKmers(x['sequence']), axis=1)
    virus2 = virus2.drop('sequence', axis=1)
    virus3['words'] = virus3.apply(lambda x: getKmers(x['sequence']), axis=1)
    virus3 = virus3.drop('sequence', axis=1)
    # Join the k-mers back into space-separated strings for CountVectorizer;
    # the first column of each frame holds the class labels.
    virus1_texts = list(virus1['words'])
    for item in range(len(virus1_texts)):
        virus1_texts[item] = ' '.join(virus1_texts[item])
    y_h = virus1.iloc[:, 0].values  # y_h for virus1
    virus2_texts = list(virus2['words'])
    for item in range(len(virus2_texts)):
        virus2_texts[item] = ' '.join(virus2_texts[item])
    y_c = virus2.iloc[:, 0].values  # y_c for virus2
    virus3_texts = list(virus3['words'])
    for item in range(len(virus3_texts)):
        virus3_texts[item] = ' '.join(virus3_texts[item])
    y_d = virus3.iloc[:, 0].values
    # Bag of 4-grams over the k-mer vocabulary, fitted on virus1 only.
    cv = CountVectorizer(ngram_range=(4, 4))
    X = cv.fit_transform(virus1_texts)
    X_virus2 = cv.transform(virus2_texts)
    X_virus3 = cv.transform(virus3_texts)
    X_train, X_test, y_train, y_test = train_test_split(X, y_h, test_size=0.20, random_state=42)
    ### Multinomial Naive Bayes Classifier ###
    # The alpha parameter was determined by grid search previously
    classifier = MultinomialNB(alpha=0.1)
    classifier.fit(X_train, y_train)
    # y_pred = classifier.predict(X_test)
    # print("Confusion matrix\n")
    # print(pd.crosstab(pd.Series(y_test, name='Actual'), pd.Series(y_pred, name='Predicted')))
    # Predicting the sequences
    y_pred_virus3 = classifier.predict(X_virus3)
    accuracy, precision, recall, f1 = get_metrics(y_d, y_pred_virus3)
    # print("accuracy = %.3f \nprecision = %.3f \nrecall = %.3f \nf1 = %.3f" % (accuracy, precision, recall, f1))
    r0 = get_r0()
    similar_epidemic = get_similar_epidemic(r0)
    return {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1, "r_0": r0, "similar_epidemic": similar_epidemic}
# Script entry: score the held-out virus3 sequences and print the report.
file = 'virus3_data.txt'
res = check_similarity(file)
print(res)
13,573 | 744c53c51e0dc2ae98f1f7a72c88a0cb6dbac056 | from django.views.generic import TemplateView
from toko.apps.products.models import Product
class HomeView(TemplateView):
    """Landing page: renders pages/home.html with the featured products."""

    template_name = 'pages/home.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Prefetch photos so the template does not issue one query per product.
        featured = Product.objects.prefetch_related('photos').featured()
        context['featured_products'] = featured
        return context
|
13,574 | 78e7591bd1e29421e798e49a4eff45b5ccfa11ec | print("Choi")
# The remaining lines of the self-introduction card (the first line, "Choi",
# is printed just above).
for intro_line in ("33age", "wear : glasses", "키 : 170cm", "서울 거주"):
    print(intro_line)
13,575 | 94ea009891366ffa8f1b8eb025f4acfa8998c740 | """
line_text_edit
tbd
author: Michael Redmond
"""
from __future__ import print_function, absolute_import
from .line_text_widget import LineTextWidget
|
13,576 | 57582f251fcd7124a21d7d94c07da8d2433e04fb | # Copyright (c) 2019 Ezybaas by Bhavik Shah.
# CTO @ Susthitsoft Technologies Private Limited.
# All rights reserved.
# Please see the LICENSE.txt included as part of this package.
# EZYBAAS RELEASE CONFIG
EZYBAAS_RELEASE_NAME = 'EzyBaaS'
EZYBAAS_RELEASE_AUTHOR = 'Bhavik Shah CTO @ SusthitSoft Technologies'
EZYBAAS_RELEASE_VERSION = '0.1.4'
EZYBAAS_RELEASE_DATE = '2019-07-20'
EZYBAAS_RELEASE_NOTES = 'https://github.com/bhavik1st/ezybaas'
EZYBAAS_RELEASE_STANDALONE = True
EZYBAAS_RELEASE_LICENSE = 'https://github.com/bhavik1st/ezybaas'
EZYBAAS_SWAGGER_ENABLED = True
# EZYBAAS OPERATIONAL CONFIG
BAAS_NAME = 'ezybaas'
SERIALIZERS_FILE_NAME = 'api'
VIEWS_FILE_NAME = 'api'
URLS_FILE_NAME = 'urls'
MODELS_FILE_NAME = 'models'
TESTS_FILE_NAME = 'tests'
|
13,577 | 104913f96a427954ae8b9417b2f88b2267d62431 | import xml.etree.ElementTree as ET
import pandas as pd
import wget
import urllib.request
# Downloading dataset
#urllib.request.urlretrieve("https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/Sample-employee-XML-file.xml", "new_sample.xml")
#urllib.request.urlretrieve("https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg", "dog.jpg")
wget.download("https://hips.hearstapps.com/hmg-prod.s3.amazonaws.com/images/dog-puppy-on-garden-royalty-free-image-1586966191.jpg")

# create the file structure
employee = ET.Element('employee')
details = ET.SubElement(employee, 'details')
first = ET.SubElement(details, 'firstname')
second = ET.SubElement(details, 'lastname')
third = ET.SubElement(details, 'age')
first.text = 'Shiv'
second.text = 'Mishra'
third.text = '23'

# create a new XML file with the results
mydata1 = ET.ElementTree(employee)
with open("new_sample.xml", "wb") as files:
    mydata1.write(files)

# BUG FIX: `tree` was never defined (the download/parse of the sample file
# is commented out above), so `tree.getroot()` raised a NameError. Parse the
# file we just wrote instead.
# NOTE(review): the column list below matches the IBM sample-employee schema,
# not the tiny XML generated above — findtext() returns None for the missing
# tags instead of crashing; confirm which input file is actually intended.
tree = ET.parse("new_sample.xml")
root = tree.getroot()

columns = ["firstname", "lastname", "title", "division", "building", "room"]
# Collect the rows first and build the frame once: DataFrame.append is
# deprecated/removed in modern pandas and was quadratic anyway.
rows = []
for node in root:
    print("i")
    rows.append(pd.Series(
        [node.findtext("firstname"), node.findtext("lastname"),
         node.findtext("title"), node.findtext("division"),
         node.findtext("building"), node.findtext("room")],
        index=columns))
datatframe = pd.DataFrame(rows, columns=columns)
13,578 | d5b95c26b3d18df6d57dfe9e2f46f81da13c5b7e | class Monad:
    def __init__(self, value):
        # Wrap a plain value in the monadic container.
        self._value = value

    @classmethod
    def unit(cls, val):
        """Lifts a single value into the elevated world
        Signature: a -> E<a>
        Alternative names: return, pure, unit, yield, point"""
        raise NotImplementedError

    @classmethod
    def lift(cls, func):
        """Lifts a function into the elevated world
        Signature: (a->b) -> E<a> -> E<b>
        Alternative names: map, fmap, lift, select"""
        raise NotImplementedError

    @classmethod
    def liftn(cls, func):
        """Combines two (or three, or four) elevated values using a specified function
        Signature: lift2: (a->b->c) -> E<a> -> E<b> -> E<c>,
        lift3: (a->b->c->d) -> E<a> -> E<b> -> E<c> -> E<d>, etc
        Alternative names: map, fmap, lift, select"""
        raise NotImplementedError

    @classmethod
    def apply(cls, func):
        """ Unpacks a function wrapped inside a elevated value into a lifted function
        Signature: E<(a->b)> -> E<a> -> E<b>
        Alternative names: ap"""
        raise NotImplementedError

    @property
    def value(self):
        """ Returns wrapped values"""
        return self._value

    def __eq__(self, other):
        # Equality semantics are left to each concrete monad.
        raise NotImplementedError

    def __str__(self):
        # String form is left to each concrete monad.
        raise NotImplementedError
13,579 | 67877075e0f8a318be0753a038ed5518cebdfa9e | """
https://en.wikipedia.org/wiki/Producer%E2%80%93consumer_problem
A semaphore manages an internal counter
acquire() count -= 1
release() count += 1
The counter can never go below zero
when acquire() finds that it is zero, it blocks, waiting until some other thread
calls release().
works fine when there is only one producer and consumer.
"""
from threading import Thread, Semaphore

# Buffer size
N = 10
# Buffer init — fixed-size ring buffer shared by both threads.
buf = [0] * N
# fill_count counts occupied slots (the consumer waits on it);
# empty_count counts free slots (the producer waits on it).
fill_count = Semaphore(0)
empty_count = Semaphore(N)


def produce():
    # Stand-in for real work: always yields the item 1.
    print("One time produced!")
    return 1


def producer():
    # Endlessly push items into the ring buffer, blocking while it is full.
    front = 0
    while True:
        x = produce()
        empty_count.acquire()   # wait for a free slot
        buf[front] = x
        fill_count.release()    # signal one more filled slot
        front = (front + 1) % N


def consume(y):
    # Stand-in for real work: just report the consumed item.
    print("{} item consumed!".format(y))


def consumer():
    # Endlessly drain the ring buffer, blocking while it is empty.
    rear = 0
    while True:
        fill_count.acquire()    # wait for a filled slot
        y = buf[rear]
        empty_count.release()   # signal one more free slot
        consume(y)
        rear = (rear + 1) % N


# NOTE: both loops run forever; the program must be interrupted to stop.
producer_thread = Thread(target=producer)
consumer_thread = Thread(target=consumer)
producer_thread.start()
consumer_thread.start()
13,580 | 4f81da715bfa6a044dd172f30445ad341e37f1c1 | """
Remember the story of Little Match Girl? By now, you know exactly what matchsticks the little match girl has, please find out a way you can make one square by using up all those matchsticks. You should not break any stick, but you can link them up, and each matchstick must be used exactly one time.
Your input will be several matchsticks the girl has, represented with their stick length. Your output will either be true or false, to represent whether you could make one square using all the matchsticks the little match girl has.
Example 1:
Input: [1,1,2,2,2]
Output: true
Explanation: You can form a square with length 2, one side of the square came two sticks with length 1.
Example 2:
Input: [3,3,3,3,4]
Output: false
Explanation: You cannot find a way to form a square with all the matchsticks.
Note:
The length sum of the given matchsticks is in the range of 0 to 10^9.
The length of the given matchstick array will not exceed 15.
"""
# BUG FIX: `List` was used in the annotations below but never imported, so
# defining the class raised a NameError.
from typing import List


class Solution:
    def makesquare(self, nums: List[int]) -> bool:
        """Return True when all matchsticks in `nums` can form one square.

        Sorting in descending order lets the backtracking search fail fast
        on the longest sticks first.
        """
        if not nums or len(nums) < 4:
            return False
        total = sum(nums)
        if total % 4:
            return False
        nums.sort(reverse=True)
        return self.dfs(nums, [0] * 4, 0, total // 4)

    def dfs(self, nums: List[int], sums: List[int], pos: int, target: int) -> bool:
        """Backtracking helper: try to place stick `pos` on each of 4 sides."""
        if pos == len(nums):
            return True
        for i in range(4):
            if sums[i] + nums[pos] <= target:
                sums[i] += nums[pos]
                if self.dfs(nums, sums, pos + 1, target):
                    return True
                sums[i] -= nums[pos]
        return False
13,581 | 55378cb769b7fbbe9cb6e24e6185813954b45346 | from datetime import datetime
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, redirect
from .forms import CreateQuizForm, CreateQuestionForm
from .models import Quiz, Riddle
from django.views.generic import ListView
from users.models import Profile
from django.contrib import messages
from django.contrib.auth.models import User
import time
# In-memory registry of running quizzes, keyed by quiz id. Each entry holds
# the quiz's question list, the index of the currently shown question and
# the epoch time (ms) at which that question was shown.
# NOTE(review): module-level mutable state shared across requests and not
# thread-safe — confirm this is acceptable for the deployment model.
data_to_quizes = {}


def get_time_of_quiz(type_of_quiz):
    # Epoch milliseconds at which this quiz's current question started.
    return data_to_quizes[type_of_quiz]['time_of_start']


def is_exist_quiz(type_of_quiz):
    # Truthy when the quiz id has already been registered.
    return data_to_quizes.get(type_of_quiz)


def add_type_of_quiz(type_of_quiz, list_of_questions):
    # Register a quiz the first time it is seen; start at question 0 "now".
    if not data_to_quizes.get(type_of_quiz):
        data_to_quizes[type_of_quiz] = {'list_of_questions': list_of_questions, 'current_index': 0, 'time_of_start':
                                        time.time() * 1000}


def get_current_question(type_of_quiz):
    # The Riddle object currently being asked for this quiz.
    return data_to_quizes[type_of_quiz]['list_of_questions'][data_to_quizes[type_of_quiz]['current_index']]


def inc_question_index(type_of_quiz):
    # Advance to the next question, wrapping back to the first one, and
    # restart the per-question timer.
    # NOTE(review): the source's indentation was lost; the timer reset is
    # assumed to apply on every advance, not only on wrap — confirm.
    data_to_quizes[type_of_quiz]['current_index'] += 1
    if len(data_to_quizes[type_of_quiz]['list_of_questions']) == data_to_quizes[type_of_quiz]['current_index']:
        data_to_quizes[type_of_quiz]['current_index'] = 0
    data_to_quizes[type_of_quiz]['time_of_start'] = time.time() * 1000
class QuestionView(ListView):
    """Displays the current question (Riddle) of a quiz."""
    model = Riddle
    template_name = 'quize/quiz.html'
    context_object_name = 'quiz'

    def dispatch(self, request, *args, **kwargs):
        # Capture the (optional) submitted answer, verify the quiz exists,
        # and register it in the in-memory state on first access.
        self.answer_from_user = request.GET.get('answer')
        get_object_or_404(Quiz, id=self.kwargs['quiz_id'])
        if not is_exist_quiz(self.kwargs['quiz_id']):
            add_type_of_quiz(self.kwargs['quiz_id'], Riddle.objects.all().filter(quiz_id=self.kwargs['quiz_id']))
        return super(QuestionView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Expose the current question text plus whether an answer was sent.
        context = super(QuestionView, self).get_context_data(**kwargs)
        context['is_answered'] = self.answer_from_user
        context['text'] = get_current_question(self.kwargs['quiz_id']).text
        context['cur_question'] = self.kwargs['quiz_id']
        return context
def home(request):
    """Render the landing page with every available quiz."""
    return render(request, 'quize/home.html', {'quizes': Quiz.objects.all()})
class QuizListView(ListView):
    """Paginated list of all quizzes (class-based alternative to `home`)."""
    model = Quiz
    template_name = 'quize/home.html'
    context_object_name = 'quizes'
    paginate_by = 5
def rules(request):
    """Static 'Rules' page."""
    context = {'title': 'Rules'}
    return render(request, 'quize/rules.html', context)


def about(request):
    """Static 'About' page."""
    context = {'title': 'About'}
    return render(request, 'quize/about.html', context)
def quiz_refresh(request):
    """AJAX poll: return the current question text and rotate it after 60 s.

    CONSISTENCY FIX: the quiz id was read from request.GET twice into two
    separate variables, and the registry was indexed by hand instead of
    through the existing get_current_question() helper.
    """
    quiz_id = request.GET.get('id')
    data = {
        'text': get_current_question(quiz_id).text
    }
    # Advance once the current question has been shown for more than 60
    # seconds (the client supplies its clock in epoch milliseconds).
    if int(request.GET.get('time')) - get_time_of_quiz(quiz_id) > 60000:
        inc_question_index(quiz_id)
    return JsonResponse(data)
def quiz_check(request):
    """AJAX answer check: award one token and advance on a correct answer."""
    # Profile.objects.get(user_id=user_id).token
    # NOTE(review): debug print of the raw query string — remove for prod?
    print(request.GET)
    type_of_quiz = request.GET.get('id')
    answer = request.GET.get('answer')
    data = {
        'text': 'No'
    }
    if answer == get_current_question(type_of_quiz).answer:
        # Reward the player with one token and move the quiz forward.
        profile = Profile.objects.get(user_id=request.user.id)
        profile.token += 1
        profile.save()
        inc_question_index(type_of_quiz)
        data['text'] = "Yes it's correct"
    return JsonResponse(data)
def admin_question(request):
    """Admin form view for creating a new quiz question.

    On a valid POST the question is saved and the user is redirected back;
    otherwise the (possibly bound, invalid) form is re-rendered.
    """
    if request.method == 'POST':
        form = CreateQuestionForm(request.POST)
        if form.is_valid():
            form.save()
            # Fixed user-facing typo ('qestion' -> 'question'); plain string
            # since there are no placeholders to interpolate.
            messages.success(request, 'New question has been updated!')
            return redirect('users-admin-question')
    else:
        form = CreateQuestionForm()
    context = {
        'form': form,
    }
    return render(request, 'quize/admin_question.html', context)
def admin_quiz(request):
    """Admin form view for creating a new quiz."""
    if request.method == 'POST':
        form = CreateQuizForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, f'New quiz has been updated!')
            return redirect('users-admin-quiz')
    else:
        form = CreateQuizForm()
    # Invalid POSTs fall through here with the bound form so errors render.
    return render(request, 'quize/admin_quiz.html', {'form': form})
|
13,582 | 01a7bb956b3c316cb0da6736eb96face97ffe26f | # -*- coding: utf-8 -*-
"""
flask.ext.social.datastore
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains an abstracted social connection datastore.
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask_security.datastore import SQLAlchemyDatastore, MongoEngineDatastore
class ConnectionDatastore(object):
    """Abstract OAuth connection datastore.

    Always extend this class and implement :meth:`find_connection` and
    :meth:`find_connections`. Persistence primitives (``put``/``delete``)
    are supplied by a companion Flask-Security datastore mixin.

    :param connection_model: the model class used to store connections
        (from a configured database manager such as Flask-SQLAlchemy or
        Flask-MongoEngine)
    """

    def __init__(self, connection_model):
        self.connection_model = connection_model

    def find_connection(self, **kwargs):
        raise NotImplementedError

    def find_connections(self, **kwargs):
        raise NotImplementedError

    def create_connection(self, **kwargs):
        """Persist and return a new connection record."""
        return self.put(self.connection_model(**kwargs))

    def delete_connection(self, **kwargs):
        """Remove the single matching connection; return True if one existed."""
        conn = self.find_connection(**kwargs)
        if not conn:
            return False
        self.delete(conn)
        return True

    def delete_connections(self, **kwargs):
        """Remove every matching connection; return True if any was removed."""
        removed = False
        for conn in self.find_connections(**kwargs):
            self.delete(conn)
            removed = True
        return removed
class SQLAlchemyConnectionDatastore(SQLAlchemyDatastore, ConnectionDatastore):
    """A SQLAlchemy datastore implementation for Flask-Social.

    Combines Flask-Security's SQLAlchemy persistence primitives with the
    abstract connection lookup API.
    """

    def __init__(self, db, connection_model):
        SQLAlchemyDatastore.__init__(self, db)
        ConnectionDatastore.__init__(self, connection_model)

    def _query(self, **kwargs):
        # filter_by builds the WHERE clause directly from the kwargs.
        return self.connection_model.query.filter_by(**kwargs)

    def find_connection(self, **kwargs):
        # First matching row or None.
        return self._query(**kwargs).first()

    def find_connections(self, **kwargs):
        # Returns the (lazy) query object itself; callers iterate it.
        return self._query(**kwargs)
class MongoEngineConnectionDatastore(MongoEngineDatastore, ConnectionDatastore):
    """A MongoEngine datastore implementation for Flask-Social.

    Mirrors SQLAlchemyConnectionDatastore but queries via the MongoEngine
    ``objects`` manager.
    """

    def __init__(self, db, connection_model):
        MongoEngineDatastore.__init__(self, db)
        ConnectionDatastore.__init__(self, connection_model)

    def _query(self, **kwargs):
        return self.connection_model.objects(**kwargs)

    def find_connection(self, **kwargs):
        # First matching document or None.
        return self._query(**kwargs).first()

    def find_connections(self, **kwargs):
        return self._query(**kwargs)
|
13,583 | de83ceb34fd17635f0ba2d41a2b24ea2a191fd14 | import logging
import re
import socket
import struct
import time
import uuid
from threading import Timer
from urllib.parse import quote
import zeroconf
import config
from plugin import GetPlugin
# URL template and platform strings advertised in beacon/Zeroconf announcements.
SHARE_TEMPLATE = '/TiVoConnect?Command=QueryContainer&Container=%s'
PLATFORM_MAIN = 'pyTivo'
PLATFORM_VIDEO = 'pc/pyTivo' # For the nice icon
# It's possible this function should live somewhere else, but for now this
# is the only module that needs it. -mjl
def bytes2str(data):
    """Recursively convert bytes to str as UTF-8.

    dict keys/values and tuple items are converted as well; any other
    type is returned unchanged.
    """
    if isinstance(data, bytes):
        return data.decode('utf-8')
    if isinstance(data, dict):
        return dict(map(bytes2str, data.items()))
    if isinstance(data, tuple):
        # Materialize the result: on Python 3 a bare map() would return a
        # one-shot iterator instead of a sequence, breaking re-iteration.
        return tuple(map(bytes2str, data))
    return data
def log_serviceinfo(logger, info):
    """Write interesting attributes from a ServiceInfo to the log.

    At INFO level only name/address/port are written; when the logger is
    enabled for DEBUG, the server name (if different) and all properties
    are included as well.
    """
    try:
        debugging = logger.isEnabledFor(logging.DEBUG)
        log_level = logging.INFO
        log_info = {'name': info.name,
                    'address': socket.inet_ntoa(info.addresses[0]),
                    'port': info.port}
        log_fmt = "\n {address}:{port} {name}\n"
        if debugging:
            log_level = logging.DEBUG
            if info.server != info.name:
                log_info['server'] = info.server
                log_fmt += " server: {server}\n"
            for (k, v) in info.properties.items():
                li_k = "prop_" + bytes2str(k)
                log_info[li_k] = v
                log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
        logger.log(log_level, log_fmt.format(**log_info))
    except Exception:
        # Logging must never take the caller down; record and move on.
        # (Fix: narrowed the bare except and corrected the function name
        # in the message.)
        logger.exception("exception in log_serviceinfo")
class ZCListener:
    """Zeroconf browser callback that tracks discovered service names.

    Names are stored, stripped of their service-type suffix, in the shared
    ``names`` list supplied by the caller.
    """

    # pylint: disable=redefined-builtin
    def __init__(self, names, logger=None):
        self.names = names
        self.logger = logger

    def _strip(self, name, type_):
        """Drop the '.<type_>' suffix from a full service name."""
        return name.replace('.' + type_, '')

    def remove_service(self, server, type_, name):
        self.names.remove(self._strip(name, type_))

    def add_service(self, server, type_, name):
        self.names.append(self._strip(name, type_))

    def update_service(self, server, type_, name):
        # Required by the zeroconf API; updates are only logged, never acted on.
        if self.logger is not None:
            self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
    # Registers each configured pyTivo share as a Zeroconf service and can
    # scan the LAN for TiVos that offer videos.

    def __init__(self, logger):
        """ Announce our shares via Zeroconf. """
        self.share_names = []   # sections successfully registered
        self.share_info = []    # matching zeroconf.ServiceInfo objects
        self.logger = logger
        self.rz = zeroconf.Zeroconf()
        self.renamed = {}       # section -> de-collided title
        # Titles already visible on the network (used to avoid collisions).
        old_titles = self.scan()
        address = socket.inet_aton(config.get_ip())
        port = int(config.getPort())
        logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
        for section, settings in config.getShares():
            try:
                plugin = GetPlugin(settings['type'])
                ct = plugin.CONTENT_TYPE
                # if the plugin provides a test for validity use it otherwise assume valid
                if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
                    logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
                    continue
            except Exception as e:
                logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
                continue
            if ct.startswith('x-container/'):
                # Video shares get the 'pc/pyTivo' platform string (nicer icon).
                if 'video' in ct:
                    platform = PLATFORM_VIDEO
                else:
                    platform = PLATFORM_MAIN
                logger.info('Registering: %s' % section)
                self.share_names.append(section)
                desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
                        b'platform': bytes(platform, 'utf-8'),
                        b'protocol': b'http',
                        b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
                tt = ct.split('/')[1]
                title = section
                count = 1
                # Append ' [n]' until the title no longer clashes with a
                # name already seen on the network.
                while title in old_titles:
                    # debugging info while I try to figure out what this loop is for
                    logger.info(" title b4: {}".format(title))
                    count += 1
                    title = '%s [%d]' % (section, count)
                    self.renamed[section] = title
                    # more debugging info
                    logger.info(" title after: {}\n section: {}".format(title, section))
                info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
                                            '%s._%s._tcp.local.' % (title, tt),
                                            port=port, addresses=[address], properties=desc)
                log_serviceinfo(self.logger, info)
                self.rz.register_service(info)
                self.share_info.append(info)

    def scan(self):
        """ Look for TiVos using Zeroconf.

        Populates config.tivos with address/port/properties per TSN and
        returns the list of discovered service names.
        """
        VIDS = '_tivo-videos._tcp.local.'
        names = []
        self.logger.info('Scanning for TiVos...\n')
        # Get the names of servers offering TiVo videos
        browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
        # Give them a second (or more if no one has responded in the 1st second) to respond
        time.sleep(1)
        max_sec_to_wait = 10
        sec_waited = 0
        while not names and sec_waited < max_sec_to_wait:
            sec_waited += 1
            time.sleep(1)
        # Any results?
        if names:
            config.tivos_found = True
        # Now get the addresses -- this is the slow part
        for name in names:
            info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
            log_serviceinfo(self.logger, info)
            if info:
                # zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
                # Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
                self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
                tsn = info.properties.get(b'TSN')
                if config.get_togo('all'):
                    tsn = info.properties.get(b'tsn', tsn)
                if tsn:
                    if isinstance(tsn, bytes):
                        tsn = tsn.decode('utf-8')
                    address = socket.inet_ntoa(info.addresses[0])
                    port = info.port
                    config.tivos[tsn] = {'name': name, 'address': address,
                                         'port': port}
                    # info.properties has bytes keys and values, but we'd rather
                    # deal with str keys and values, so convert them before adding
                    # them to our tivos dict.
                    config.tivos[tsn].update(bytes2str(info.properties))
        # Debugging information on what services have been found:
        # try:
        #     all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
        #     self.logger.info("All services found")
        #     for s in all_services:
        #         self.logger.info(" {}".format(s))
        # except Exception as e:
        #     self.logger.error(e)
        return names

    def shutdown(self):
        """Unregister every announced share and close the Zeroconf handle."""
        self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
        for info in self.share_info:
            self.rz.unregister_service(info)
        self.rz.close()
class Beacon:
    """Announce pyTivo on the LAN via the TiVo beacon protocol.

    Periodically UDP-broadcasts beacons on port 2190, optionally answers
    TCP direct-connect beacons, and delegates Zeroconf announcements to
    ZCBroadcast when enabled in the config.

    Fixes over the original (Python 2 leftovers): recv_bytes accumulated
    bytes into a str, send_packet concatenated a str beacon onto bytes,
    and get_name ran a str regex over a bytes payload (so it always fell
    back to the raw address).
    """

    def __init__(self):
        self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.services = []
        self.timer = None
        # Advertise the plain pyTivo platform only when a music/photo share
        # exists; otherwise use the video platform string (nicer TiVo icon).
        self.platform = PLATFORM_VIDEO
        for section, settings in config.getShares():
            try:
                ct = GetPlugin(settings['type']).CONTENT_TYPE
            except Exception:
                continue
            if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
                self.platform = PLATFORM_MAIN
                break

        if config.get_zc():
            logger = logging.getLogger('pyTivo.beacon')
            try:
                self.bd = ZCBroadcast(logger)
            except Exception as e:
                logger.debug('Beacon.__init__: raised %s: %s',
                             e.__class__.__name__, e)
                logger.error('Zeroconf failure')
                self.bd = None
        else:
            self.bd = None

    def add_service(self, service):
        """Register a service string and immediately re-announce."""
        self.services.append(service)
        self.send_beacon()

    def format_services(self):
        return ';'.join(self.services)

    def format_beacon(self, conntype, services=True):
        """Build the beacon payload (str) for the given connection type."""
        beacon = ['tivoconnect=1',
                  'method=%s' % conntype,
                  'identity={%s}' % config.getGUID(),
                  'machine=%s' % socket.gethostname(),
                  'platform=%s' % self.platform]
        if services:
            beacon.append('services=' + self.format_services())
        else:
            beacon.append('services=TiVoMediaServer:0/http')
        return '\n'.join(beacon) + '\n'

    def send_beacon(self):
        """UDP-broadcast the beacon to every configured address."""
        beacon_ips = config.getBeaconAddresses()
        beacon = self.format_beacon('broadcast')
        for beacon_ip in beacon_ips.split():
            if beacon_ip != 'listen':
                try:
                    packet = bytes(beacon, 'utf-8')
                    # sendto may send a partial datagram payload; loop until done.
                    while packet:
                        result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
                        if result < 0:
                            break
                        packet = packet[result:]
                except Exception as e:
                    print(e)

    def start(self):
        """Send a beacon now and reschedule another every 60 seconds."""
        self.send_beacon()
        self.timer = Timer(60, self.start)
        self.timer.start()

    def stop(self):
        """Cancel the rebroadcast timer and tear down Zeroconf."""
        self.timer.cancel()
        if self.bd:
            self.bd.shutdown()

    @staticmethod
    def recv_bytes(sock, length):
        """Read exactly ``length`` bytes (or until EOF) from ``sock``."""
        # Accumulate into bytes, not str: sock.recv returns bytes on
        # Python 3 (the original '' start value raised TypeError).
        block = b''
        while len(block) < length:
            add = sock.recv(length - len(block))
            if not add:
                break
            block += add
        return block

    @staticmethod
    def recv_packet(sock):
        """Read one length-prefixed (network-order uint32) packet as bytes."""
        length = struct.unpack('!I', Beacon.recv_bytes(sock, 4))[0]
        return Beacon.recv_bytes(sock, length)

    @staticmethod
    def send_packet(sock, packet):
        """Send ``packet`` with a network-order uint32 length prefix.

        Accepts str (encoded as UTF-8) or bytes; callers pass str beacons.
        """
        if isinstance(packet, str):
            packet = packet.encode('utf-8')
        sock.sendall(struct.pack('!I', len(packet)) + packet)

    def listen(self):
        """ For the direct-connect, TCP-style beacon """
        import _thread

        def server():
            TCPSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            TCPSock.bind(('', 2190))
            TCPSock.listen(5)
            while True:
                # Wait for a connection
                client, address = TCPSock.accept()
                # Accept (and discard) the client's beacon
                self.recv_packet(client)
                # Send ours
                self.send_packet(client, self.format_beacon('connected'))
                client.close()

        _thread.start_new_thread(server, ())

    def get_name(self, address):
        """ Exchange beacons, and extract the machine name. """
        our_beacon = self.format_beacon('connected', False)
        machine_name = re.compile('machine=(.*)\n').search
        try:
            tsock = socket.socket()
            tsock.connect((address, 2190))
            self.send_packet(tsock, our_beacon)
            tivo_beacon = self.recv_packet(tsock)
            tsock.close()
            # recv_packet returns bytes; decode before applying the str regex.
            name = machine_name(tivo_beacon.decode('utf-8')).groups()[0]
        except Exception:
            # Any failure (refused connection, malformed beacon) falls back
            # to the raw address.
            name = address
        return name
|
13,584 | 0d97cac9b3173506e33be3584b14af9f3ee3657d | """This module provides classes to search in the obs.
Currently only the search for requests is supported.
"""
from lxml import etree
from osc2.remote import Request, RemoteProject, RemotePackage
from osc2.util.xml import fromstring, OscElement
from osc2.core import Osc
class ProjectCollection(OscElement):
    """Contains the project search results.

    All project objects are read only. In order to "work"
    with the project objects (except reading) a call to
    the Project's real_obj method is required.
    """
    SCHEMA = ''

    def __iter__(self):
        # Yield writable RemoteProject objects instead of the raw XML nodes.
        for r in self.iterfind('project'):
            yield r.real_obj()
class ROProject(OscElement):
    """Represents a read only project.

    This kind of project object is usually used in a collection.
    """

    def real_obj(self):
        """Returns a "real" Project object.

        The returned object is "writable" too that is
        its state can be changed etc.
        """
        # Round-trip through XML to build an independent, mutable object.
        return RemoteProject(xml_data=etree.tostring(self))
class RequestCollection(OscElement):
    """Contains the request search results.

    All request objects are read only. In order to "work"
    with the request objects (except reading) a call to
    the Request's real_obj method is required.
    """
    SCHEMA = ''

    def __iter__(self):
        # Yield writable Request objects instead of the raw XML nodes.
        for r in self.iterfind('request'):
            yield r.real_obj()
class RORequest(OscElement):
    """Represents a read only request.

    This kind of request object is usually used in a collection.
    """

    def real_obj(self):
        """Returns a "real" Request object.

        The returned object is "writable" too that is
        its state can be changed etc.
        """
        return Request(xml_data=etree.tostring(self))
class PackageCollection(OscElement):
    """Contains the package search results.

    All package objects are read only. In order to "work"
    with the package objects (except reading) a call to
    the Package's real_obj method is required.
    """
    SCHEMA = ''

    def __iter__(self):
        # Yield writable RemotePackage objects instead of the raw XML nodes.
        for r in self.iterfind('package'):
            yield r.real_obj()
class ROPackage(OscElement):
    """Represents a read only package.

    This kind of package object is usually used in a collection.
    """

    def real_obj(self):
        """Returns a "real" Package object.

        (Docstring fixed: it previously said "Request".)
        The returned object is "writable" too that is
        its state can be changed etc.
        """
        return RemotePackage(xml_data=etree.tostring(self))
def _find(path, xp, tag_class=None, **kwargs):
    """Returns a Collection with objects which match the xpath.

    path is the remote path which is used for the http request.
    xp is the xpath which is used for the search (either an
    Expression object or a string).

    Keyword arguments:
    tag_class -- a dict which maps tag names to classes
                 (see util.xml.fromstring for the details)
                 (default: None, treated as {})
    **kwargs -- optional parameters for the http request

    (Fix: the default was a shared mutable dict ``{}``; replaced with the
    None-sentinel idiom.)
    """
    if tag_class is None:
        tag_class = {}
    request = Osc.get_osc().get_reqobj()
    xpath = xp
    if hasattr(xp, 'tostring'):
        xpath = xp.tostring()
    f = request.get(path, match=xpath, **kwargs)
    return fromstring(f.read(), **tag_class)
def find_request(xp, **kwargs):
    """Returns a RequestCollection with objects which match the xpath.

    xp is the xpath which is used for the search (either an
    Expression object or a string).

    Keyword arguments:
    **kwargs -- optional parameters for the http request
    """
    kwargs.setdefault('schema', RequestCollection.SCHEMA)
    tag_class = {'collection': RequestCollection, 'request': RORequest}
    return _find('/search/request', xp, tag_class, **kwargs)
def find_project(xp, **kwargs):
    """Returns a ProjectCollection with objects which match the xpath.

    xp is the xpath which is used for the search (either an
    Expression object or a string).

    Keyword arguments:
    **kwargs -- optional parameters for the http request
    """
    kwargs.setdefault('schema', ProjectCollection.SCHEMA)
    tag_class = {'collection': ProjectCollection, 'project': ROProject}
    return _find('/search/project', xp, tag_class, **kwargs)
def find_package(xp, **kwargs):
    """Returns a PackageCollection with objects which match the xpath.

    xp is the xpath which is used for the search (either an
    Expression object or a string).

    Keyword arguments:
    **kwargs -- optional parameters for the http request
    """
    kwargs.setdefault('schema', PackageCollection.SCHEMA)
    tag_class = {'collection': PackageCollection, 'package': ROPackage}
    return _find('/search/package', xp, tag_class, **kwargs)
|
13,585 | edb66e83f93365aa737e09c6f79808c3319db83c | """Tests for the various features from the code generation templates."""
import importlib
import pytest
from pyecore.ecore import EPackage, EClass, EReference, EEnum, EAttribute, EInt, EOperation, \
EParameter, EString, EDataType, EAnnotation
from pyecoregen.ecore import EcoreGenerator
def generate_meta_model(model, output_dir, auto_register_package=None):
    """Generate code for *model* into *output_dir* and import the result."""
    EcoreGenerator(auto_register_package).generate(model, output_dir)
    return importlib.import_module(model.name)
def test_empty_package(pygen_output_dir):
    """An empty EPackage generates an importable module mirroring its metadata."""
    package = EPackage('empty')
    mm = generate_meta_model(package, pygen_output_dir)
    assert mm
    assert mm.name == 'empty'
    assert not mm.nsURI
    assert not mm.nsPrefix
    assert not mm.eClassifiers

    # Regenerating after metadata changes picks up nsURI/nsPrefix.
    package.name = 'empty2'
    package.nsURI = 'http://xyz.org'
    package.nsPrefix = 'p'
    mm = generate_meta_model(package, pygen_output_dir)
    assert mm.nsURI == 'http://xyz.org'
    assert mm.nsPrefix == 'p'
def test_top_level_package_with_subpackages(pygen_output_dir):
    """Subpackages generate nested modules; cross-package eOpposite refs work."""
    rootpkg = EPackage('rootpkg')
    subpkg = EPackage('subpkg')
    cls1 = EClass('A')
    cls2 = EClass('B')
    # A.b <-> B.a bidirectional reference across packages.
    cls1.eStructuralFeatures.append(EReference('b', cls2))
    cls2.eStructuralFeatures.append(
        EReference('a', cls1, eOpposite=cls1.findEStructuralFeature('b')))
    rootpkg.eClassifiers.append(cls1)
    rootpkg.eSubpackages.append(subpkg)
    subpkg.eClassifiers.append(cls2)

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    assert mm.name == rootpkg.name
    assert mm.eSubpackages[0].eSuperPackage.name == rootpkg.name

    generated_A = mm.getEClassifier('A')
    assert generated_A
    generated_subpkg = mm.eSubpackages[0]
    assert generated_subpkg
    assert generated_subpkg.name == 'subpkg'
    generated_B = generated_subpkg.getEClassifier('B')
    assert generated_B

    # Setting one side of the opposite pair fills in the other.
    a = generated_A()
    b = generated_B()
    a.b = b
    assert b.a is a
def test_package_with_enum(pygen_output_dir):
    """An EEnum classifier is generated with all of its literals."""
    enumpkg = EPackage('enumpkg')
    enum = EEnum('MyEnum', literals=('X', 'Y', 'Z'))
    enumpkg.eClassifiers.append(enum)

    mm = generate_meta_model(enumpkg, pygen_output_dir)

    generated_enum = mm.eClassifiers['MyEnum']
    assert isinstance(generated_enum, EEnum)
    assert set(l.name for l in generated_enum.eLiterals) == {'X', 'Y', 'Z'}
def test_classifier_imports(pygen_output_dir):
    """Generated modules import classifiers they use from sibling packages."""
    # super types and enums from another package have to be imported in using module:
    rootpkg = EPackage('import_test')
    ppkg = EPackage('provider')
    upkg = EPackage('user')
    rootpkg.eSubpackages.extend([ppkg, upkg])
    super_class = EClass('SuperClass')
    enum = EEnum('AnEnum', literals=('A', 'B'))
    ppkg.eClassifiers.extend((super_class, enum))
    derived_class = EClass('DerivedClass', superclass=super_class)
    derived_class.eStructuralFeatures.append(EAttribute('kind', enum))
    upkg.eClassifiers.append(derived_class)

    # simply importing successfully shows the imports have made the types visible
    mm = generate_meta_model(rootpkg, pygen_output_dir)
    assert mm
def test_class_with_features(pygen_output_dir):
    """Attributes and self-references are generated and settable as expected."""
    rootpkg = EPackage('class_features')
    class_ = EClass('MyClass')
    rootpkg.eClassifiers.append(class_)
    class_.eStructuralFeatures.append(EAttribute('number', EInt, changeable=False))
    class_.eStructuralFeatures.append(EReference('ref', class_))

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    generated_class = mm.eClassifiers['MyClass']
    instance = generated_class(number=7)
    assert instance.number == 7
    assert not instance.ref
    instance.ref = instance
    assert instance.ref is instance
def test_class_with_documentation(pygen_output_dir):
    """A GenModel 'documentation' annotation becomes the class docstring."""
    rootpkg = EPackage('class_doc')
    class_ = EClass('MyClass')
    rootpkg.eClassifiers.append(class_)
    doc = EAnnotation('http://www.eclipse.org/emf/2002/GenModel')
    class_.eAnnotations.append(doc)
    doc.details['documentation'] = 'This is a documentation test'

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    generated_class = mm.eClassifiers['MyClass']
    assert generated_class.__doc__ == 'This is a documentation test'
def test_operation(pygen_output_dir):
    """Generated operations enforce required params and raise NotImplementedError."""
    rootpkg = EPackage('operation')
    class_ = EClass('MyClass')
    rootpkg.eClassifiers.append(class_)
    class_.eOperations.append(EOperation(
        'do_it',
        EInt,
        params=(EParameter('p1', EInt, required=True), EParameter('p2', EInt)),
    ))

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    instance = mm.eClassifiers['MyClass']()
    with pytest.raises(NotImplementedError):
        instance.do_it(1, 2)

    # missing non-required argument
    with pytest.raises(NotImplementedError):
        instance.do_it(1)

    # missing non-required argument
    with pytest.raises(NotImplementedError):
        instance.do_it(p1=1)

    # missing required argument:
    with pytest.raises(TypeError):
        instance.do_it(p2=2)
def test_class_with_derived_features(pygen_output_dir):
    """A derived attribute is generated as an underscore-prefixed EAttribute."""
    rootpkg = EPackage('simpleClasses')
    MyClass = EClass('MyClass')
    rootpkg.eClassifiers.append(MyClass)
    any_feature = EAttribute('any', EString, derived=True)
    MyClass.eStructuralFeatures.append(any_feature)

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    generated_class = mm.eClassifiers['MyClass']
    assert mm.MyClass is generated_class
    assert isinstance(mm.MyClass._any, EAttribute)
    assert mm.MyClass._any.derived is True
    assert mm.MyClass._any.name == 'any'
def test_various_datatypes(pygen_output_dir):
    """EDataType instanceClassName maps to the right Python type and default."""
    rootpkg = EPackage('datatypes')
    data1 = EDataType('Data1', instanceClassName='java.lang.Integer')
    data2 = EDataType('Data2', instanceClassName='Unknown')
    rootpkg.eClassifiers.extend([data1, data2])

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    gendata1 = mm.eClassifiers['Data1']
    gendata2 = mm.eClassifiers['Data2']
    assert gendata1 is mm.Data1
    assert mm.Data1.eType is int
    # Fix: compare with '==' — identity checks against int literals are a
    # CPython implementation detail and a SyntaxWarning since Python 3.8.
    assert mm.Data1.default_value == 0
    assert gendata2 is mm.Data2
    assert mm.Data2.eType is object
    assert isinstance(mm.Data2.default_value, object)
def test_class_with_feature_many(pygen_output_dir):
    """A many-valued attribute (upper=-1) defaults to an empty collection."""
    rootpkg = EPackage('manyfeatures')
    MyClass = EClass('MyClass')
    rootpkg.eClassifiers.append(MyClass)
    any_feature = EAttribute('any', EString, upper=-1)
    MyClass.eStructuralFeatures.append(any_feature)

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    generated_class = mm.eClassifiers['MyClass']
    instance = mm.MyClass()
    assert generated_class is mm.MyClass
    assert isinstance(mm.MyClass.any, EAttribute)
    assert instance.any == set()
def test_pythonic_names(pygen_output_dir):
    """Names clashing with Python keywords get a trailing underscore."""
    rootpkg = EPackage('pythonic_names')
    c1 = EClass('MyClass')
    rootpkg.eClassifiers.append(c1)
    a1 = EAttribute('att', EString, upper=-1)
    c1.eStructuralFeatures.append(a1)
    # 'pass' and 'else' are Python keywords and must be mangled.
    c2 = EClass('pass')
    rootpkg.eClassifiers.append(c2)
    a2 = EAttribute('else', EString, upper=-1)
    c2.eStructuralFeatures.append(a2)

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    assert mm.eClassifiers['MyClass'] is mm.MyClass
    assert mm.eClassifiers['pass_'] is mm.pass_
    assert isinstance(mm.pass_.else_, EAttribute)
    assert mm.pass_().else_ == set()
def test_attribute_with_feature_id(pygen_output_dir):
    """The iD flag of an EAttribute survives generation."""
    rootpkg = EPackage('id_attribute')
    c1 = EClass('MyClass')
    rootpkg.eClassifiers.append(c1)
    a1 = EAttribute('att', EString, iD=True)
    a2 = EAttribute('att2', EString)
    c1.eStructuralFeatures.extend([a1, a2])

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    assert isinstance(mm.MyClass.att, EAttribute)
    assert isinstance(mm.MyClass.att2, EAttribute)
    assert mm.MyClass.att.iD is True
    assert mm.MyClass.att2.iD is False
def test_eoperation_with_documentation(pygen_output_dir):
    """A documented EOperation still generates a NotImplementedError stub."""
    rootpkg = EPackage('eoperation_with_documentation')
    c1 = EClass('MyClass')
    rootpkg.eClassifiers.append(c1)
    operation = EOperation('do_it')
    doc = EAnnotation('http://www.eclipse.org/emf/2002/GenModel')
    operation.eAnnotations.append(doc)
    doc.details['documentation'] = 'This is a documentation test'
    c1.eOperations.append(operation)

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    instance = mm.MyClass()
    with pytest.raises(NotImplementedError):
        instance.do_it()
def test_eattribute_derived_not_changeable(pygen_output_dir):
    """Derived attributes are writable only when changeable=True."""
    rootpkg = EPackage('changeable_attribute')
    c1 = EClass('MyClass')
    rootpkg.eClassifiers.append(c1)
    att1 = EAttribute('att1', EString, derived=True, changeable=True)
    att2 = EAttribute('att2', EString, derived=True, changeable=False)
    c1.eStructuralFeatures.extend([att2, att1])

    mm = generate_meta_model(rootpkg, pygen_output_dir)

    instance = mm.MyClass()
    assert instance.att1 is None
    assert instance.att2 is None
    instance.att1 = "test_value"
    assert instance.att1 == "test_value"
    # Non-changeable derived attribute must reject assignment.
    with pytest.raises(AttributeError):
        instance.att2 = "test_value2"
def test_auto_registration_enabled(pygen_output_dir):
    """auto_register_package puts the generated package into the global registry."""
    rootpkg = EPackage('auto_registration', nsURI='http://autoregister')
    c1 = EClass('MyClass')
    rootpkg.eClassifiers.append(c1)

    mm = generate_meta_model(rootpkg, pygen_output_dir, auto_register_package=True)

    from pyecore.resources import global_registry
    assert mm.nsURI in global_registry
    assert global_registry[mm.nsURI] is mm.auto_registration
|
13,586 | 08fe545b271999c17dcb0ed9563fcbfa7e1fda82 | import os
from flask import Flask, render_template
import logging
from logging.handlers import RotatingFileHandler
# Flask application object; debug mode is enabled for development.
app = Flask(__name__)
app.debug = True
# Base directory, used below to locate the log folder.
cwd = os.getcwd()
@app.route('/')
def index():
    """Landing page: a short plain-text description of the service."""
    return 'Simple vault app for files and images'
if __name__ == '__main__':
    # Log http requests
    logging.basicConfig(filename= cwd + '/logs/webapp.log', level=logging.DEBUG)
    # Run the webserver
    # NOTE(review): debug=True exposes the Werkzeug debugger on all
    # interfaces — confirm this never runs in production.
    app.run(host='0.0.0.0', port=8080, debug=True)
|
13,587 | 7499afe3eec54cf693d57f431c91b57d6f246a19 | import logging
import re
from .const import *
from homeassistant.const import (
CONF_NAME,
CONF_ICON,
CONF_DEVICE_CLASS,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
class IpxDevice(Entity):
    """Representation of a IPX800 generic device entity.

    Base class for concrete IPX800 entities: reads the common config keys
    (name, class, icon, ids, ...) and wires the entity to the controller's
    update coordinator.
    """

    def __init__(self, ipx_device, suffix_name=""):
        """Initialize the device.

        ipx_device -- dict with 'config' and 'controller' entries
        suffix_name -- optional suffix appended to the configured name
        """
        self.config = ipx_device.get("config")
        self.controller = ipx_device.get("controller")
        self.coordinator = self.controller.coordinator

        self._name = self.config.get(CONF_NAME)
        if suffix_name:
            self._name += f" {suffix_name}"

        self._device_class = self.config.get(CONF_DEVICE_CLASS) or None
        self._unit_of_measurement = self.config.get(CONF_UNIT_OF_MEASUREMENT) or None
        # Transition is configured in seconds; stored in milliseconds.
        self._transition = int(self.config.get(CONF_TRANSITION, DEFAULT_TRANSITION) * 1000)
        self._icon = self.config.get(CONF_ICON) or None
        self._state = None
        self._id = self.config.get(CONF_ID)
        self._ext_id = self.config.get(CONF_EXT_ID) or None
        self._ids = self.config.get(CONF_IDS) or []
        self._supported_features = 0
        # Unique id: controller + component + sanitized name, lowercased.
        self._unique_id = (
            f"{self.controller.name}.{self.config.get(CONF_COMPONENT)}.{re.sub('[^A-Za-z0-9_]+', '', self._name.replace(' ', '_'))}"
        ).lower()

    @property
    def should_poll(self):
        """No polling since coordinator used"""
        return False

    @property
    def available(self):
        """Return if entity is available."""
        return self.coordinator.last_update_success

    @property
    def name(self):
        """Return the display name of this device."""
        return self._name

    @property
    def device_info(self):
        """Return device registry information linking back to the controller."""
        return {
            "identifiers": {(DOMAIN, self._unique_id)},
            "name": self._name,
            "manufacturer": "GCE",
            "model": "IPX800v4",
            "via_device": (DOMAIN, self.controller.name),
        }

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self._icon

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def supported_features(self):
        """Flag supported features."""
        return self._supported_features

    async def async_added_to_hass(self):
        """When entity is added to hass."""
        # Re-render state whenever the coordinator reports fresh data.
        self.async_on_remove(
            self.coordinator.async_add_listener(self.async_write_ha_state)
        )

    async def async_update(self):
        """Update the entity (delegates to the coordinator refresh)."""
        await self.coordinator.async_request_refresh()
|
13,588 | 2bc184495e198a1fcf060e6433329f645c27a798 | # 1. Напишите функции уравнений:
# - x в степени 4 + 4 в степени x,
# - y в степени 4 + 4 в степени x
def ur1(x):
    """Evaluate the first equation: x**4 + 4**x."""
    fourth_power = x ** 4
    exponential = 4 ** x
    return fourth_power + exponential
def ur2(x, y):
    """Evaluate the second equation: y**4 + 4**x."""
    fourth_power = y ** 4
    exponential = 4 ** x
    return fourth_power + exponential
# Read two integers from the user and print both equation results.
x = int(input("Vvedite 2 chisla:\n #1: "))
y = int(input(" #2: "))
print("Rezultat 1 uravnenija =", ur1(x))
print("Rezultat 2 uravnenija =", ur2(x, y))
13,589 | 34bf1aae06fbfca55826f47d8e3c18fa1a867705 | class Solution:
m=0
n=0
count=0
rotten=[]
minutes=0
def rot(self,grid,r,c,time):
if r>=0 and r< self.m and c>=0 and c < self.n and grid[r][c] == 1:
grid[r][c]=2
self.rotten.append((r*self.n+c,time+1))
self.minutes=max(time+1,self.minutes)
def bfs(self,grid):
while(len(self.rotten) != 0):
(pos,time) = self.rotten.pop(0)
r = int(pos/self.n)
c = pos % self.n
self.rot(grid,r+1,c,time)
self.rot(grid,r-1,c,time)
self.rot(grid,r,c+1,time)
self.rot(grid,r,c-1,time)
def orangesRotting(self, grid: List[List[int]]) -> int:
self.m=len(grid)
self.n=len(grid[0])
flag=0
for i in range(self.m):
for j in range(self.n):
if grid[i][j] == 2:
self.rotten.append((i*self.n+j,0))
self.bfs(grid)
for i in range(self.m):
for j in range(self.n):
if grid[i][j] == 1 :
flag=1
if flag:
return -1
else:
return self.minutes
|
13,590 | c84cd50341e6b3783effd3e1fa16f21f78afa8b8 | import numpy as np
import sys,os
import time
import os
import re
import cv2
import argparse
import functools
import subprocess
import numpy as np
from PIL import Image
import moviepy.editor as mpy
import torch.nn.parallel
import torch.optim
from models import TSN
from transforms import *
import datasets_video
from torch.nn import functional as F
import shutil
class Runner(object):
    """Loads a pretrained TRN action-recognition network and classifies
    clips of frames against the reduced category list."""

    def __init__(self):
        # One category label per line.
        categories_file = 'pretrain/reduced_categories.txt'
        self.categories = [line.rstrip() for line in open(categories_file, 'r').readlines()]
        self.num_class = len(self.categories)
        #self.arch = 'InceptionV3'
        self.arch = 'BNInception'
        # Load model: 8-segment multiscale TRN over RGB input.
        self.net = TSN(self.num_class, 8, 'RGB', base_model=self.arch, consensus_type='TRNmultiscale', img_feature_dim=256, print_spec=False)
        #weights = 'pretrain/TRN_moments_RGB_InceptionV3_TRNmultiscale_segment8_best.pth.tar'
        weights = 'pretrain/seniordesign.pth.tar'
        checkpoint = torch.load(weights, map_location='cpu')
        #print("model epoch {} best prec@1: {}".format(checkpoint['epoch'], checkpoint['best_prec1']))
        # print list(checkpoint['state_dict'].items())
        # Strip the leading module prefix (left by DataParallel training)
        # from every state-dict key before loading.
        base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint['state_dict'].items())}
        self.net.load_state_dict(base_dict)
        #self.net.eval() #.cuda().eval()
        self.net.cuda().eval()
        # Initialize frame transforms.
        # self.transform = torchvision.transforms.Compose([
        #     GroupOverSample(self.net.input_size, self.net.scale_size),
        #     Stack(roll=(self.arch in ['BNInception', 'InceptionV3'])),
        #     ToTorchFormatTensor(div=(self.arch not in ['BNInception', 'InceptionV3'])),
        #     GroupNormalize(self.net.input_mean, self.net.input_std),
        # ])
        self.transform = torchvision.transforms.Compose([
            GroupScale(self.net.scale_size),
            GroupCenterCrop(self.net.input_size),
            Stack(roll=(self.arch in ['BNInception', 'InceptionV3'])),
            ToTorchFormatTensor(div=(self.arch not in ['BNInception', 'InceptionV3'])),
            GroupNormalize(self.net.input_mean, self.net.input_std),
        ])

    def test_video(self, frames, videoname):
        """Classify a list of frames; return ({rank: prob}, {rank: label})
        for the top-5 predictions and print them.

        frames -- sequence of PIL images (assumed RGB — shaped by the
        transform pipeline above); videoname -- label used in the printout.
        """
        data = self.transform(frames)
        input_var = torch.autograd.Variable(data.view(-1, 3, data.size(1), data.size(2)),
                                            volatile=True).unsqueeze(0).cuda()
        logits = self.net(input_var)
        # Average segment scores, softmax, then sort descending.
        h_x = torch.mean(F.softmax(logits, 1), dim=0).data
        probs, idx = h_x.sort(0, True)
        preds = {}
        actualProbs = {}
        # Output the prediction.
        # video_name = args.frame_folder if args.frame_folder is not None else args.video_file
        print('RESULT ON ' + videoname)
        for i in range(0, 5):
            preds[i] = self.categories[idx[i]]
            print('{:.3f} -> {}'.format(probs[i], self.categories[idx[i]]))
            #print(probs[i].data.tolist())
            #actualProbs[i] = probs[i] #with cuda
            actualProbs[i] = probs[i].data.tolist() #without cuda
        return actualProbs, preds
def my_extract_frames(video_file, num_frames=8):
    """Sample `num_frames` evenly spaced frames from `video_file` via ffmpeg.

    Writes temporary JPEGs to ./frames, loads them as PIL images with
    my_load_frames, removes the directory, and returns the frame list.
    Raises AttributeError if ffmpeg's duration banner cannot be parsed.
    """
    # exist_ok replaces the try/except dance around a pre-existing folder.
    os.makedirs(os.path.join(os.getcwd(), 'frames'), exist_ok=True)
    # ffmpeg with no output prints a banner to stderr; we only need that.
    output = subprocess.Popen(['ffmpeg', '-i', video_file],
                              stderr=subprocess.PIPE).communicate()
    # Parse 'Duration: 00:05:24.13,' from ffmpeg stderr (raw string: the
    # original non-raw '\.'-containing literal is an invalid escape).
    re_duration = re.compile(r'Duration: (.*?)\.')
    duration = re_duration.search(str(output[1])).groups()[0]
    seconds = functools.reduce(lambda x, y: x * 60 + y,
                               map(int, duration.split(':')))
    rate = num_frames / float(seconds)
    subprocess.Popen(['ffmpeg', '-i', video_file,
                      '-vf', 'fps={}'.format(rate),
                      '-vframes', str(num_frames),
                      '-loglevel', 'panic',
                      'frames/%d.jpg']).communicate()
    # Sort numerically: plain sorted() puts '10.jpg' before '2.jpg', which
    # scrambled frame order whenever more than 9 frames were extracted.
    frame_paths = [os.path.join('frames', frame)
                   for frame in sorted(os.listdir('frames'),
                                       key=lambda f: int(os.path.splitext(f)[0]))]
    frames = my_load_frames(frame_paths, num_frames)
    shutil.rmtree('./frames')
    return frames
def my_load_frames(frame_paths, num_frames=8):
    """Load images from `frame_paths` and return exactly `num_frames` of them.

    Raises ValueError when fewer than `num_frames` frames are available.
    """
    frames = [Image.open(frame).convert('RGB') for frame in frame_paths]
    if len(frames) < num_frames:
        raise ValueError('Video must have at least {} frames'.format(num_frames))
    # np.linspace selects `num_frames` evenly spaced indices. The original
    # stride slice frames[::ceil(len/num)] could return fewer frames than
    # requested (e.g. 10 frames with num_frames=8 -> step 2 -> only 5).
    indices = np.linspace(0, len(frames) - 1, num_frames).astype(int)
    return [frames[i] for i in indices]
def my_render_frames(frames, prediction):
    """Overlay `prediction` on every frame; returns a list of numpy images."""
    rendered = []
    for frame in frames:
        img = np.array(frame)
        height = img.shape[0]
        # White label near the top-left corner, offset by 1/8 of the height.
        cv2.putText(img, prediction, (1, int(height / 8)),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        rendered.append(img)
    return rendered
# Extract frames from a video, classify actions on successive windows of
# frames, and write a video with the classifications overlayed.
def classify_actions(model, input_video, output_video, frame_step):
    # Extract half as many frames as the source video contains.
    capture = cv2.VideoCapture(input_video)
    video_fps = capture.get(cv2.CAP_PROP_FPS)
    total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    classified_frame_count = int(total_frames / 2)
    frames = my_extract_frames(input_video, classified_frame_count)
    # Run each full window of `frame_step` frames through the classifier;
    # a trailing partial window is dropped, matching the original behavior.
    rendered_batches = []
    for start in range(0, classified_frame_count, frame_step):
        if classified_frame_count - start < frame_step:
            break
        end = start + frame_step
        print(str(start) + ' ' + str(frame_step) + ' ' + str(end) + ' ' + str(classified_frame_count))
        window = frames[start:end]
        probs, preds = model.test_video(window, input_video)
        # Label every frame of the window with the top prediction.
        rendered_batches.append(my_render_frames(window, preds[0]))
    final_frames = [frame for batch in rendered_batches for frame in batch]
    clip = mpy.ImageSequenceClip(final_frames, int(video_fps / 2))
    clip.write_videofile(output_video)
|
13,591 | ab47bf1a68815483d5536735e6e11c0a85b3b284 | import sys
import re
import urllib.request
def checkurl(url):
    """Return the lines of `url`, fetched over HTTP(S) or read from disk.

    A string containing an http(s) link ending in 'txt' is downloaded;
    anything else is treated as a local file path. Exits the program with
    status 1 when the resource cannot be read.
    """
    # Accept both http and https links to a .txt resource (the original
    # only matched http://).
    match = re.search(r'\s*https?://.*txt', str(url))
    if match:
        try:
            req = urllib.request.Request(match.group())
            with urllib.request.urlopen(req) as open_file:
                return open_file.readlines()
        # URLError is the base of HTTPError, so this also covers DNS and
        # connection failures that previously escaped unhandled.
        except urllib.error.URLError:
            print("Oops, can not find your files online.")
            sys.exit(1)
    else:
        try:
            with open(url) as open_file:
                return open_file.readlines()
        except FileNotFoundError:
            print("Oops! Can not find your file, Please check again.")
            sys.exit(1)
def parsefile(url):
    """Parse a light-grid instruction file.

    Returns (grid_size, commands) where each command is
    [action, (x1, y1), (x2, y2)] and action is 'turn on'/'turn off'/'switch'.
    """
    lines = checkurl(url)
    # The first line holds the size of the grid.
    sizeGrid = int(lines[0])
    # Raw string: the original plain literal contained \s and \d, which are
    # invalid escape sequences (SyntaxWarning on modern Python). Compiled
    # once instead of on every line.
    pattern = re.compile(r'.*(turn on|turn off|switch)\s*([+-]?\d+)\s*,\s*([+-]?\d+)\s*through\s*([+-]?\d+)\s*,\s*([+-]?\d+).*')
    cmd = []
    for line in lines[1:]:
        # urlopen yields bytes, local files yield str; normalize to str.
        if not isinstance(line, str):
            line = line.decode()
        match = pattern.search(line)
        if match:
            cmd.append([match.group(1),
                        (int(match.group(2)), int(match.group(3))),
                        (int(match.group(4)), int(match.group(5)))])
    return sizeGrid, cmd
|
13,592 | 7d4df9a5b63bd5383bba39a956dd6e99e09bfa8d | from open_publishing.core.enums import ValueStatus, FieldKind
from open_publishing.core import SequenceItem, SequenceField
from open_publishing.core import DatabaseObject, SimpleField, FieldDescriptor
class Genre(DatabaseObject):
    """Read-only wrapper around a 'realm_genre' database record."""

    _object_class = 'realm_genre'

    def __init__(self,
                 context,
                 genre_id):
        super(Genre, self).__init__(context,
                                    genre_id)
        # 'name' is fetched from the ':basic' aspect and exposed read-only
        # through the FieldDescriptor below.
        self._fields['name'] = SimpleField(database_object=self,
                                           aspect=':basic',
                                           field_locator='name',
                                           dtype=str,
                                           kind=FieldKind.readonly)

    name = FieldDescriptor('name')

    @property
    def genre_id(self):
        # The underlying database object id doubles as the genre id.
        return self._object_id

    def __repr__(self):
        return '<Genre {0}>'.format(self.name)

    def __str__(self):
        return '{0}'.format(self.name)
class GenreItem(SequenceItem):
    """Sequence item holding one Genre; serialized to gjp as the genre's guid."""

    def __init__(self,
                 genre):
        super(GenreItem, self).__init__(ValueStatus.soft)
        self._genre = genre

    @property
    def value(self):
        return self._genre

    @classmethod
    def from_gjp(cls, gjp, database_object):
        # The gjp payload is the genre's guid; resolve it back to a Genre.
        guid = gjp
        genre_id = Genre.id_from_guid(guid)
        genre = Genre(database_object.context,
                      genre_id)
        return cls(genre)

    def to_gjp(self):
        return self._genre.guid
class GenresList(SequenceField):
    """Editable sequence of Genre objects attached to a document."""

    _item_type = GenreItem

    def __init__(self,
                 document):
        super(GenresList, self).__init__(document,
                                         "non_academic.*",
                                         "non_academic.realm_genres")

    def add(self,
            genre):
        """Append a genre, given either a Genre object or a genre name.

        Raises ValueError when the genre is unknown to the context and
        TypeError for any other argument type. Duplicates (by guid) are
        silently ignored.
        """
        genre_obj = None
        if isinstance(genre, str):
            # Resolve the name against the context's genre catalogue.
            for obj in self.database_object.context.genres:
                if genre == obj.name:
                    genre_obj = obj
                    break
            else:
                # for/else: the loop finished without finding a match.
                raise ValueError('Genre name "{}" not found in ctx.genres'.format(genre))
        elif isinstance(genre, Genre):
            if genre in self.database_object.context.genres:
                genre_obj = genre
            else:
                raise ValueError('Genre "{}" not found in ctx.genres'.format(genre.guid))
        else:
            raise TypeError('Expected str or Genre, got: {0}'.format(type(genre)))
        # De-duplicate by guid; only a real modification marks the field hard.
        if genre_obj.guid not in [i._genre.guid for i in self._list]:
            self._list.append(GenreItem(genre_obj))
            self._status = ValueStatus.hard
|
13,593 | 4178d2259e79ca5f908150f26d85acd945521b1c | """
Given an array, rotate the array to the right by k steps, where k is non-negative.
Follow up:
Try to come up as many solutions as you can, there are at least 3 different
ways to solve this problem.
Could you do it in-place with O(1) extra space?
"""
from typing import List
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """Rotate `nums` to the right by k steps, in place.

        The original cycle-following implementation corrupted the array
        whenever gcd(n, k) > 1 with n % k != 0 (e.g. n=6, k=4: a single
        cycle of length 3 revisits indices and loses values). Slice
        assignment rotates every element correctly in O(n) time.
        """
        n = len(nums)
        if n == 0:
            return
        # Rotating by any multiple of n is a no-op.
        k %= n
        if k:
            nums[:] = nums[-k:] + nums[:-k]
# Guarded demo: the original ran (and printed) on import as well.
if __name__ == "__main__":
    s = Solution()
    print(s.rotate([7, -2, 5, 0, 3, 9], 4))
    # print(s.rotate([-1, -100, 3, 99], 2))
|
13,594 | d5a6d231b0a3173e488aa45f503fe4f5ec0919c5 | #coding:utf-8
import math
n= 20000000
def get_pr(n):
    """Return a list of all primes strictly below n (sieve of Eratosthenes).

    The original trial-division loop raised NameError (bare `sqrt` instead
    of `math.sqrt`) and skipped 2 and 3 because their inner range was empty;
    the sieve fixes both and is far faster for n = 2*10**7.
    """
    if n <= 2:
        return []
    sieve = [True] * n
    sieve[0] = sieve[1] = False
    for p in range(2, int(math.sqrt(n)) + 1):
        if sieve[p]:
            # Multiples below p*p were already crossed off by smaller primes.
            for multiple in range(p * p, n, p):
                sieve[multiple] = False
    return [i for i in range(n) if sieve[i]]
# print() with a single argument is valid on both Python 2 and 3; the
# original bare `print expr` statements are a SyntaxError under Python 3.
print(math.sqrt(9))
list_sum = get_pr(n)
print(sum(list_sum))
|
13,595 | 4929f9d2007c35288bbe1d032c40eef02c2ab7b5 | import json
from typing import Iterator
from selenium.webdriver.remote.webdriver import WebDriver
from fdc.model import Ticket
from fdc.utils.browser import Browser
from fdc.utils.table import parse_table, Table
class Config(object):
    """Credentials loaded from a JSON file with 'username'/'password' keys."""

    def __init__(self, file_name: str):
        with open(file_name, 'r') as fh:
            credentials = json.load(fh)
        self.username = credentials['username']
        self.password = credentials['password']
def tickets(browser: Browser,
            config: Config,
            threshold: int = 50,
            top50: bool = True) -> Iterator[Ticket]:
    """Log in to magicformulainvesting.com, run the screen, yield tickets."""
    driver = browser.goto('https://www.magicformulainvesting.com/Account/LogOn')
    _do_login(driver, config)
    _do_query(driver, threshold, top50)
    # The result table lives inside the #tableform element.
    element = driver.find_element_by_css_selector('#tableform > table')
    return _extract_tickets(parse_table(element))
def _do_login(driver: WebDriver, config: Config):
    """Fill the credentials form and submit it."""
    for element_id, value in (('Email', config.username),
                              ('Password', config.password)):
        driver.find_element_by_id(element_id).send_keys(value)
    driver.find_element_by_id('login').click()
def _do_query(driver: WebDriver, threshold: int, top50: bool):
    """Set the minimum market cap and result-set radio button, then run."""
    market_cap = driver.find_element_by_id('MinimumMarketCap')
    market_cap.clear()
    market_cap.send_keys(threshold)
    # value=false is selected for top50=True, value=true otherwise.
    selector = 'input[value=false]' if top50 else 'input[value=true]'
    driver.find_element_by_css_selector(selector).click()
    driver.find_element_by_id('stocks').click()
def _extract_tickets(table: Table) -> Iterator[Ticket]:
    """Yield one Ticket per table row from the ticker and company columns."""
    for row in table.rows:
        yield Ticket(
            code=row.get_value('Ticker'),
            name=row.get_value('Company Name (in alphabetical order)'),
        )
|
13,596 | 9f332ab3b6ee3d493927135ff9a9f8cfca45d532 | import sys
import configparser
from datetime import datetime as dt
from my_vocabulary.flashcard_dialog import FlashCardDialog
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from PyQt5.QtWidgets import QApplication
from models.general import (
Entry,
Note,
Tray,
FlashCard,
FlashCardPage,
CardBox,
)
# Read the database URL from the project's alembic.ini and prefix the
# relative sqlite path, since this script runs one directory deeper.
alembic_config = configparser.ConfigParser()
alembic_config.read("../alembic.ini")
database_url = alembic_config["alembic"]["sqlalchemy.url"].replace(
    "sqlite:///", "sqlite:///../"
)
engine = create_engine(database_url)
Session = sessionmaker(bind=engine)
session = Session()  # module-wide session shared by all helpers below

import logging

# Echo every SQL statement the engine issues (debug aid).
logging.basicConfig()
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)

# The two languages of the demo card box.
lang_1 = "Deutsch"
lang_2 = "Französisch"
def main_3():
    """Remove the last note of entry #1 and commit."""
    entry = session.query(Entry).get(1)
    entry.notes.pop()
    session.commit()
def main_2():
    """Delete entry #1 entirely and commit."""
    first_entry = session.query(Entry).get(1)
    session.delete(first_entry)
    session.commit()
def main():
    """Create one demo entry with two notes and persist everything."""
    demo_entry = Entry(text="test", language=lang_1)
    notes = [Note(text="this is a example"),
             Note(text="this is another example")]
    demo_entry.notes = notes
    session.add(demo_entry)
    for note in notes:
        session.add(note)
    session.commit()
    if True:
        print("test.....")
def create_notes(entry):
    """Attach two example notes to `entry` and stage them in the session."""
    notes = [Note(text="this is a example"),
             Note(text="this is another example")]
    entry.notes = notes
    for note in notes:
        session.add(note)
# Parallel word lists: index i in one language is the translation of
# index i in the other.
example_translations = {
    lang_1: ["Test", "hallo", "Guten Tag", "Guten Abend", "die Frau", "der Mann", "der Junge", "das Mädchen", "der Ball"],
    lang_2: ["test", "salut", "Bonjour", "Bonsoir", "la femme", "l'homme (m.)", "le garçon", "la fille", "le balon"]
}

entry_counter = 0  # alternates the language of consecutive entries
word_cnt = 0  # next index into example_translations
def create_entry(flashcardpage):
    """Create the next entry, alternating between lang_1 and lang_2.

    Consumes one word (word_cnt) per call; the counter pair is module-global
    so successive calls build translation pairs across two pages.
    """
    global entry_counter, word_cnt
    # Even counter -> lang_1 word, odd counter -> lang_2 word.
    language = lang_1 if entry_counter % 2 == 0 else lang_2
    entry = Entry(
        text=example_translations[language][word_cnt],
        language=language,
        flashcardpage=flashcardpage
    )
    word_cnt += 1
    session.add(entry)
    entry_counter += 1
    return entry
def create_cardbox():
    """Create a card box with three trays of increasing poll intervals."""
    cardbox = CardBox(language_1=lang_1, language_2=lang_2)
    cardbox.trays = [Tray(poll_interval=interval) for interval in (1, 3, 7)]
    session.add(cardbox)
    return cardbox
def create_flashcards(cardbox):
    """Fill every tray of `cardbox` with three two-sided flash cards."""
    for tray in cardbox.trays:
        for _ in range(3):
            card = FlashCard(tray=tray)
            pages = []
            # Two pages per card; create_entry alternates the language, so
            # the pair forms one translation.
            for _ in range(2):
                page = FlashCardPage()
                page.entry = create_entry(page)
                create_notes(page.entry)
                session.add(page)
                pages.append(page)
            card.pages = pages
            tray.add_flashcard(card)
            session.add(card)
if __name__ == "__main__":
    # Toggle: the dead `if 0` branch launches the GUI dialog; the live
    # branch seeds the database with demo data and exits.
    if 0:
        app = QApplication([])
        window = FlashCardDialog()
        window.show()
        sys.exit(app.exec_())
    else:
        cardbox = create_cardbox()
        create_flashcards(cardbox)
        session.commit()
        exit()
        # Everything below is unreachable scratch code kept for manual
        # cleanup experiments (deleting cards, trays, and the card box).
        # NOTE(review): original indentation was lost; nesting below is a
        # best-effort reconstruction — confirm before reactivating.
        cardbox = session.query(CardBox).get(1)
        for tray in cardbox.trays:
            for flashcard in tray.flashcards:
                session.delete(flashcard)
            session.delete(tray)
        session.commit()
        exit()
        for flashcard in tray.flashcards:
            session.delete(flashcard.pages[0])
        session.commit()
        session.delete(cardbox)
        session.commit()
|
13,597 | ff2810cd09f7945573a05984c02aa9f54ad90f0c | # Generated by Django 3.1.3 on 2020-11-18 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the Ad and Coupon tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.FilePathField(path='/img')),
                ('product_name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=100)),
                ('product_price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('discount', models.DecimalField(decimal_places=2, max_digits=5)),
                ('image', models.FilePathField(path='/img')),
            ],
        ),
    ]
|
13,598 | b350e52c0cf67e6f322152c1adb353597a407f79 | __author__ = 'Yanyi'
fp = open("Data/vh_sorted_train.txt", "r")
fw = open("Data/re_vh_sorted_train.txt", "w")
childFathers = {}
for line in fp.readlines():
lineNumbers = line[:-1].split('\t')
children = lineNumbers[1:]
if children[0] == "":
continue
for c in children:
if c not in childFathers:
childFathers[c] = []
childFathers[c].append(lineNumbers[0])
for k, v in childFathers.iteritems():
fw.write(k + "\t" + "\t".join(v) + "\n")
fp.close()
fw.close()
|
13,599 | f021567911646137f6921e44b3bc5ad1464e7c59 | #!/usr/bin/python
"""
4Sum
Given an array S of n integers, are there elements a, b, c, and d in S such
that a + b + c + d = target? Find all unique quadruplets in the array which
gives the sum of target.
Note:
Elements in a quadruplet (a,b,c,d) must be in non-descending order.
(ie, a <= b <= c <= d)
The solution set must not contain duplicate quadruplets.
For example, given array S = {1 0 -1 0 -2 2}, and target = 0.
A solution set is:
(-1, 0, 0, 1)
(-2, -1, 1, 2)
(-2, 0, 0, 2)
"""
class Solution:
    # @return a list of lists of length 4, [[val1,val2,val3,val4]]
    def fourSum(self, num, target):
        """Return all unique quadruplets of `num` that sum to `target`.

        Sorts in place, fixes the first two elements, and closes the
        remaining pair with two pointers, skipping duplicate values rather
        than deduplicating through a set (the original set-based version
        TLE'd). `range` replaces the Python-2-only `xrange`, which raises
        NameError on Python 3. Quadruplets come out in non-descending order.
        """
        if len(num) < 4:
            return []
        num.sort()
        n = len(num)
        result = []
        for i in range(n - 3):
            if i > 0 and num[i] == num[i - 1]:
                continue  # same first element already handled
            for j in range(i + 1, n - 2):
                if j > i + 1 and num[j] == num[j - 1]:
                    continue  # same second element already handled
                k, l = j + 1, n - 1
                while k < l:
                    summ = num[i] + num[j] + num[k] + num[l]
                    if summ < target:
                        k += 1
                    elif summ > target:
                        l -= 1
                    else:
                        result.append([num[i], num[j], num[k], num[l]])
                        k += 1
                        l -= 1
                        # Skip duplicates of the inner pair.
                        while k < l and num[k] == num[k - 1]:
                            k += 1
                        while k < l and num[l] == num[l + 1]:
                            l -= 1
        return result
if __name__=="__main__":
    num, target = [1, 0, -1, 0, -2, 2], 0
    # print() works on both Python 2 and 3; the original bare `print expr`
    # statement is a SyntaxError under Python 3.
    print(Solution().fourSum(num, target))
"""
This version like 3 sum closest but TLE.
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.