blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
476029c999e320eb929a9e11022fbc490f764b53 | e95fc8c562c050f47ecb6fb2639ce3024271a06d | /easy/1732.找到最高海拔.py | c4b06ab3bb635300bda99006a24c867d6d0c5a0c | [] | no_license | w940853815/my_leetcode | 3fb56745b95fbcb4086465ff42ea377c1d9fc764 | 6d39fa76c0def4f1d57840c40ffb360678caa96e | refs/heads/master | 2023-05-25T03:39:32.304242 | 2023-05-22T01:46:43 | 2023-05-22T01:46:43 | 179,017,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | #
# @lc app=leetcode.cn id=1732 lang=python3
#
# [1732] 找到最高海拔
#
from typing import List
# @lc code=start
class Solution:
def largestAltitude(self, gain: List[int]) -> int:
res = [0]
for i in range(len(gain)):
res.append(res[i] + gain[i])
return max(res)
# @lc code=end
if __name__ == "__main__":
s = Solution()
res = s.largestAltitude([-5, 1, 5, 0, -7])
print(res)
assert res == 1
res = s.largestAltitude([52, -91, 72])
print(res)
assert res == 52
| [
"940853815@qq.com"
] | 940853815@qq.com |
cc29ef64fbcb8d7437bc2beb8195cceffa04b4a1 | 7abf2c1b981ec1e541eb4294185c21af52043ac2 | /TensorFlow Technology Analysis and Actual Warfare _Li Jiaxuan/10_save_restore_net.py | 9f42838935f4d191684082e0ec81751a3c4855f1 | [] | no_license | wolf-bailang/AI-Book-Source-Code | b9453af6a663fed465a98fdc0158e3ef177516ca | 77122e021230ed12410f1ff6c91758f3f86d1241 | refs/heads/master | 2020-04-11T15:42:24.856085 | 2019-02-20T16:01:00 | 2019-02-20T16:01:00 | 161,900,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,386 | py | #!/usr/bin/env python
import tensorflow as tf
import numpy as np
import input_data
import os
# This shows how to save/restore your model (trained variables).
# To see how it works, please stop this program during training and resart.
# This network is the same as 3_net.py
# 定义权重函数
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.01))
# 定义模型
def model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden): # this network is the same as the previous one except with an extra hidden layer + dropout
# 第一个全连接层
X = tf.nn.dropout(X, p_keep_input)
h = tf.nn.relu(tf.matmul(X, w_h))
h = tf.nn.dropout(h, p_keep_hidden)
# 第二个全连接层
h2 = tf.nn.relu(tf.matmul(h, w_h2))
h2 = tf.nn.dropout(h2, p_keep_hidden)
#输出预测值
return tf.matmul(h2, w_o)
# 加载数据
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])
# 初始化权重参数
w_h = init_weights([784, 625])
w_h2 = init_weights([625, 625])
w_o = init_weights([625, 10])
#生成网络模型,得到预测值
p_keep_input = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")
py_x = model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden)
#定义损失函数
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)
#定义一个存储路径
ckpt_dir = "./ckpt_dir"
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
#定义一个计数器,为训练轮数计数
global_step = tf.Variable(0, name='global_step', trainable=False)
# Call this after declaring all tf.Variables.
# 在声明完所有变量后,调用tf.train.Saver
saver = tf.train.Saver()
# This variable won't be stored, since it is declared after tf.train.Saver()
# 位于tf.train.Saver 之后的变量将不会被存储
non_storable_variable = tf.Variable(777)
# Launch the graph in a session
#训练模型并存储
with tf.Session() as sess:
# you need to initialize all variables
tf.initialize_all_variables().run()
ckpt = tf.train.get_checkpoint_state(ckpt_dir) # 得到global_step 的初始值
if ckpt and ckpt.model_checkpoint_path:
print(ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path) # restore all variables
start = global_step.eval() # get last global_step
print("Start from:", start)
for i in range(start, 100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):
sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
p_keep_input: 0.8, p_keep_hidden: 0.5})
global_step.assign(i).eval() # set and update(eval) global_step with index, i # 更新计数器
saver.save(sess, ckpt_dir + "/model.ckpt", global_step=global_step) # 存储模型
print(i, np.mean(np.argmax(teY, axis=1) ==
sess.run(predict_op, feed_dict={X: teX, Y: teY,
p_keep_input: 1.0,
p_keep_hidden: 1.0})))
| [
"noreply@github.com"
] | wolf-bailang.noreply@github.com |
962cad767cbbcd853c697dca22409323a3e753ba | 5c3e7573e26ee8f15ae261dcee4697572b0757a8 | /1.py | 80747022a439b81cfa3a6e105ed392722d9b2c8d | [] | no_license | duongdinhnghia1999/KTLTthuchanhbuoi05 | c27c46619f6f7a400b175b63bfc93db2460c97c5 | 435485c0c1ef0de5535fd756428167b0738aa6e6 | refs/heads/master | 2020-05-20T20:55:19.289774 | 2019-06-04T08:12:24 | 2019-06-04T08:12:24 | 185,751,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import mymath
values = [2, 4, 6, 8, 10]
print ('Squares: ')
for v in values:
print(mymath.square(v))
print('Cubes: ')
for v in values:
print(mymath.cube(v))
print('Average: ' + str(mymath.average(values))) | [
"noreply@github.com"
] | duongdinhnghia1999.noreply@github.com |
0fdd9de339a26ba135bc8388006b048c5bf33995 | b7e3faac9d3ba2cffa4185712e78d152956ace3d | /7_1_solution.py | 0527225fd7f34e042afd4431a6cbb1cd40e39a9c | [] | no_license | gaurav3384/Assignment_7.11189 | 0fc600629a620182f8ece21a52e5c0ce6fcc4fce | a4092a187e1dd8e898a7827110bd3771d6d948d9 | refs/heads/master | 2020-03-25T00:30:44.681929 | 2018-08-01T18:04:48 | 2018-08-01T18:04:48 | 143,189,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,798 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 23:00:58 2018
@author: gauravkgupta
"""
import numpy as np
import pandas as pd
import re
def fill_na(df) :
""" Problem_1 : Fill missing values in column 'FlightNumber' of DataFrame Where values in this column need to be increased by 10 with each row."""
df2 = pd.DataFrame({'FlightNumber': np.arange(10045,10085,10)})
df3 = df['FlightNumber'].copy()
for item in df[df3.isnull()].index.tolist() :
df3.loc[item] = df2['FlightNumber'].loc[item]
df['FlightNumber'] = df3.astype(dtype="int64")
return df
def create_temp_DataFrame(df) :
""" Problem_2 : Creating a temparory DataFrame with the values in the column 'From_To'. """
df1 = df.copy()
if 'From_To' in df1.columns :
c1,c2 = re.split('_', 'From_To')
c1_values = []
c2_values = []
for item in df1['From_To'] :
a,b = re.split('_',item)
c1_values.append(a)
c2_values.append(b)
temp_df = pd.DataFrame({c1 : c1_values, c2 : c2_values})
return temp_df
def standardise_DataFrame(temp_df) :
""" Problem_3 : standardise temparory DataFrame values."""
temp_df['From'] = pd.DataFrame([item for item in map(lambda x : x.capitalize(), temp_df['From'])])
temp_df['To'] = pd.DataFrame([item for item in map(lambda x : x.capitalize(), temp_df['To'])])
return temp_df
def add_drop_columns(df, temp_df) :
"""
Problem_4 :
1. Delete a column from DataFrame
2. Prefrom merge in DataFrame df and temp_df.
"""
if 'From_To' in df.columns :
df.drop('From_To', axis=1, inplace=True)
df = pd.concat([df, temp_df], axis=1)
return df
def column_operations(df):
"""
Problem_5 :
1. Create new DataFrame delays using a existing column 'RecentDelays'.
2. Use column names as delay_1, delay_2... etc in DataFrame delays.
2. Use NaN for missing values.
3. Replace column 'RecentDelays' with DataFrame 'delays'.
"""
max_len = max(map(lambda x : len(x), df['RecentDelays']))
delays = pd.DataFrame()
for i in range(max_len) :
temp = 'delay_'+ str(i+1)
temp_list = []
for j in range(df.shape[0]) :
try :
temp_list.append(df['RecentDelays'].iloc[j][i])
except :
temp_list.append(np.nan)
delays[temp] = temp_list
df.drop('RecentDelays', axis=1, inplace=True)
df = pd.concat([df,delays], axis=1)
return df
if __name__ == '__main__' :
df = pd.DataFrame({'From_To': ['LoNDon_paris', 'MAdrid_miLAN', 'londON_StockhOlm',
'Budapest_PaRis', 'Brussels_londOn'],
'FlightNumber': [10045, np.nan, 10065, np.nan, 10085],
'RecentDelays': [[23, 47], [], [24, 43, 87], [13], [67, 32]],
'Airline': ['KLM(!)', '<Air France> (12)', '(British Airways. )',
'12. Air France', '"Swiss Air"']})
print(f"DataFrame : \n{df}\n\n")
df = fill_na(df)
print(f"DataFrame after filling missing values in column 'FlightNumber' : \n{df}\n\n")
temp_df = create_temp_DataFrame(df)
print(f"Temparory DataFrame created from column 'From_To' : \n{temp_df}\n\n")
temp_df = standardise_DataFrame(temp_df)
print(f"Temparory DataFrame after standardising columns values : \n{temp_df}\n\n")
df = add_drop_columns(df, temp_df)
print(f"DataFrame after dropping column 'From_To' and merging with Temparory DataFrame: \n{df}\n\n")
df = column_operations(df)
print(f"DataFrame after operation performed on column 'RecentDelays': \n{df}\n\n") | [
"noreply@github.com"
] | gaurav3384.noreply@github.com |
12b46b8ec88ad553c73e0119885f502290535082 | b58a3420ef081b1cea4accd09f48e51ebec1fb66 | /Gömülü Fonksiyonlar/GömülüFonks.py | 8d8c6f26b524d5da169f5d563736cea3c3b4d9fe | [] | no_license | Qhupe/Pythonda-gomulu-fonksiyonlar-ve-veri-yapilar-metotlari | c1d3fbb447f306fedd04a4e9c2cf5ccdb486f80b | 8355e72b6cf6b1eadbfda73511c2276b62339cae | refs/heads/main | 2023-04-19T20:23:06.792816 | 2021-05-08T01:03:57 | 2021-05-08T01:03:57 | 365,387,745 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,574 | py |
print("**************** MAP Metodu **************** ")
def double(x):
return x*2
s=list(map(double,[1,2,3,4,5,6,7,8,]))#burada map fonksiyonu içinde double fonksiyonunu
#çağırdık ve içine girdiğimiz liste elemanlarını göndererek s isminde bir listeye atadık
print(s)
a=list(map(lambda x: x**2,(1,2,3,4,5,6,7,8,9,10)))
print(a)
liste1=[5,6,1,2,3,9,25,45,78]
liste2=[6,2,5,9,4,21,54]
liste3=[25,486,21,65,59,78,21,55,77]
listmap=list(map(lambda x,y:x*y,liste1,liste2))#map fonksiyonu girilen parametre kadar liste alabilir
print(listmap)
print("**************** REDUCE Metodu ****************")
from functools import reduce
def toplama(x,y):
return x+y
print(reduce(toplama,[15,3,26,48]))#reduce fonksiyonunun yaptığı ise girilen
# fonksiyonu önce ilk iki elemana uygular sonra çıkan sonucu tek tek diğer
# elemanlara uygular
s=reduce (lambda x,y : x*y,[1,2,3,4,5,6])#burada yine aynı mantık ilk iki elemanı
# yani 1 ile 2 yi çarptı çıkan sonucu ise teker teker diğer elemanlar ile çarptı yani
# 1 ile 2 nin çarpımı = 2 sonuç oldu sonrasında 2*3=6,6*4=24,24*5=120,120*6=720
print(s)
def maksimum(x,y):
if(x>y):
return x
else:
return y
maksimum(3,4)
maks=reduce(maksimum,[-5,6,9,2,4])# burada yine - 5 ile 6 yı maksimum fonksiyonuna
# gönderdi ve büyük olan sayıyı yani sonucu 6 aldı sonra 6 sonucunu 9 ile maksimum
# fonksiyonuna gönderdi sıra sıra bunu yaparak en büyük sayıyı buldu
print(maks)
print("**************** FİLTER Metodu ****************")
listeçift=list(filter(lambda x : x%2==0,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]))
#filter fonksiyonu ise içine girilen fonksiyona kendi içine girilen parametreyi tek
# tek gönderir ve sadece true olanları return yapar
print(listeçift)
def asal_mi(x):
i = 2
if(x==1):
return False
elif(x==2):
return True
else:
while(i < x):
if(x % i == 0):
return False
i+=1
return True
asalmi=list(filter(asal_mi,range(1,500)))
print(asalmi)
print("**************** ZİP Metodu ****************")
listezip1=[1,2,3,6,5,4,9,8]
listezip2=[12,65,896,547,223,54,145,14,77,89]
#listenin i. elemanlarını gruplandırmaya çalışalım
i = 0
sonuc=list()
while(i<len(listezip1) and i<len(listezip2)):
sonuc.append((listezip1[i],listezip2[i]))
i+=1
print(sonuc)#burada bu kadar uzun işlem yapacağımıza zip fonksiyonunu kullanırsak
sonuczip=list(zip(listezip1,listezip2))
print("*****Zip Metodu İle Birleştirme*****")
print(sonuczip)#zip fonksiyonu istenilen gruplandırma kadar eleman alabilir mesela 3'lü grup
listea=[12,23,34,45,56,67,78,89,90]
listeb=["Python","Java","CSS","HTML","JavaScript"]
print("*****3'lü Birleştirme*****")
sonuczip2=list(zip(listea,listeb,listezip1))
print(sonuczip2)
print("**************** Enumerate Metodu ****************")
listemeyve=["Muz","Elma","Armut","Çilek","Karpuz"]
#sonucu[(0,'Muz'),(1,'Elma'),(2,'Armut'),(3,'Çilek'),(4,'Karpuz') yapılmak istenirse
sonucmeyve=list()
i=0
for a in listemeyve:
sonucmeyve.append((i,a))
i+=1
print(sonucmeyve)
print("*****Metod ile Birleştirme*****")
sonucmeyvefonk=list(enumerate(listemeyve))
print(sonucmeyvefonk)
#Enumerate Fonksiyonu ise liste elemanlarını teker teker indekslemeye yardımcı olur
#indeklerken bizi döngüler kullanmaktan kurtarır
for i,j in enumerate(listemeyve):#burada ise liste içinde gezinip teker teker
# indekslediğimiz verileri alt alta yazdeırdık
print(i,j)
print("******************************")
for i,j in enumerate(listemeyve):#burada ise sadece çift indeks numarasına sahip verileri ekrana yazdırdık
if(i%2==0):
print(i,j)
print("**************** All ve Any Metodu ****************")
def hepsi(liste):
for i in liste:
if not i:
return False
return True
listeft=[True,True,False,True,False]
print(hepsi(listeft))
listesayi=[1,2,3,4,5,6,7]#Sayılarda sadece 0 False değer Alır
listeFalse=[False,False,False,False]
print(hepsi(listesayi))
def herhangi(liste):
for i in liste:
if i :
return True
return False
print(herhangi(listesayi))
print(herhangi(listeFalse))
print("*****All ve Any Metodu ile Yapma*****")
print(all(listesayi))#all fonksiyonu bütün değerler true ise True ,en az bir değer
# False ise False değer döndürür
print(all(listeft))
print(any(listeft))#any Fonksiyonu Bütün değerler False ise False,en az bir değer
# True ise True değer döndürür
print(any(listeFalse))
| [
"hupesat@gmail.com"
] | hupesat@gmail.com |
1cca9cef85ff96101fe84f0c54b6b314b37e5344 | 2ed993ed697e49d4a85f3868bcd85e3894d72e10 | /insertion_sort.py | fc1f2548e18e1d8c94787ed7a6a0ee41533357c0 | [] | no_license | eleanorhsu/sorting-algorithms | b5c3130589c9187a8af3d84213ef8eedd6d5192c | 65c38bcf5a8c17363e53e65e9f0e00b54b1b13d7 | refs/heads/main | 2023-05-31T03:58:58.573724 | 2021-07-04T12:51:59 | 2021-07-04T12:51:59 | 317,121,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | def insertion_sort(arr):
for i in range(1, len(arr)):
for j in range(i):
if arr[i] < arr[j]:
arr.insert(j, arr[i])
del arr[i+1]
break
return arr
def main(input):
arr = input.split(',')
arr = [int(x) for x in arr]
print(insertion_sort(arr))
if __name__ == "__main__":
main(input()) | [
"eleanor.h.hsu@gmail.com"
] | eleanor.h.hsu@gmail.com |
134f979c0fc5edc79a4a6a7063a58844f4fd0536 | fa17ebb3511585f08a3ac30a7347edd61695a935 | /tfidf/TFIDF.py | c3c90471a1cf3cc65abff01d6b802aae826941b9 | [
"Apache-2.0"
] | permissive | 502ping/RetrievalBased_Chatbot | 3f407108459bb2349400f951e88211488955c505 | 07a1eaae0683dd0f6b31682ab2cf2c6c6733b413 | refs/heads/master | 2020-04-26T19:06:43.816327 | 2019-03-04T15:09:56 | 2019-03-04T15:09:56 | 173,763,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,486 | py | import jieba
from gensim import corpora,models,similarities
import codecs
#读取训练词库
Train_test = 'tencent_corpus/data.txt'
Traintest = codecs.open(Train_test,'rb').readlines()
Traintest = [w.strip() for w in Traintest]
# 分词完毕得到结果
Traintest_word = []
for word in Traintest:
words_list = [words for words in jieba.cut(word)]
Traintest_word.append(words_list)
#测试用词
#TestResult = []
#TestResult1 = []
#for word in doc_test_list:
# if word not in stopwords:
# TestResult = TestResult1.append(word)
#用dictionary方法获取词袋
dictionary = corpora.Dictionary(Traintest_word)
#词袋中用数字对所有词进行了编号
dictionary.keys()
#使用doc2bow制作语料库,利用词袋模型中的字典将其映射到向量空间
corpus = [dictionary.doc2bow(doc) for doc in Traintest_word]
#对测试文档也进行制作语料库,利用词袋模型中的字典将其映射到向量空间
tfidf_moel = models.TfidfModel(corpus)
tfidf_moel.save('tfidf.model')
#使用TF-IDF模型对语料库建模
print("***************Model has been successfully built!*******************")
print("***************Begin Testing*******************")
while 1:
keyword = input("ask:")
input_str = keyword
doc_test = input_str
doc_test_list = [word for word in jieba.cut(doc_test)]
doc_test_vec = dictionary.doc2bow(doc_test_list)
#获文档中,每个词的TF-IDF值 tfidf[corpus]
#对每个目标文档,分析测试文档的相似度
index = similarities.SparseMatrixSimilarity(tfidf_moel[corpus], num_features=len(dictionary.keys()))
sim = index[tfidf_moel[doc_test_vec]]
#根据相似度排序是一个列表 表中每一项是一个元组 元组中前面是原句索引 后面是相似度
SimilaritiesList = sorted(enumerate(sim), key=lambda item: -item[1])
num = 0
while (num <= 1):
Result_tutple = SimilaritiesList[num] # 获取元组 索引 相似度
Result_index = Result_tutple[0] # 获取索引
num = num + 1
response_list = Traintest_word[Result_index]
Result_score = Result_tutple[1] # 获取索引
print("该回答相似度为"+str(Result_score))
if response_list.index('\t')!=-1:
newlist = response_list[response_list.index('\t'):]
response=''
for res in newlist:
response+=res
print("answer:"+response)
| [
"46272447+502ping@users.noreply.github.com"
] | 46272447+502ping@users.noreply.github.com |
ea3cd162c0d901f2e544efeb63cbb797a5bb79b7 | 1d6512c1c8bc4e37943b64333c2f5cae741f1fa2 | /setup.py | 420d7ab186cf8e00c2501643ed2bea82011ac040 | [] | no_license | aepnat/scrapy_traveloka | 0505930c5e30c673fb4cf89ed1bbb925d01bfd7d | 8c7e91f4d73155a002252ef89cafe948fd095c7e | refs/heads/master | 2020-04-07T09:15:38.561741 | 2018-11-24T02:48:11 | 2018-11-24T02:48:11 | 158,245,715 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # Automatically created by: shub deploy
from setuptools import setup, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = traveloka.settings']},
)
| [
"aep.nat@gmail.com"
] | aep.nat@gmail.com |
48d86d1a6bbe9509b8147d9a60c08154a1e01a2e | 23e351955fbd6407b53a5604766bd28c0e5fd18a | /TSP Simulated Annealing.py | e192efc2cab6483b3a134c29f5ea750ebea29142 | [] | no_license | MedadRufus/Optimisation-Project-Design-Engineering | b85b90f8c961468d5373b5e0e2ae9adc3ecfd611 | 81407000f4fa4058622d9f8994c035668cafd489 | refs/heads/master | 2023-07-23T12:59:31.276980 | 2019-12-12T12:00:32 | 2019-12-12T12:00:32 | 218,971,390 | 0 | 0 | null | 2023-07-06T21:36:05 | 2019-11-01T11:18:02 | Python | UTF-8 | Python | false | false | 3,507 | py | # Created by Medad Rufus Newman on 12/12/2019
import numpy as np
from scipy import spatial
import matplotlib.pyplot as plt
import sys
#file_name = sys.argv[1] if len(sys.argv) > 1 else 'data/nctu.csv'
#points_coordinate = np.loadtxt(file_name, delimiter=',')
num_points = 16
points_coordinate = np.array([[0.181,14.9],
[9.06,9.40],
[9.38,29.6],
[10.0,9.77],
[14.0,0.915],
[14.5,10.1],
[14.9,11.8],
[16.5,10.9],
[19.0,22.4],
[19.1,15.6],
[20.0,6.26],
[21.6,10.8],
[24.1,17.3],
[24.5,18.1],
[26.3,9.85],
[0,0]
])
distance_matrix = spatial.distance.cdist(points_coordinate, points_coordinate, metric='euclidean')
distance_matrix = distance_matrix # 1 degree of lat/lon ~ = 111000m
def cal_total_distance(routine):
'''The objective function. input routine, return total distance.
cal_total_distance(np.arange(num_points))
'''
num_points, = routine.shape
return sum([distance_matrix[routine[i % num_points], routine[(i + 1) % num_points]] for i in range(num_points)])
# %%
from sko.SA import SA_TSP
sa_tsp = SA_TSP(func=cal_total_distance, x0=range(num_points), T_max=100, T_min=1, L=1000 * num_points)
best_points, best_distance = sa_tsp.run()
print(best_points, best_distance, cal_total_distance(best_points))
# %% Plot the best routine
from matplotlib.ticker import FormatStrFormatter
fig, ax = plt.subplots(1, 2)
fig.suptitle("Plots showing the results of using Simulated Annealing to optimise the path")
best_points_ = np.concatenate([best_points, [best_points[0]]])
best_points_coordinate = points_coordinate[best_points_, :]
ax[0].plot(sa_tsp.best_y_history)
ax[0].set_ylabel("Distance")
ax[0].set_xlabel("Iteration")
ax[0].set_title('Distance improvements over each iteration')
ax[1].plot(best_points_coordinate[:, 0], best_points_coordinate[:, 1],
marker='o', markerfacecolor='b', color='c', linestyle='-')
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
ax[1].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
ax[1].set_title('Final minimum travelled path: minimum distance = {0:.2f}km '.format(best_distance))
ax[1].set_xlabel("Longitude")
ax[1].set_ylabel("Latitude")
plt.show()
# %% Now Plot the animation
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
best_x_history = sa_tsp.best_x_history
fig2, ax2 = plt.subplots(1, 1)
ax2.set_title('title', loc='center')
line = ax2.plot(points_coordinate[:, 0], points_coordinate[:, 1],
marker='o', markerfacecolor='b', color='c', linestyle='-')
ax2.xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
ax2.set_xlabel("Longitude")
ax2.set_ylabel("Latitude")
plt.ion()
p = plt.show()
def update_scatter(frame):
ax2.set_title('iter = ' + str(frame))
points = best_x_history[frame]
points = np.concatenate([points, [points[0]]])
point_coordinate = points_coordinate[points, :]
plt.setp(line, 'xdata', point_coordinate[:, 0], 'ydata', point_coordinate[:, 1])
return line
ani = FuncAnimation(fig2, update_scatter, blit=True, interval=25, frames=len(best_x_history))
plt.show()
#ani.save('sa_tsp.gif', writer='pillow')
| [
"medadrufus@gmail.com"
] | medadrufus@gmail.com |
90861679ad71262055147a504cc95fad10cfb496 | 9e504da68297149d99c66baab2b9939c21f20664 | /IntersectList.py | bb76f835f5cc9335ee8869601a672be283904ffd | [] | no_license | LeoTheMighty/beginner_python_exercises | 87690b9c7680860eec984417dbd4a17551a56856 | 0945c8a8c0f297b3a58a5e832bd41a8fc5126c1f | refs/heads/master | 2020-03-15T14:23:07.052989 | 2018-05-05T17:08:38 | 2018-05-05T17:08:38 | 132,188,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 272 | py | # Make a list intersector
a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
b = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 88]
intersectList = []
for aelement in a:
if (aelement in b) and not (aelement in intersectList):
intersectList.append(aelement)
print(intersectList) | [
"leonid@ac93.org"
] | leonid@ac93.org |
adb7092b721162a83ad0df6b17753e5b4df2eadb | 8aebf052796b880fcef07fcbbb876a3bcc5c1924 | /Data Quality with Python for Beginner/Deduplikasi.py | 1abb3256bc147b65bc1705d640c6764ef3753e82 | [] | no_license | sultanardia/dqlab | e41420324105901b565af7acc540e7c8f135dea9 | 256a9ecee5522a1fd4a9a971a11a7b8fe4b40626 | refs/heads/main | 2023-04-14T07:43:44.620155 | 2021-05-01T10:50:42 | 2021-05-01T10:50:42 | 346,243,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import pandas as pd
import numpy as np
import io
import pandas_profiling
retail_raw = pd.read_csv('dataset.csv')
# Duplikasi data merupakan data dengan kondisi pada row-row tertentu memiliki kesamaan data di seluruh kolomnya. Tentunya ada data yang duplikat dalam dataset yang dimiliki. Kondisi duplikasi harus diatasi dengan jalan mengeliminir baris yang mengalami duplikasi
retail_raw.duplicated(subset = None).to_csv(r'csv.csv') # Cek duplikasi
retail_raw.drop_duplicates() # Dropping duplikasi | [
"noreply@github.com"
] | sultanardia.noreply@github.com |
571d3d7ecc84ebbfb01d15f15d8896b925fcaff2 | eb28effe32371194d236531a6db56a28e5a83d50 | /products/migrations/0004_auto_20180110_0918.py | 9c5609788e265dbb2a8529478ce98bd57dc7c850 | [] | no_license | inno-asiimwe/udemy-django-ecommerce-justin | df3858f06f4ae0a6235ffc45564e6639cc34a7ae | 7c437836d3cc894582b258ef15dfd14cc6ef918f | refs/heads/master | 2021-05-13T16:13:41.025721 | 2018-01-15T14:43:48 | 2018-01-15T14:43:48 | 116,788,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # Generated by Django 2.0.1 on 2018-01-10 09:18
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_product_image'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.ImageField(blank=True, null=True, upload_to=products.models.upload_image_path),
),
]
| [
"innocent@Innocents-MacBook-Pro.local"
] | innocent@Innocents-MacBook-Pro.local |
48f6a582b57139777865caf27a6ba3f840b64e36 | 2a621e29ccdb965514b06cf9908fabae034f447d | /24. Lexicographic permutations.py | 09c5f4522a8591cde3855873dd4ed7ae85c0037a | [] | no_license | ZitingShen/Euler-Project | 9760e326e8be460d13caa08c59e57f343fc20594 | a77718e84b9cfaeb88cb8a96f67c22523703cbd6 | refs/heads/master | 2016-09-06T18:15:39.771114 | 2015-10-06T05:08:07 | 2015-10-06T05:08:07 | 26,204,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | import time
start=time.time()
p=9*8*7*6*5*4*3*2
rest=1000000
nums=[0]*10
num=[]
for x in range(1,11):
if rest%p!=0:
t=0
i=-1
while t<=int(rest/p):
i+=1
if not nums[i]:
t+=1
num.append(i)
nums[num[x-1]]=1
print(nums)
rest-=p*(int(rest/p))
if x!=10:
p=p//(10-x)
print(rest,p)
else:
t=0
i=-1
while t<rest/p:
i+=1
if not nums[i]:
t+=1
num.append(i)
print("do the rest yourself! Just put the large numbers before small ones!")
break
S=''
for x in range(len(num)):
S+=str(num[x])
elapsed=(time.time()-start)
print ("found %s in %s seconds" % (S,elapsed))
| [
"zshen@brynmawr.edu"
] | zshen@brynmawr.edu |
d74fff88ba05004f13b29253044811a8d2b7d787 | 3249577773cf18e5c09ea36de62477ddb43b662b | /Python/flask_fundamentals/Disappearing Ninja/server.py | 91bc33c8de93b14123c980e0d252bf8f7f89d6c4 | [] | no_license | HollinRoberts/code | 5394abe2a7c42bbbe83d8f64a99c50a52f05792b | 8026522ab169c4174037fdf1b271de60b75d79bf | refs/heads/master | 2021-01-01T16:12:11.674680 | 2017-10-18T21:08:10 | 2017-10-18T21:08:10 | 97,786,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html")
@app.route("/ninja")
def ninja():
return render_template("ninja.html")
@app.route("/ninja/<color>")
def ninja_color(color):
if color=="blue":
return render_template("leonardo.html" )
elif color=="orange":
return render_template("michelangelo.html")
elif color=="red":
return render_template("raphael.html")
elif color=="purple":
return render_template("donatello.html")
else:
return render_template("notapril.html")
app.run(debug=True) | [
"hollinroberts@gmail.com"
] | hollinroberts@gmail.com |
c93bfa4ec6d9804ee16dbf65765c073e80ea463c | 552d6d8ceb9a88ebe9dfa5ba1c3572a41e029634 | /Exercise 9.9.py | 86f0566c852d9b3bcdceb214d19caab5db392ac3 | [] | no_license | Aguskurnia123/Agus-Kurnia-Akbar_I0320004_Abyan_Tugas-9 | 9df2356f8a3e0c1af78c34dad3c1c7d844cdaf59 | ab89d7c9ab87c5766de02ec4e25817e5904747c5 | refs/heads/main | 2023-04-16T22:37:10.157282 | 2021-04-30T23:54:17 | 2021-04-30T23:54:17 | 363,285,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | import array
A= array.array('i', [100, 200, 300, 400, 500])
print(A)
A[1]= -700
A[4]= 800
print(A) | [
"aguskurnia123.ak@gmail.com"
] | aguskurnia123.ak@gmail.com |
d59c7349c687bb89df6ffe6c91d0cb52724efdaa | d4eb113c44c86322b3811513a7286d176f106eb6 | /experiments/variational_autoencoder/validation/compare_results.py | 9533452ba1c680b701a373947b1b8279453615c6 | [] | no_license | philip-brohan/Machine-Learning | 67a2eb780383b3436da4fef1d763f39d255ae696 | dc53b9c336d5f12272257f327abe49dec436ea04 | refs/heads/master | 2021-03-27T12:33:07.518279 | 2020-04-30T19:38:02 | 2020-04-30T19:38:02 | 56,614,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,113 | py | #!/usr/bin/env python
# Model training results plot
import tensorflow as tf
tf.enable_eager_execution()
import numpy
import IRData.twcr as twcr
import iris
import datetime
import argparse
import os
import math
import pickle
import Meteorographica as mg
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import cartopy
import cartopy.crs as ccrs
# Function to resize and rotate pole
def rr_cube(cbe):
# Use the Cassini projection (boundary is the equator)
cs=iris.coord_systems.RotatedGeogCS(0.0,60.0,270.0)
# Latitudes cover -90 to 90 with 79 values
lat_values=numpy.arange(-90,91,180/78)
latitude = iris.coords.DimCoord(lat_values,
standard_name='latitude',
units='degrees_north',
coord_system=cs)
# Longitudes cover -180 to 180 with 159 values
lon_values=numpy.arange(-180,181,360/158)
longitude = iris.coords.DimCoord(lon_values,
standard_name='longitude',
units='degrees_east',
coord_system=cs)
dummy_data = numpy.zeros((len(lat_values), len(lon_values)))
dummy_cube = iris.cube.Cube(dummy_data,
dim_coords_and_dims=[(latitude, 0),
(longitude, 1)])
n_cube=cbe.regrid(dummy_cube,iris.analysis.Linear())
return(n_cube)
# Get the 20CR data
# Load 20th Century Reanalysis (v2c) mean-sea-level pressure for
# 2009-03-12 18Z, keep ensemble member 1 only, and regrid it onto the
# 79x159 rotated-pole grid the autoencoder was trained on.
ic=twcr.load('prmsl',datetime.datetime(2009,3,12,18),
             version='2c')
ic=rr_cube(ic.extract(iris.Constraint(member=1)))
# Get the autoencoder
# Load the trained model (epoch 500) from under $SCRATCH.
# compile=False because the model is only used for inference here,
# not for further training.
model_save_file=("%s/Machine-Learning-experiments/"+
                 "variational_autoencoder/"+
                 "/saved_models/Epoch_%04d/autoencoder") % (
                 os.getenv('SCRATCH'),500)
autoencoder=tf.keras.models.load_model(model_save_file,compile=False)
# Normalisation - Pa to mean=0, sd=1 - and back
def normalise(x):
    """Scale pressure in Pa to roughly zero mean and unit variance.

    Subtracts the mean pressure (101325 Pa) and divides by an assumed
    standard deviation of 3000 Pa. Implemented as a pure function: the
    original used in-place augmented assignment, which silently mutated
    mutable arguments (e.g. numpy arrays) passed by the caller.
    """
    return (x - 101325) / 3000
def unnormalise(x):
    """Invert normalise(): map mean=0, sd=1 values back to Pa.

    Pure function: the original used in-place augmented assignment,
    which silently mutated mutable arguments (e.g. numpy arrays)
    passed by the caller.
    """
    return x * 3000 + 101325
# Portrait figure, 9.6x10.8 inches at 100dpi, light grey background
fig=Figure(figsize=(9.6,10.8),  # 1/2 HD
           dpi=100,
           facecolor=(0.88,0.88,0.88,1),
           edgecolor=None,
           linewidth=0.0,
           frameon=False,
           subplotpars=None,
           tight_layout=None)
# Agg canvas so the figure can be rendered off-screen (no display needed)
canvas=FigureCanvas(fig)
# Top - map showing original and reconstructed fields
# Rotated pole matching the grid produced by rr_cube above
projection=ccrs.RotatedPole(pole_longitude=60.0,
                            pole_latitude=0.0,
                            central_rotated_longitude=270.0)
# Map axes occupy the top half of the figure
ax_map=fig.add_axes([0.01,0.51,0.98,0.48],projection=projection)
ax_map.set_axis_off()
extent=[-180,180,-90,90]
ax_map.set_extent(extent, crs=projection)
# Allow images to stretch to fill the axes
matplotlib.rc('image',aspect='auto')
# Run the data through the autoencoder and convert back to iris cube
pm=ic.copy()
pm.data=normalise(pm.data)
# Reshape to the (batch, rows, cols, channels) layout the model expects
ict=tf.convert_to_tensor(pm.data, numpy.float32)
ict=tf.reshape(ict,[1,79,159,1])
result=autoencoder.predict_on_batch(ict)
result=tf.reshape(result,[79,159])
# Back to pressure units; pm now holds the reconstructed field
pm.data=unnormalise(result)
# Background, grid and land
ax_map.background_patch.set_facecolor((0.88,0.88,0.88,1))
#mg.background.add_grid(ax_map)
land_img_orig=ax_map.background_img(name='GreyT', resolution='low')
# original pressures as red contours
# scale=0.01 converts Pa to hPa; contours every 7hPa from 870 to 1050
mg.pressure.plot(ax_map,ic,
                 scale=0.01,
                 resolution=0.25,
                 levels=numpy.arange(870,1050,7),
                 colors='red',
                 label=False,
                 linewidths=1)
# Encoded pressures as blue contours (same levels, for direct comparison)
mg.pressure.plot(ax_map,pm,
                 scale=0.01,
                 resolution=0.25,
                 levels=numpy.arange(870,1050,7),
                 colors='blue',
                 label=False,
                 linewidths=1)
mg.utils.plot_label(ax_map,
'%04d-%02d-%02d:%02d' % (2009,3,12,6),
facecolor=(0.88,0.88,0.88,0.9),
fontsize=8,
x_fraction=0.98,
y_fraction=0.03,
verticalalignment='bottom',
horizontalalignment='right')
# Scatterplot of encoded v original
ax=fig.add_axes([0.08,0.05,0.45,0.4])
aspect=.225/.4*16/9
# Axes ranges from data
dmin=min(ic.data.min(),pm.data.min())
dmax=max(ic.data.max(),pm.data.max())
dmean=(dmin+dmax)/2
dmax=dmean+(dmax-dmean)*1.05
dmin=dmean-(dmean-dmin)*1.05
if aspect<1:
ax.set_xlim(dmin/100,dmax/100)
ax.set_ylim((dmean-(dmean-dmin)*aspect)/100,
(dmean+(dmax-dmean)*aspect)/100)
else:
ax.set_ylim(dmin/100,dmax/100)
ax.set_xlim((dmean-(dmean-dmin)*aspect)/100,
(dmean+(dmax-dmean)*aspect)/100)
ax.scatter(x=pm.data.flatten()/100,
y=ic.data.flatten()/100,
c='black',
alpha=0.25,
marker='.',
s=2)
ax.set(ylabel='Original',
xlabel='Encoded')
ax.grid(color='black',
alpha=0.2,
linestyle='-',
linewidth=0.5)
# Plot the training history
history_save_file=("%s/Machine-Learning-experiments/"+
"variational_autoencoder/"+
"saved_models/history_to_%04d.pkl") % (
os.getenv('SCRATCH'),500)
history=pickle.load( open( history_save_file, "rb" ) )
ax=fig.add_axes([0.62,0.05,0.35,0.4])
# Axes ranges from data
ax.set_xlim(0,len(history['loss']))
ax.set_ylim(0,numpy.max(numpy.concatenate((history['loss'],
history['val_loss']))))
ax.set(xlabel='Epochs',
ylabel='Loss (grey) and validation loss (black)')
ax.grid(color='black',
alpha=0.2,
linestyle='-',
linewidth=0.5)
ax.plot(range(len(history['loss'])),
history['loss'],
color='grey',
linestyle='-',
linewidth=2)
ax.plot(range(len(history['val_loss'])),
history['val_loss'],
color='black',
linestyle='-',
linewidth=2)
# Render the figure as a png
fig.savefig("comparison_results.png")
| [
"philip@brohan.org"
] | philip@brohan.org |
7ca108bd5b4ada0b7bc978f191396ec43af38fb5 | 77f49b5b03cccbe7527d6d2b15a94bfbe15f0ce8 | /tests/servo_bounds.py | 917fe36662b3bf8ea5aa6976b6f8749dd9a6524f | [] | no_license | skrub-wreckers/software | 9ac4bd8e67e54447e605b7b2b45a8278b31dde06 | 17b47ecf46dee192dd007543a958049eba03e690 | refs/heads/master | 2021-01-10T03:22:16.781029 | 2016-03-03T05:14:21 | 2016-03-03T05:14:21 | 49,020,868 | 0 | 0 | null | 2016-01-28T17:46:53 | 2016-01-04T20:22:59 | Python | UTF-8 | Python | false | false | 560 | py | from tamproxy import Sketch, SyncedSketch, Timer
from tamproxy.devices import Servo
class ServoWrite(Sketch):
    """Interactively probe a servo's upper pulse-width bound.

    Starting from a 2200us pulse width, each press of Enter steps the
    width up by 10us and prints the current value.  (The original
    docstring claimed cycling between 1050us and 1950us; the code below
    actually steps upward from 2200us.)

    Python 2 code: uses `raw_input` and the `print` statement.
    """
    def setup(self):
        # Servo on tamproxy pin 10; start at the 2200us test value.
        self.servo = Servo(self.tamp, 10)
        self.servo.write(2200)
        self.timer = Timer()
        self.val = 2200

    def loop(self):
        # Block until the user presses Enter, then step the pulse width.
        raw_input()
        self.val += 10
        print self.val
        self.servo.write(self.val)

if __name__ == "__main__":
    # Run the sketch's setup/loop cycle until interrupted.
    sketch = ServoWrite()
    sketch.run()
| [
"areill1337@gmail.com"
] | areill1337@gmail.com |
e470c8f99b1335a8c6db6aba0002956e696d280d | 4afc0d47446de2fc9fb53833912cd105d0cc3d21 | /25. Seaborn Exercises .py | 9eb711e17984c6db65619c50429ba8db28cd3aa1 | [] | no_license | MEng-Alejandro-Nieto/Python-for-Data-Science-and-machine-Learning-Udemy-Course | 7afa51edd36bdd452fccc639b354bd492ab6e5d9 | a03430c1d57cb022073395c7eb279eaf7807df4b | refs/heads/master | 2020-07-17T02:33:19.478323 | 2019-09-02T18:45:33 | 2019-09-02T18:45:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Exercise script: tries out one seaborn plot type per section on the
# bundled Titanic dataset; most attempts are left commented out.
info=sns.load_dataset('titanic')
# Ages with NaN dropped and truncated to ints (used by some plots below).
new_age=info['age'].dropna().apply(lambda x:int(x))
print(info.head(5))
#JOINT PLOT--------------------------------------------------------
#sns.jointplot(x='age',y='fare',data=info)
#DISTRIBUTION PLOT--------------------------------------------------------
#sns.distplot(info['age'].dropna().apply(lambda x:int(x)),kde=False)
#sns.distplot(info['fare'],kde=False,bins=30,color='red')
#sns.distplot(info['fare'])
#sns.distplot(new_age,kde=False,bins=15)
#BOX PLOT--------------------------------------------------------
#sns.boxplot(x='class',y='age',data=info,palette='rainbow')
#VIOLIN PLOT-----------------------------------------------------
#sns.violinplot(x='class',y='age',data=info,palette='rainbow',hue='sex')
#SWARM PLOT------------------------------------------------------
#sns.swarmplot(x='class',y='age',data=info)
#BAR PLOT -------------------------------------------------------
#sns.countplot(info['class'])
#sns.countplot(info['sex'])
#HEAT PLOT-------------------------------------------------------
#pvt_info=info.pivot_table(index='age',columns='embark_town',values='fare')
#sns.heatmap(info.corr())
#plt.title('heat map')
#FACETGRID PLOT--------------------------------------------------
# The only active plot: per-sex age histograms.
g=sns.FacetGrid(data=info,col='sex')
g.map(plt.hist,'age')
#g.map(sns.distplot,'age')
plt.tight_layout()
plt.show()
'''
#sns.jointplot(x='fare',y='age',data=info)
#sns.distplot(info['fare'],kde=False,color='red',bins=30)
#sns.boxplot(x='class',y='age',data=info,hue='sex',palette='rainbow')
#sns.swarmplot(x='class',y='age',data=info)
#sns.countplot(info['sex'],data=info)
#sns.heatmap(info.corr())
g=sns.FacetGrid(data=info,col='sex')
g.map(sns.distplot,'age')
plt.tight_layout()
plt.show()
'''
"alejandrolive932@hotmail.com"
] | alejandrolive932@hotmail.com |
a2010a39af08d72a34b058f92fd12104c0aa8d29 | aa0cc19eedf38baca2ecef3de6f2a4c69ce68675 | /clld/scripts/postgres2sqlite.py | 168f94320038122afe286f29dcc8c331998e4f23 | [] | no_license | mitcho/clld | de84c54247138efa53ee5f68a87edc2a0ab06bbf | dcf5f063a44ac5167f677f05b2c66b0d094d4ff3 | refs/heads/master | 2021-01-18T09:56:18.486647 | 2013-08-23T15:13:18 | 2013-08-23T15:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,229 | py | """
python postgres2sqlite.py apics 2>&1 >/dev/null | less
Unfortunately this approach does not seem to work, thus, our only option is
intialize_db and making sure all db changes are done via alembic migrations.
"""
from subprocess import call
from importlib import import_module
import pkg_resources
import re
from tempfile import mktemp
from path import path
from sqlalchemy import create_engine
from clld.db.meta import Base
def replace_booleans(line):
"""replaces postgres boolean literals with 0|1 within the values in an INSERT
statement as created by pg_dump.
.. note::
- we rely on the INSERT statements not containing newlines.
- we somewhat naively split the values at commas and assume that if a single
token equals "true" or false", it was a boolean value in postgres. Obviously
this assumption does not hold for a text value like "..., true, ...".
We may switch to using sqlparse for a more robust detection of booleans.
>>> assert replace_booleans('INSERT (true, false);').strip() == 'INSERT (1, 0);'
"""
insert, values = line.split('(', 1)
assert values.endswith(');')
values = values[:-2]
clean_values = []
for token in values.split(', '):
if token == 'true':
token = "1"
elif token == 'false':
token = "0"
clean_values.append(token)
return '%s(%s);\n' % (insert, ', '.join(clean_values))
# Matches the tail of a complete INSERT statement: either a closing quote
# (not doubled) or a trailing integer, followed by ");".
STMT_END = re.compile("([^\']\'|\, [0-9]+)\)\;$")


def inserts(iterator):
    """Yield complete INSERT statements from *iterator* (lines of a dump),
    joining statements that span several physical lines with the literal
    marker '__newline__'.

    Non-INSERT lines outside a pending statement are ignored; a new
    "INSERT " line silently discards any unfinished statement before it.

    >>> assert list(inserts(["INSERT (1, 1);"])) == ['INSERT (1, 1);']
    >>> assert list(inserts(["INSERT ('a", "b');"])) == ["INSERT ('a__newline__b');"]
    """
    pending = []
    for raw in iterator:
        stripped = raw.strip()
        if stripped.startswith('INSERT '):
            if STMT_END.search(stripped):
                # Complete single-line statement: emit it directly.
                yield stripped
            else:
                # Start buffering a multi-line statement.
                pending = [stripped]
        elif pending:
            # Continuation line of a buffered statement.
            pending.append(stripped)
            if STMT_END.search(stripped):
                joined = '__newline__'.join(pending)
                pending = []
                yield joined
def convert_dump(i, o):  # pragma: no cover
    """Translate a pg_dump data-only SQL file *i* into a sqlite script *o*.

    The output starts with sqlite3 shell directives (so it can be fed to
    the CLI via ``-init``) and wraps all INSERTs in one transaction.

    Fixes vs. the original: uses ``open`` instead of the Python-2-only
    ``file`` builtin, closes the input file (it previously leaked), and
    drops the unused ``_insert`` flag and enumerate counter.
    """
    with open(o, 'w') as fp:
        fp.write('.echo OFF\n.bail ON\n')
        fp.write('BEGIN;\n')
        with open(i) as src:
            for insert in inserts(src):
                fp.write(replace_booleans(insert))
        fp.write('END;\n')
def postgres2sqlite(name):  # pragma: no cover
    """Dump postgres database *name* and load its data into a new sqlite file.

    Steps: pg_dump the data as INSERTs, convert them to sqlite syntax,
    create the schema from the app's SQLAlchemy models, then replay the
    INSERTs with the sqlite3 CLI.  Returns the path of the sqlite file.

    NOTE(review): ``mktemp`` is insecure/deprecated (race between name
    generation and file creation) — consider ``tempfile.NamedTemporaryFile``.
    """
    pg_sql = path(mktemp('.sql'))
    sqlite_sql = path(mktemp('.sql'))
    sqlite = mktemp('.sqlite')
    call("pg_dump -f {0} --data-only --inserts {1}".format(pg_sql, name), shell=True)
    convert_dump(pg_sql, sqlite_sql)
    engine = create_engine('sqlite:////{0}'.format(sqlite))
    # The import looks unused, but it registers the app's models on
    # Base.metadata so create_all() below can build the schema.
    m = import_module('{0}.models'.format(name))
    Base.metadata.create_all(engine)
    call('sqlite3 -bail -init {0} {1} ".exit"'.format(sqlite_sql, sqlite), shell=True)
    # Remove the intermediate SQL files; the sqlite file itself is kept.
    if pg_sql.exists():
        pg_sql.remove()
    if sqlite_sql.exists():
        sqlite_sql.remove()
    return sqlite
if __name__ == '__main__':  # pragma: no cover
    # CLI entry point: first argument is the postgres database name.
    import sys
    postgres2sqlite(sys.argv[1])
    sys.exit(0)
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
dcf71d21fdd4add75ba30e4eb9d4ccb386659a42 | 58d98167c804c5aac5a7688cd9a6142e58e307c6 | /boomerang/__init__.py | 0065754c22ece9a6ff4b6f8b780bc44ec97d588a | [
"MIT"
] | permissive | hxu/boomerang | bed7c9bdc3b8627f6693affd5253bd5f9afd1506 | 58604f9a3af0df34dcb2f9bb2fff9799df5b4c86 | refs/heads/master | 2020-05-16T21:24:29.837175 | 2013-08-28T09:34:28 | 2013-08-28T09:34:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,360 | py | from __future__ import division
import os
import re
import shutil
from itertools import chain
import sys
import time
from string import Template
from fabric.api import env, run
from fabric.api import put as fabput
from fabric.context_managers import cd
from fabric.contrib.files import exists
from fabric.exceptions import NetworkError
import common
import fetch
import put
from common import _expand_path
from fetch import fetch_path
from put import put_path
from boomerang import boom_config
from connection import connect_ec2
__all__ = [
'common',
'fetch',
'put'
]
def provision_instance(itype=None, ami=None, security_group=None, ssh_key=None):
"""
Provisions and instance and returns the instance object
"""
print "Launching {} instance with ami {}.".format(itype, ami)
conn = connect_ec2()
res = conn.run_instances(ami, key_name=ssh_key, security_groups=[security_group], instance_type=itype,
instance_initiated_shutdown_behavior='terminate')
return res.instances[0]
def _generate_fetch_script(key_path=None, bucket_name=None):
"""
Portion of the remote script that pulls stuff down from s3
"""
from templates.remote_fetch import TEMPLATE_TEXT
return Template(TEMPLATE_TEXT).substitute(key_path=key_path,
bucket_name=bucket_name,
aws_access_key_id=boom_config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=boom_config.AWS_SECRET_ACCESS_KEY
)
def _generate_run_script(script_name=None, out_path=None):
"""
Generates the remote script to be run on the instance
Saves the file to a temporary location and returns the path
"""
r_log_filename = 'r_log.txt'
r_log_path = out_path + r_log_filename
call_command = ['Rscript', '--vanilla', '--verbose', script_name]
SCRIPT_TEXT = """
# Make sure to make the file first
import os
import subprocess
print 'Starting the R task'
outfile = open('$r_log_path', mode='w')
subprocess.call($call_command, stdout=outfile, stderr=subprocess.STDOUT)
outfile.close()
"""
return Template(SCRIPT_TEXT).substitute(r_log_path=r_log_path, call_command=call_command, out_path=out_path)
def _generate_put_script(path=None, bucket_name=None):
"""
Generates remote script to put files back to s3
"""
from templates.remote_put import TEMPLATE_TEXT
return Template(TEMPLATE_TEXT).substitute(path=path,
bucket_name=bucket_name,
aws_access_key_id=boom_config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=boom_config.AWS_SECRET_ACCESS_KEY
)
def generate_script(fetch=False, bucket_name=None, fetch_path=None,
put=False, out_path=None,
run=False, script_name=None):
script_text = ''
if fetch:
script_text += _generate_fetch_script(fetch_path, bucket_name)
if run:
script_text += _generate_run_script(script_name, out_path)
if put:
script_text += _generate_put_script(out_path, bucket_name)
script_text += """
import os
os.system('sudo shutdown -h now')
"""
# Strip out from __future__ imports and move to the beginning of the file
imports = set(re.findall('from __future__.+\n', script_text))
for i in imports:
script_text = script_text.replace(i, '')
script_text = i + script_text
return script_text
def _cleanup_workspace(temp_folder=boom_config.TEMPORARY_FOLDER):
"""
Cleans up temporary files
"""
shutil.rmtree(temp_folder)
def _make_workspace(temp_folder=boom_config.TEMPORARY_FOLDER):
"""
Creates temporary workspace for files
"""
if os.path.exists(temp_folder) and os.path.isdir(temp_folder):
shutil.rmtree(temp_folder)
os.makedirs(temp_folder)
def _expand_local_path():
pass
def _expand_remote_path():
pass
def _get_existing_instance(instance_id):
"""
Gets an existing instance object
"""
conn = connect_ec2()
res = [r for r in conn.get_all_instances(instance_id)]
if len(res) == 0:
print 'Instance not found. Aborting'
sys.exit(1)
elif len(res) > 1:
print 'Multiple instances found. Aborting'
sys.exit(1)
elif len(res) == 1:
# We're assuming that each reservation only has one instance
# Not considering the case where a reservation can have multiple instances
instance = res[0].instances[0]
return instance
def send_job(source_script=None, in_directory=None, out_directory=None,
base_directory='task/',
load_from_s3=0, s3_bucket_name=None, s3_fetch_path=None,
put_to_s3=0,
existing_instance=None,
itype=None, ami=boom_config.DEFAULT_AMI, security_group=boom_config.DEFAULT_SECURITY_GROUP,
ssh_key=boom_config.DEFAULT_SSH_KEY,
ssh_key_path=boom_config.DEFAULT_SSH_KEY_PATH):
"""
Spins up an instance, deploys the job, then exits
"""
load_from_s3 = int(load_from_s3)
put_to_s3 = int(put_to_s3)
if not out_directory.endswith('/'):
out_directory += '/'
out_log_file = base_directory + out_directory + 'shell_log.txt'
_make_workspace()
# Prepare the local job files
f = open(boom_config.TEMPORARY_FOLDER + 'boom_task.py', 'w')
f.write(generate_script(fetch=load_from_s3,
bucket_name=s3_bucket_name,
fetch_path=s3_fetch_path,
put=put_to_s3,
out_path=out_directory,
run=True,
script_name=source_script))
f.close()
user = 'ubuntu'
ssh_key_path = _expand_path(ssh_key_path)
path_to_base_directory = '~/{}'.format(base_directory)
instance = None
# When provisioning a spot instance
# res = conn.request_spot_instances(price='0.011', instance_type='t1.micro', image_id='ami-0b9ad862')
# res[0] gives the spot reservation
# but this does not have an update method, so need to do
# conn.get_all_spot_instance_requests(res[0].id)
# res[0].state = 'active'
# or res[0].status.code = 'fulfilled'
# then res[0].instance_id
try:
if not existing_instance:
instance = provision_instance(itype=itype, ami=ami, security_group=security_group, ssh_key=ssh_key)
print "Waiting for instance to boot"
else:
instance = _get_existing_instance(existing_instance)
print 'Using existing instance {}'.format(existing_instance)
while instance.state != 'running':
sys.stdout.write(".")
time.sleep(5)
instance.update()
sys.stdout.write('\n')
except KeyboardInterrupt:
print 'Operation cancelled by user. Attempting to terminate instance'
if instance:
# This does not always terminate, if we are really early in the launch process
instance.terminate()
_cleanup_workspace()
sys.exit(1)
time.sleep(15)
print "Instance is running at ip {}".format(instance.ip_address)
print "Connecting as user {}".format(user)
# Set up the fabric environment to connect to the new machine
env.host_string = instance.ip_address
env.user = user
env.key_filename = ssh_key_path
attempt = 1
success = False
while not success and attempt <= 3:
try:
run('uname -a')
run('pwd')
success = True
except NetworkError as e:
print "Could not connect: {}".format(e)
print "Retrying"
attempt += 1
continue
if not success:
print "Could not connect after 3 tries. Aborting"
_cleanup_workspace()
sys.exit(1)
# Send files to the server
if exists(base_directory):
run('rm -R {}'.format(base_directory))
run('mkdir {}'.format(base_directory))
run('mkdir {}'.format(base_directory + out_directory))
fabput(local_path=_expand_path('./' + boom_config.TEMPORARY_FOLDER + 'boom_task.py'), remote_path='~/' + base_directory)
fabput(local_path=_expand_path('./' + source_script), remote_path='~/' + base_directory)
with cd(path_to_base_directory):
print 'Transferring scripts to instance'
# Kick off the script with tmux
print 'Kicking off the task'
run("tmux new-session -s boom_job -d")
# TODO: Does not always seem to be working, but path looks correct
run("tmux pipe-pane -o -t boom_job 'exec cat >> {}'".format(out_log_file))
run("tmux send -t boom_job 'python boom_task.py' Enter")
_cleanup_workspace()
def list_instances():
"""
Lists all instances
"""
conn = connect_ec2()
res = conn.get_all_instances()
if len(res) == 0:
print('No instances')
instances = chain.from_iterable([r.instances for r in res])
for i in instances:
print('Instance: {}. Status: {}'.format(i, i.state))
"""
fab send_job:source_script=test.R,in_directory=data/,out_directory=output/,put_to_s3=1,s3_bucket_name=boom_test,load_from_s3=1,s3_fetch_path=data
""" | [
"hgcrpd@gmail.com"
] | hgcrpd@gmail.com |
eafb45050bc3226b2600482f9fdd64110784abd1 | dac956439f6918b10026bdf051a952836ab1f5f6 | /todo.py | d9db1d28564b9071f4b923d37a47a0221e99cc49 | [] | no_license | sajja/mytask | fcde27f3d0c199c275b99da2426e13814110e135 | 7c394b64d14db6c7d92006db3c271bba24780da5 | refs/heads/master | 2021-01-15T12:19:07.298249 | 2014-08-17T10:12:04 | 2014-08-17T10:12:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,585 | py | from index import Index
from datetime import datetime
from datetime import timedelta
from time import sleep
from parser import ParserFactory
from print_plugin import TextDecoratorPlugin, PaddedDecoratorPlugin, ConkyColoredDecoratorPlugin, HumanizedDatesPlugin, \
TrimLongNamesPlugin
import pynotify
import argparse
__author__ = 'sajith'
import sys
from subprocess import call
index = Index("/home/sajith/.task")
class Todo:
def __init__(self):
pass
def printTodoList(self):
pass
def listShort(self, status="PENDING", date=None):
taskWithDates, taskWithoutDates = index.listAll()
print("Id Due date Task")
print("-- -------- ----")
allTasks = taskWithDates + taskWithoutDates
for task in allTasks:
print(self.pad(str(task.id), 4) + self.pad(str(task.date), 22) + str(task.taskName))
def listDetails(self):
allTasks = index.listAll()
print("Id Due date Status Reccrance Task")
print("-- -------- ------- --------- ----")
for task in allTasks:
print(self.pad(str(task.id), 4) + self.pad(str(task.date), 21) + self.pad(str(task.status), 9) + self.pad(
str(task.reccrance), 11) + task.taskName)
def viewTask(self, taskId):
pass
def add(self, params):
recurrence = None
date = None
notify = None
if (len(params) > 4):
raise Exception("Too many params")
#todo do this better
name = params[0].replace("\"", "")
params = params[1:]
if (len(params) > 0 ):
date = self.parseDate(params[0])
recurrence = "NONE"
notify = "NO"
params = params[1:]
if (len(params) > 0):
recurrence = params[0]
params = params[1:]
if (len(params) > 0):
notify = params[0]
print("Task name: " + name)
print("Dud daet: " + str(date))
print("Reccurance: " + str(recurrence))
index.addTask(name, date, recurrence)
def parseDate(self, date):
#see its a tagged date
dateStrs = date.split(",")
if (len(dateStrs) > 1):
#tagged date with time
datePart = self.__getDate__(dateStrs[0])
timePart = datetime.strptime(dateStrs[1], '%H:%M').time()
return datetime.combine(datePart, timePart)
elif (len(dateStrs) == 1): #single date, either tag or formatted
if (len(date.split(" ")) > 1):
#formated date with time
parsedDate = datetime.strptime(date, '%Y-%m-%d %H:%M')
return parsedDate
elif len(date.split(" ")) == 1:
return self.__getDate__(date)
else:
raise Exception("Unparsable date " + date)
else:
raise Exception("unparsable date " + date)
def __getDate__(self, date):
if (date.lower() == "today"):
return datetime.today().date()
if (date.lower() == "tomorrow"):
return datetime.today().date() + timedelta(days=1)
else:
parsedDate = datetime.strptime(date, '%Y-%m-%d').date()
return parsedDate
def pad(self, string, size):
appendLen = size - string.__len__()
if appendLen > 0:
for i in range(appendLen):
string += " "
return string
def delete(self, id):
task = index.findTaskById(id)
if (task != None):
index.deleteTask(task)
print("Task delted")
else:
print("No task found")
def complete(self, id):
index.markTaskComplete(id)
def searchTask(self, name):
print("not implemented")
def notifyAll(self):
tasksTobeNotified = index.listNotificationsPendingTasks(15)
print("Entering into notification loop...")
for overdueTask in tasksTobeNotified[0]:
pynotify.init("markup")
n = pynotify.Notification(" ************** TASK NOTIFICATION **************\n",
"<b>Task Name</b> <i>" + overdueTask.taskName + " (" + str(
overdueTask.id) + ")</i> <b>" + self.getDueTime(
overdueTask.dueIn, True) + "</b>",
"/home/sajith/scratch/mytodo/Task-List-icon.png")
n.show()
sleep(2)
for starting in tasksTobeNotified[1]:
pynotify.init("markup")
n = pynotify.Notification(" ************** TASK NOTIFICATION **************\n",
"<b>Task Name</b> <i>" + starting.taskName + " (" + str(
starting.id) + ")</i> <b>" + self.getDueTime(
starting.dueIn, False) + "</b>",
"/home/sajith/scratch/mytodo/Task-List-icon.png")
n.show()
sleep(2)
def listAll(self):
entries = index.listAll()
count = 0
for entry in entries:
print("Task name: " + entry.taskName)
print("Task due date: " + str(entry.dateTime))
print("Recuurance: " + str(entry.reccrance))
print("Notifications: " + str(entry.notify))
print("------------------------------------")
count += 1
print("Total number of entries " + str(count))
def getDueTime(self, time, isOverdue):
if (time == 0 and isOverdue):
return "just passed the scheduled time"
elif (time == 0 and not isOverdue):
return "is just strating"
elif (isOverdue):
return "is overdue by " + str(time) + " min"
elif (not isOverdue):
return "will start in " + str(time) + " min"
def listTodos(self):
taskWithDates, taskWithoutDates = index.listAll()
textDeco = self.__getTextDecorator__("conky")
today = datetime.today().date()
todaysTasks = [task for task in taskWithDates if
not hasattr(task.date, "time") or task.date.strftime('%H:%M') == "00:00"]
alltasks = todaysTasks + taskWithoutDates
if (len(alltasks) > 0):
# print("${color A8A8A8}")
for task in alltasks:
print(str(task.id) + " " + task.taskName)
# print("(" + textDeco.getTaskId(task.id, task) + ")" + textDeco.getTaskName(task.taskName, task))
else:
print("You got nothing todo")
print("Perhaps you should find some work or a new job ????")
def agenda(self, pluginType):
# print(" \n")
today, upcoming = index.agenda()
textDeco = self.__getTextDecorator__(pluginType)
if (len(today) == 0):
print "${font Inconsolata:italic:size=12}Nothing scheduled for today${font}"
print("")
else:
for task in today:
print(str(textDeco.getTaskId(task.id, task)) + textDeco.getTaskName(task.taskName, task) + str(textDeco.getDueDate(task.date, task)))
print "${font}"
print("")
if(pluginType == "conky"):
print "${font Inconsolata:size=12}"
for task in upcoming:
print(str(textDeco.getTaskId(task.id, task)) + textDeco.getTaskName(task.taskName, task) + str(textDeco.getDueDate(task.date, task)))
if(pluginType == "conky"):
print "${font}"
def __getTextDecorator__(self, pluginType):
if (pluginType == "conky"):
return HumanizedDatesPlugin(TrimLongNamesPlugin(PaddedDecoratorPlugin(5, 10, 40, ConkyColoredDecoratorPlugin(TextDecoratorPlugin()))))
else:
return TextDecoratorPlugin()
def importTasks(self, parserType="google", location="google"):
parser = ParserFactory().getParser(parserType)
stringTasks = parser.parse(file(location))
index.importTask(stringTasks)
def snooze(self, args):
snoozeTime = 15
if (len(args) == 2):
snoozeTime = int(args[1])
index.snooze(args[0], snoozeTime)
def gc(self):
index.gc()
def main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument("command", choices=["add", "short", "agenda", "todo", "done", "notify", "import", "snooze", "gc"])
parser.add_argument("command_args", nargs="*")
parser.add_argument("--type", help="increase output verbosity", default="terminal")
args = parser.parse_args()
operation = args.command
# for arg in sys.argv:
# print(arg)
if (operation == "short"):
Todo().listShort()
elif (operation == "long"):
Todo().listDetails()
elif (operation == "delete"):
Todo().delete(int(sys.argv[2]))
elif (operation == "done"):
Todo().complete(sys.argv[2])
elif (operation == "add"):
Todo().add(sys.argv[2:len(sys.argv)])
elif (operation == "notify"):
Todo().notifyAll()
elif (operation == "todo"):
Todo().listTodos()
elif (operation == "agenda"):
Todo().agenda(args.type)
elif (operation == "import"):
Todo().importTasks("google", sys.argv[2])
elif (operation == "snooze"):
Todo().snooze(sys.argv[2:])
elif (operation == "gc"):
Todo().gc()
if __name__ == "__main__":
main() | [
"sajiths@pagero.com"
] | sajiths@pagero.com |
ad369ea3b68e110460ac6a271338682c626d5bb4 | 61af9e5a827caf4ec9b570f70a66a682d542db31 | /FLASK_TUTORIAL/FLASK/Large_Application_Structure/myproject/owners/views.py | 24dce0920f47d855fe6f6e73e9cd62a43a27290f | [] | no_license | mdshadanaltmash/FLASK_TUTORIAL | cda1b90f96e833a78bacf444353fb104e51246d5 | 5de5ce2a0f73c7e9319f3ca2d4069e94d2f1bd63 | refs/heads/main | 2023-03-08T08:09:01.008006 | 2021-02-24T19:00:13 | 2021-02-24T19:00:13 | 313,091,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | from flask import Blueprint,render_template,url_for,redirect
from myproject import db
from myproject.models import Owner
from myproject.owners.forms import AddForm
# Blueprint for owner-related views; templates live under templates/owners.
owners_blueprints = Blueprint('owners', __name__, template_folder='templates/owners')


@owners_blueprints.route('/add', methods=['GET', 'POST'])
def add():
    """Render the add-owner form and create an Owner on a valid POST.

    GET: show the empty form.
    POST (valid): persist a new Owner linked to the submitted puppy id,
    then redirect to the puppy list view.
    """
    form = AddForm()
    if form.validate_on_submit():
        name = form.name.data
        puppy_id = form.puppy_id.data
        owner = Owner(name, puppy_id)
        # add() is the idiomatic call for a single instance
        # (the original wrapped one object in a list for add_all()).
        db.session.add(owner)
        db.session.commit()
        return redirect(url_for('puppies.list'))
    return render_template('owner.html', form=form)
| [
"mdshadanaltmash44@gmail.com"
] | mdshadanaltmash44@gmail.com |
6a883081299d8849dfc6919f02c3fe13b52b3176 | c0f3c5a65397d0daaf705d74e22e34c0219ffedc | /stage/models.py | d440f70382629acb548560408ccececbaa9ff0d7 | [] | no_license | zhulongcao53/mysite | d76d104e4cd4e2ee0332f7415ae2bbfe26534c4c | 9b66bdc396de616ee8d7b8d2fc2010e3fb50034c | refs/heads/master | 2020-12-25T14:24:15.560923 | 2016-07-13T14:28:01 | 2016-07-13T14:28:01 | 63,251,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | #-*- encoding: utf-8 -*-
from django.db import models
from django.contrib import admin
from DjangoUeditor.models import UEditorField
# Create your models here.
class BasicInfo(models.Model):
    """Per-server hardware/OS snapshot; one row per monitored host.

    The ``verbose_name`` values are Chinese UI labels (IP address, CPU%,
    memory%, disk%, OS, 32/64-bit, MAC).
    """
    ipadd = models.IPAddressField(verbose_name = u'IP地址')
    cpu = models.CharField(max_length=255, blank=True, verbose_name = u'CPU%')
    mem = models.CharField(max_length=255, blank=True, verbose_name = u'内存%')
    disk = models.CharField(max_length=255, blank=True, verbose_name = u'磁盘%')
    sys_version = models.CharField(max_length=255, blank=True, verbose_name = u'操作系统')
    sys_bit = models.CharField(max_length=100, blank=True, verbose_name = u'32/64位')
    MAC = models.CharField(max_length=100, blank=True, verbose_name = u'MAC')

    def __unicode__(self):
        # Display hosts by IP address (Python 2 / old Django convention).
        return self.ipadd

    class Meta:
        # Admin label: "server information".
        verbose_name = "服务器信息"
        verbose_name_plural = "服务器信息"
# Host-group table, used to group hosts (translated from the original
# Chinese comment).
class HostGroup(models.Model):
    """A named group of BasicInfo hosts (many-to-many membership)."""
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(BasicInfo)

    class Meta:
        # Admin label: "host information".
        verbose_name = "主机信息"
        verbose_name_plural = "主机信息"
class BasicInfo_admin(admin.ModelAdmin):
    """Admin list view for BasicInfo: show all columns, filter by IP."""
    list_display = ('ipadd', 'cpu', 'mem', 'disk', 'sys_version', 'sys_bit', 'MAC')
    list_filter = ('ipadd', )

class HostGroupAdmin(admin.ModelAdmin):
    """Admin list view for HostGroup: show only the group name."""
    list_display = ['name',]

# Make both models editable in the Django admin site.
admin.site.register(BasicInfo, BasicInfo_admin)
admin.site.register(HostGroup,HostGroupAdmin)
| [
"zhulongcao53@163.com"
] | zhulongcao53@163.com |
47b448f166ccbe4e48f156b557246c0253abdf2e | 9782c4e9d348f1afac506ee89885be685074b1f7 | /portal.py | 0cf24c53ce4d0a5c27685966c4175b4a37d90241 | [] | no_license | shpingsun/OCIPA | 1d20a7d0aeaeb1f0d79d141a3ff83fdee4a219d3 | becbde75765b8f732d0bfb83f66e4d5d201cd476 | refs/heads/master | 2020-04-15T16:07:15.767007 | 2018-12-24T09:25:55 | 2018-12-24T09:25:55 | 164,821,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | msg = "资产云开放协同创新中心"
print(msg)
| [
"panzhaohui@msn.com"
] | panzhaohui@msn.com |
f60880e5d4192b5bcbd9bd669c188d6935c9d098 | 4bee31f6a823fb1aebbd3dfe1d163aa0b1d41a7c | /seata/registry/FileRegistry.py | 460f4982eb6b95f9f7bcc623f50e55a313c15d63 | [
"Apache-2.0"
] | permissive | rohankumardubey/seata-python | 92532d1e8f8c961f2317aa8c23e2f53fe07711e9 | 66fb3382217a43effa3d1bc5ec2b62204d499dba | refs/heads/master | 2023-08-17T08:29:12.603412 | 2021-09-27T06:04:56 | 2021-09-27T06:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
from seata.config.Config import ConfigFactory
from seata.core.rpc.Address import Address
from seata.registry.Registry import Registry
class FileRegistry(Registry):
    """Static registry backed by ``service.grouplist.*`` config entries.

    Registration, subscription and close are no-ops: the server list is
    fixed in configuration, so only ``lookup`` does real work.
    """
    config = ConfigFactory.get_config()

    def __init__(self):
        pass

    def register(self, address):
        pass

    def unregister(self, address):
        pass

    def subscribe(self, cluster, listener):
        pass

    def unsubscribe(self, cluster, listener):
        pass

    def lookup(self, key):
        """Resolve *key* to a list of Address objects, or None when the
        service group is unknown.  Endpoints are configured as a
        semicolon-separated list of ``ip:port`` pairs."""
        group = super(FileRegistry, self).get_service_group(key)
        if group is None:
            return None
        raw = self.config.get('service.grouplist.' + group)
        found = []
        for chunk in raw.split(';'):
            # Skip empty/blank segments (e.g. a trailing semicolon).
            if chunk is None or not chunk.strip():
                continue
            parts = chunk.split(':')
            if len(parts) != 2:
                raise ValueError('endpoint format should like ip:port')
            found.append(Address(parts[0], int(parts[1])))
        return found

    def close(self):
        pass
| [
"jsbxyyx@163.com"
] | jsbxyyx@163.com |
cd0d8e561666d128b2ea7436f62b9116e824dd80 | 21d31c95db167b5b042824d1ab1993d55771fd77 | /ch4/utils/logger.py | dfa267bb5d83d58066198fb360daba907de0b500 | [] | no_license | wangxiao9/appium_demo | 4350dfe93a1f0528fc016e8b1e798f5b1444c9e4 | bbd56991d9f648edc53af9937d75a72b292a4e4b | refs/heads/master | 2022-11-30T02:47:07.617001 | 2020-08-12T07:19:04 | 2020-08-12T07:19:04 | 285,777,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | __author__ = 'wangxiao'
import logging
formatters = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
datefmt = '%a, %d %b %Y %H:%M:%S'
filename = '../log/test.log'
logging.basicConfig(level=logging.DEBUG,
format=formatters,
datefmt=datefmt,
filename=filename,
filemode='w')
def debug(message):
    """Log *message* at DEBUG level on the root logger."""
    logging.debug(message)

def info(message):
    """Log *message* at INFO level on the root logger."""
    logging.info(message)

def warning(message):
    """Log *message* at WARNING level on the root logger."""
    logging.warning(message)

def error(message):
    """Log *message* at ERROR level on the root logger."""
    logging.error(message)

# NOTE(review): because these wrappers call logging.* themselves, the
# %(filename)s / %(lineno)d fields in the configured format will point at
# this module rather than the caller — confirm that is acceptable.
if __name__ == '__main__':
    error('cesss')
| [
"xiaoqian.zhang.ext@siemens.com"
] | xiaoqian.zhang.ext@siemens.com |
ab7a74055b83f59b60f3e2692de2f2d3a6d45b00 | 2f551b40db8dfec546ee229fd52957dc72de9137 | /fibonacci.py | 6da798636d83db68028ed7fdae1c7b7dfb865b31 | [] | no_license | Wikkan/Fibonacci | 45d260a1c2e7d82ea7727f467ea4713b8357e25d | fc846d6f03bc0b48eeb792833be558e0eecbd32b | refs/heads/master | 2020-04-06T17:04:51.599356 | 2018-11-15T03:49:33 | 2018-11-15T03:49:33 | 157,645,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from flask import Flask
app = Flask(__name__)

@app.route('/')
def fib():
    """Serve the 10th Fibonacci number.

    Fix: Flask view functions must return a str (or Response); the
    original returned the raw int, which Flask rejects at request time.
    """
    n = 10
    a = 0
    b = 1
    for _ in range(n):  # loop variable unused; n iterations of the recurrence
        a, b = b, a + b
    return str(a)
return a
| [
"josuji-alfa@hotmail.com"
] | josuji-alfa@hotmail.com |
3f513443bd4d9dfff4f056d83b3dbe2b8ef78504 | 24be96793d7c050b31936252e0f3ef809a7b7a32 | /commands/agc_roi.py | 95910dd42fe1ff10c5482ac887e1f22a3fefeb2a | [] | no_license | CraigKnoblauch/tau-2-emulator | 7041b3ead7b693c5e3b2762f6c7a72bb4989aed8 | 1051e1b93fa1c37101649c9ad175719f048bf88d | refs/heads/master | 2021-01-01T17:18:54.107882 | 2017-08-22T18:28:44 | 2017-08-22T18:28:44 | 98,044,874 | 1 | 0 | null | 2017-08-21T00:44:21 | 2017-07-22T16:48:50 | Python | UTF-8 | Python | false | false | 39 | py | def AGC_ROI(settings, reply):
pass
| [
"craigknoblauch@gmail.com"
] | craigknoblauch@gmail.com |
a06e048a185a9d0251f8d18dda29718efb09a160 | 462e52636f351a30da5bf2159bc9b30719bbe79b | /stuff/finished libraries/pytorch/_torch_docs.py | 5737af3d182faaa9e7a7cddb16dc5de740dcd382 | [] | no_license | SoutiRini/Top-20-Python-Libraries | c27b1ae77e11209bfe97405f8e324c54d4d49db4 | 1adcc6255dc59b25a831df2d23fa759e1e7c3264 | refs/heads/master | 2022-12-08T11:09:19.587218 | 2020-08-28T22:16:41 | 2020-08-28T22:16:41 | 291,153,411 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267,122 | py | """Adds docstrings to functions defined in the torch._C"""
import re
import torch._C
from torch._C import _add_docstr as add_docstr
def parse_kwargs(desc):
    """Map a multi-argument description string to ``{argname: description}``.

    Each new argument starts on a line indented by exactly 4 spaces;
    continuation lines are indented further and stay attached to the
    preceding argument. The key for each entry is the first
    whitespace-delimited token of the section (the argument name), and the
    value is the full stripped section text.

    Example input::

        ('    weight (Tensor): a weight tensor\n' +
         '        Some optional description')

    produces ``{'weight': 'weight (Tensor): a weight tensor\n    Some
    optional description'}``.
    """
    # A new argument begins after a newline followed by exactly 4 spaces
    # (a 5th space would mean a continuation line, hence the lookahead).
    splitter = re.compile(r"\n\s{4}(?!\s)")
    stripped = (section.strip() for section in splitter.split(desc))
    sections = [section for section in stripped if section]
    return {section.split(' ')[0]: section for section in sections}
def merge_dicts(*dicts):
    """Merge any number of dicts into a new dict; later dicts win on key clashes."""
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
common_args = parse_kwargs("""
input (Tensor): the input tensor.
generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
out (Tensor, optional): the output tensor.
""")
reduceops_common_args = merge_dicts(common_args, parse_kwargs("""
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
If specified, the input tensor is casted to :attr:`dtype` before the operation
is performed. This is useful for preventing data type overflows. Default: None.
keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""))
multi_dim_common = merge_dicts(reduceops_common_args, parse_kwargs("""
dim (int or tuple of ints): the dimension or dimensions to reduce.
"""), {'keepdim_details': """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""})
single_dim_common = merge_dicts(reduceops_common_args, parse_kwargs("""
dim (int): the dimension to reduce.
"""), {'keepdim_details': """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""})
factory_common_args = merge_dicts(common_args, parse_kwargs("""
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
"""))
factory_like_common_args = parse_kwargs("""
input (Tensor): the size of :attr:`input` will determine size of the output tensor.
layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
Default: if ``None``, defaults to the layout of :attr:`input`.
dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
Default: if ``None``, defaults to the dtype of :attr:`input`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, defaults to the device of :attr:`input`.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.preserve_format``.
""")
factory_data_common_args = parse_kwargs("""
data (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, infers data type from :attr:`data`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if ``None``, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
""")
add_docstr(torch.abs,
r"""
abs(input, out=None) -> Tensor
Computes the element-wise absolute value of the given :attr:`input` tensor.
.. math::
\text{out}_{i} = |\text{input}_{i}|
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.abs(torch.tensor([-1, -2, 3]))
tensor([ 1, 2, 3])
""".format(**common_args))
add_docstr(torch.absolute,
r"""
absolute(input, out=None) -> Tensor
Alias for :func:`torch.abs`
""".format(**common_args))
add_docstr(torch.acos,
r"""
acos(input, out=None) -> Tensor
Returns a new tensor with the arccosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cos^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.3348, -0.5889, 0.2005, -0.1584])
>>> torch.acos(a)
tensor([ 1.2294, 2.2004, 1.3690, 1.7298])
""".format(**common_args))
add_docstr(torch.acosh,
r"""
acosh(input, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
Note:
The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
.. math::
\text{out}_{i} = \cosh^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4).uniform_(1, 2)
>>> a
tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
>>> torch.acosh(a)
tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(**common_args))
add_docstr(torch.add,
r"""
add(input, other, out=None)
Adds the scalar :attr:`other` to each element of the input :attr:`input`
and returns a new resulting tensor.
.. math::
\text{{out}} = \text{{input}} + \text{{other}}
If :attr:`input` is of type FloatTensor or DoubleTensor, :attr:`other` must be
a real number, otherwise it should be an integer.
Args:
{input}
value (Number): the number to be added to each element of :attr:`input`
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.0202, 1.0985, 1.3506, -0.6056])
>>> torch.add(a, 20)
tensor([ 20.0202, 21.0985, 21.3506, 19.3944])
.. function:: add(input, other, *, alpha=1, out=None)
Each element of the tensor :attr:`other` is multiplied by the scalar
:attr:`alpha` and added to each element of the tensor :attr:`input`.
The resulting tensor is returned.
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
.. math::
\text{{out}} = \text{{input}} + \text{{alpha}} \times \text{{other}}
If :attr:`other` is of type FloatTensor or DoubleTensor, :attr:`alpha` must be
a real number, otherwise it should be an integer.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
alpha (Number): the scalar multiplier for :attr:`other`
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.9732, -0.3497, 0.6245, 0.4022])
>>> b = torch.randn(4, 1)
>>> b
tensor([[ 0.3743],
[-1.7724],
[-0.5811],
[-0.8017]])
>>> torch.add(a, b, alpha=10)
tensor([[ 2.7695, 3.3930, 4.3672, 4.1450],
[-18.6971, -18.0736, -17.0994, -17.3216],
[ -6.7845, -6.1610, -5.1868, -5.4090],
[ -8.9902, -8.3667, -7.3925, -7.6147]])
""".format(**common_args))
add_docstr(torch.addbmm,
r"""
addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored
in :attr:`batch1` and :attr:`batch2`,
with a reduced add step (all matrix multiplications get accumulated
along the first dimension).
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
same number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
.. math::
out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
must be real numbers, otherwise they should be integers.
Args:
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
input (Tensor): matrix to be added
alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.addbmm(M, batch1, batch2)
tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653],
[ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743],
[ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]])
""".format(**common_args))
add_docstr(torch.addcdiv,
r"""
addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
multiply the result by the scalar :attr:`value` and add it to :attr:`input`.
.. warning::
Integer division with addcdiv is no longer supported, and in a future release
addcdiv will perform a true division of :attr:`tensor1` and :attr:`tensor2`.
The historic addcdiv behavior can be implemented using :func:`floor_divide`
for integral inputs
(:attr:`input` + :attr:`value` * :attr:`tensor1` // :attr:`tensor2`)
and :func:`div` for float inputs
(:attr:`input` + :attr:`value` * :attr:`tensor1` / :attr:`tensor2`).
The future addcdiv behavior can be implemented with :func:`true_divide`
(:attr:`input` + :attr:`value` * torch.true_divide(:attr:`tensor1`,
:attr:`tensor2`).
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
""" + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the numerator tensor
tensor2 (Tensor): the denominator tensor
value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
{out}
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcdiv(t, t1, t2, value=0.1)
tensor([[-0.2312, -3.6496, 0.1312],
[-1.0428, 3.4292, -0.1030],
[-0.5369, -0.9829, 0.0430]])
""".format(**common_args))
add_docstr(torch.addcmul,
r"""
addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
Performs the element-wise multiplication of :attr:`tensor1`
by :attr:`tensor2`, multiply the result by the scalar :attr:`value`
and add it to :attr:`input`.
.. math::
\text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
""" + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.
For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.
Args:
input (Tensor): the tensor to be added
tensor1 (Tensor): the tensor to be multiplied
tensor2 (Tensor): the tensor to be multiplied
value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
{out}
Example::
>>> t = torch.randn(1, 3)
>>> t1 = torch.randn(3, 1)
>>> t2 = torch.randn(1, 3)
>>> torch.addcmul(t, t1, t2, value=0.1)
tensor([[-0.8635, -0.6391, 1.6174],
[-0.7617, -0.5879, 1.7388],
[-0.8353, -0.6249, 1.6511]])
""".format(**common_args))
add_docstr(torch.addmm,
r"""
addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
and :attr:`out` will be a :math:`(n \times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-matrix product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1} \mathbin{@} \text{mat2})
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): matrix to be added
mat1 (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(2, 3)
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.addmm(M, mat1, mat2)
tensor([[-4.8716, 1.4671, -1.3746],
[ 0.7573, -3.9555, -2.8681]])
""".format(**common_args))
add_docstr(torch.addmv,
r"""
addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`mat` and
the vector :attr:`vec`.
The vector :attr:`input` is added to the final result.
If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
:attr:`out` will be 1-D tensor of size `n`.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers
Args:
input (Tensor): vector to be added
mat (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(2)
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.addmv(M, mat, vec)
tensor([-0.3768, -5.5565])
""".format(**common_args))
add_docstr(torch.addr,
r"""
addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
and adds it to the matrix :attr:`input`.
Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
:attr:`input` respectively.
.. math::
\text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
""" + r"""
If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
of size `m`, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
:math:`(n \times m)` and :attr:`out` will be a matrix of size
:math:`(n \times m)`.
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers
Args:
input (Tensor): matrix to be added
vec1 (Tensor): the first vector of the outer product
vec2 (Tensor): the second vector of the outer product
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
{out}
Example::
>>> vec1 = torch.arange(1., 4.)
>>> vec2 = torch.arange(1., 3.)
>>> M = torch.zeros(3, 2)
>>> torch.addr(M, vec1, vec2)
tensor([[ 1., 2.],
[ 2., 4.],
[ 3., 6.]])
""".format(**common_args))
add_docstr(torch.allclose,
r"""
allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
This function checks if all :attr:`input` and :attr:`other` satisfy the condition:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
""" + r"""
elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Example::
>>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
False
>>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
True
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
False
>>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
True
""")
add_docstr(torch.angle,
r"""
angle(input, out=None) -> Tensor
Computes the element-wise angle (in radians) of the given :attr:`input` tensor.
.. math::
\text{out}_{i} = angle(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
tensor([ 135., 135, -45])
""".format(**common_args))
add_docstr(torch.as_strided,
r"""
as_strided(input, size, stride, storage_offset=0) -> Tensor
Create a view of an existing `torch.Tensor` :attr:`input` with specified
:attr:`size`, :attr:`stride` and :attr:`storage_offset`.
.. warning::
More than one element of a created tensor may refer to a single memory
location. As a result, in-place operations (especially ones that are
vectorized) may result in incorrect behavior. If you need to write to
the tensors, please clone them first.
Many PyTorch functions, which return a view of a tensor, are internally
implemented with this function. Those functions, like
:meth:`torch.Tensor.expand`, are easier to read and are therefore more
advisable to use.
Args:
{input}
size (tuple or ints): the shape of the output tensor
stride (tuple or ints): the stride of the output tensor
storage_offset (int, optional): the offset in the underlying storage of the output tensor
Example::
>>> x = torch.randn(3, 3)
>>> x
tensor([[ 0.9039, 0.6291, 1.0795],
[ 0.1586, 2.1939, -0.4900],
[-0.1909, -0.7503, 1.9355]])
>>> t = torch.as_strided(x, (2, 2), (1, 2))
>>> t
tensor([[0.9039, 1.0795],
[0.6291, 0.1586]])
>>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
tensor([[0.6291, 0.1586],
[1.0795, 2.1939]])
""".format(**common_args))
add_docstr(torch.as_tensor,
r"""
as_tensor(data, dtype=None, device=None) -> Tensor
Convert the data into a `torch.Tensor`. If the data is already a `Tensor` with the same `dtype` and `device`,
no copy will be performed, otherwise a new `Tensor` will be returned with computational graph retained if data
`Tensor` has ``requires_grad=True``. Similarly, if the data is an ``ndarray`` of the corresponding `dtype` and
the `device` is the cpu, no copy will be performed.
Args:
{data}
{dtype}
{device}
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
>>> a = numpy.array([1, 2, 3])
>>> t = torch.as_tensor(a, device=torch.device('cuda'))
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([1, 2, 3])
""".format(**factory_data_common_args))
add_docstr(torch.asin,
r"""
asin(input, out=None) -> Tensor
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5962, 1.4985, -0.4396, 1.4525])
>>> torch.asin(a)
tensor([-0.6387, nan, -0.4552, nan])
""".format(**common_args))
add_docstr(torch.asinh,
r"""
asinh(input, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sinh^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
>>> torch.asinh(a)
tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
""".format(**common_args))
add_docstr(torch.atan,
r"""
atan(input, out=None) -> Tensor
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.2341, 0.2539, -0.6256, -0.6448])
>>> torch.atan(a)
tensor([ 0.2299, 0.2487, -0.5591, -0.5727])
""".format(**common_args))
add_docstr(torch.atan2,
r"""
atan2(input, other, out=None) -> Tensor
Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
with consideration of the quadrant. Returns a new tensor with the signed angles
in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
parameter, is the y-coordinate.)
The shapes of ``input`` and ``other`` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the first input tensor
other (Tensor): the second input tensor
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9041, 0.0196, -0.3108, -2.4423])
>>> torch.atan2(a, torch.randn(4))
tensor([ 0.9833, 0.0811, -1.9743, -1.4151])
""".format(**common_args))
add_docstr(torch.atanh,
r"""
atanh(input, out=None) -> Tensor
Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
Note:
The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
mapped to `+/-INF` respectively.
.. math::
\text{out}_{i} = \tanh^{-1}(\text{input}_{i})
""" + r"""
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.randn(4).uniform_(-1, 1)
>>> a
tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
>>> torch.atanh(a)
tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
""".format(**common_args))
add_docstr(torch.baddbmm,
r"""
baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices in :attr:`batch1`
and :attr:`batch2`.
:attr:`input` is added to the final result.
:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
number of matrices.
If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
:math:`(b \times m \times p)` tensor, then :attr:`input` must be
:ref:`broadcastable <broadcasting-semantics>` with a
:math:`(b \times n \times p)` tensor and :attr:`out` will be a
:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
same as the scaling factors used in :meth:`torch.addbmm`.
.. math::
\text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
""" + r"""
For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): the tensor to be added
batch1 (Tensor): the first batch of matrices to be multiplied
batch2 (Tensor): the second batch of matrices to be multiplied
beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
{out}
Example::
>>> M = torch.randn(10, 3, 5)
>>> batch1 = torch.randn(10, 3, 4)
>>> batch2 = torch.randn(10, 4, 5)
>>> torch.baddbmm(M, batch1, batch2).size()
torch.Size([10, 3, 5])
""".format(**common_args))
add_docstr(torch.bernoulli,
r"""
bernoulli(input, *, generator=None, out=None) -> Tensor
Draws binary random numbers (0 or 1) from a Bernoulli distribution.
The :attr:`input` tensor should be a tensor containing probabilities
to be used for drawing the binary random number.
Hence, all values in :attr:`input` have to be in the range:
:math:`0 \leq \text{input}_i \leq 1`.
The :math:`\text{i}^{th}` element of the output tensor will draw a
value :math:`1` according to the :math:`\text{i}^{th}` probability value given
in :attr:`input`.
.. math::
\text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
""" + r"""
The returned :attr:`out` tensor only has values 0 or 1 and is of the same
shape as :attr:`input`.
:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
point ``dtype``.
Args:
input (Tensor): the input tensor of probability values for the Bernoulli distribution
{generator}
{out}
Example::
>>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1]
>>> a
tensor([[ 0.1737, 0.0950, 0.3609],
[ 0.7148, 0.0289, 0.2676],
[ 0.9456, 0.8937, 0.7202]])
>>> torch.bernoulli(a)
tensor([[ 1., 0., 0.],
[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> a = torch.ones(3, 3) # probability of drawing "1" is 1
>>> torch.bernoulli(a)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
>>> torch.bernoulli(a)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]])
""".format(**common_args))
add_docstr(torch.bincount,
r"""
bincount(input, weights=None, minlength=0) -> Tensor
Count the frequency of each value in an array of non-negative ints.
The number of bins (size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.
Note:
In some circumstances when using the CUDA backend with CuDNN, this operator
may select a nondeterministic algorithm to increase performance. If this is
undesirable, you can try to make the operation deterministic (potentially at
a performance cost) by setting ``torch.backends.cudnn.deterministic =
True``.
Please see the notes on :doc:`/notes/randomness` for background.
Arguments:
input (Tensor): 1-d int tensor
weights (Tensor): optional, weight for each value in the input tensor.
Should be of same size as input tensor.
minlength (int): optional, minimum number of bins. Should be non-negative.
Returns:
output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
:attr:`input` is non-empty, else ``Size(0)``
Example::
>>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
>>> weights = torch.linspace(0, 1, steps=5)
>>> input, weights
(tensor([4, 3, 6, 3, 4]),
tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000])
>>> torch.bincount(input)
tensor([0, 0, 0, 2, 2, 0, 1])
>>> input.bincount(weights)
tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""")
add_docstr(torch.bitwise_not,
r"""
bitwise_not(input, out=None) -> Tensor
Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.
Args:
{input}
{out}
Example:
>>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
tensor([ 0, 1, -4], dtype=torch.int8)
""".format(**common_args))
add_docstr(torch.bmm,
r"""
bmm(input, mat2, deterministic=False, out=None) -> Tensor
Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.
:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.
If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.
.. math::
\text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
""" + r"""
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
input (Tensor): the first batch of matrices to be multiplied
mat2 (Tensor): the second batch of matrices to be multiplied
deterministic (bool, optional): flag to choose between a faster non-deterministic
calculation, or a slower deterministic calculation.
This argument is only available for sparse-dense CUDA bmm.
Default: ``False``
{out}
Example::
>>> input = torch.randn(10, 3, 4)
>>> mat2 = torch.randn(10, 4, 5)
>>> res = torch.bmm(input, mat2)
>>> res.size()
torch.Size([10, 3, 5])
""".format(**common_args))
add_docstr(torch.bitwise_and,
r"""
bitwise_and(input, other, out=None) -> Tensor
Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.
Args:
input: the first input tensor
other: the second input tensor
{out}
Example:
>>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([1, 0, 3], dtype=torch.int8)
>>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ False, True, False])
""".format(**common_args))
add_docstr(torch.bitwise_or,
r"""
bitwise_or(input, other, out=None) -> Tensor
Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.
Args:
input: the first input tensor
other: the second input tensor
{out}
Example:
>>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-1, -2, 3], dtype=torch.int8)
>>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, True, False])
""".format(**common_args))
add_docstr(torch.bitwise_xor,
r"""
bitwise_xor(input, other, out=None) -> Tensor
Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.
Args:
input: the first input tensor
other: the second input tensor
{out}
Example:
>>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
tensor([-2, -2, 0], dtype=torch.int8)
>>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
tensor([ True, False, False])
""".format(**common_args))
add_docstr(torch.stack,
r"""
stack(tensors, dim=0, out=None) -> Tensor
Concatenates sequence of tensors along a new dimension.
All tensors need to be of the same size.
Arguments:
tensors (sequence of Tensors): sequence of tensors to concatenate
dim (int): dimension to insert. Has to be between 0 and the number
of dimensions of concatenated tensors (inclusive)
{out}
""".format(**common_args))
# Docstring attachments for tensor-splitting, concatenation, casting,
# complex-view, and Cholesky factorization ops. Each `add_docstr` call binds a
# reST docstring to a C-implemented torch function; `{input}`/`{out}`
# placeholders are substituted from `common_args` (defined earlier in this file).
add_docstr(torch.chunk,
r"""
chunk(input, chunks, dim=0) -> List of Tensors
Splits a tensor into a specific number of chunks. Each chunk is a view of
the input tensor.
Last chunk will be smaller if the tensor size along the given dimension
:attr:`dim` is not divisible by :attr:`chunks`.
Arguments:
input (Tensor): the tensor to split
chunks (int): number of chunks to return
dim (int): dimension along which to split the tensor
""")
add_docstr(torch.unsafe_chunk,
r"""
unsafe_chunk(input, chunks, dim=0) -> List of Tensors
Works like :func:`torch.chunk` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is user's
responsibility to ensure that is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
""")
add_docstr(torch.unsafe_split,
r"""
unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
Works like :func:`torch.split` but without enforcing the autograd restrictions
on inplace modification of the outputs.
.. warning::
This function is safe to use as long as only the input, or only the outputs
are modified inplace after calling this function. It is user's
responsibility to ensure that is the case. If both the input and one or more
of the outputs are modified inplace, gradients computed by autograd will be
silently incorrect.
""")
add_docstr(torch.can_cast,
r"""
can_cast(from, to) -> bool
Determines if a type conversion is allowed under PyTorch casting rules
described in the type promotion :ref:`documentation <type-promotion-doc>`.
Args:
from (dtype): The original :class:`torch.dtype`.
to (dtype): The target :class:`torch.dtype`.
Example::
>>> torch.can_cast(torch.double, torch.float)
True
>>> torch.can_cast(torch.float, torch.int)
False
""")
add_docstr(torch.cat,
r"""
cat(tensors, dim=0, out=None) -> Tensor
Concatenates the given sequence of :attr:`seq` tensors in the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be empty.
:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
and :func:`torch.chunk`.
:func:`torch.cat` can be best understood via examples.
Args:
tensors (sequence of Tensors): any python sequence of tensors of the same type.
Non-empty tensors provided must have the same shape, except in the
cat dimension.
dim (int, optional): the dimension over which the tensors are concatenated
{out}
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 0)
tensor([[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497],
[ 0.6580, -1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497]])
>>> torch.cat((x, x, x), 1)
tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580,
-1.0969, -0.4614],
[-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034,
-0.5790, 0.1497]])
""".format(**common_args))
add_docstr(torch.ceil,
r"""
ceil(input, out=None) -> Tensor
Returns a new tensor with the ceil of the elements of :attr:`input`,
the smallest integer greater than or equal to each element.
.. math::
\text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil = \left\lfloor \text{input}_{i} \right\rfloor + 1
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.6341, -1.4208, -1.0900, 0.5826])
>>> torch.ceil(a)
tensor([-0., -1., -1., 1.])
""".format(**common_args))
add_docstr(torch.real,
r"""
real(input) -> Tensor
Returns a new tensor containing real values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`real` is only supported for tensors with complex dtypes.
Args:
{input}
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.real
tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
""".format(**common_args))
add_docstr(torch.imag,
r"""
imag(input) -> Tensor
Returns a new tensor containing imaginary values of the :attr:`self` tensor.
The returned tensor and :attr:`self` share the same underlying storage.
.. warning::
:func:`imag` is only supported for tensors with complex dtypes.
Args:
{input}
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
>>> x.imag
tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
""".format(**common_args))
add_docstr(torch.view_as_real,
r"""
view_as_real(input) -> Tensor
Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new
real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
represents the real and imaginary components of complex numbers.
.. warning::
:func:`view_as_real` is only supported for tensors with ``complex dtypes``.
Args:
{input}
Example::
>>> x=torch.randn(4, dtype=torch.cfloat)
>>> x
tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
>>> torch.view_as_real(x)
tensor([[ 0.4737, -0.3839],
[-0.2098, -0.6699],
[ 0.3470, -0.9451],
[-0.5174, -1.3136]])
""".format(**common_args))
add_docstr(torch.view_as_complex,
r"""
view_as_complex(input) -> Tensor
Returns a view of :attr:`input` as a complex tensor. For an input complex
tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a
new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last
dimension of the input tensor is expected to represent the real and imaginary
components of complex numbers.
.. warning::
:func:`view_as_complex` is only supported for tensors with
:class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is
expected to have the last dimension of :attr:`size` 2. In addition, the
tensor must have a `stride` of 1 for its last dimension. The strides of all
other dimensions must be even numbers.
Args:
{input}
Example::
>>> x=torch.randn(4, 2)
>>> x
tensor([[ 1.6116, -0.5772],
[-1.4606, -0.9120],
[ 0.0786, -1.7497],
[-0.6561, -1.6623]])
>>> torch.view_as_complex(x)
tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
""".format(**common_args))
add_docstr(torch.reciprocal,
r"""
reciprocal(input, out=None) -> Tensor
Returns a new tensor with the reciprocal of the elements of :attr:`input`
.. math::
\text{out}_{i} = \frac{1}{\text{input}_{i}}
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.4595, -2.1219, -1.4314, 0.7298])
>>> torch.reciprocal(a)
tensor([-2.1763, -0.4713, -0.6986, 1.3702])
""".format(**common_args))
add_docstr(torch.cholesky, r"""
cholesky(input, upper=False, out=None) -> Tensor
Computes the Cholesky decomposition of a symmetric positive-definite
matrix :math:`A` or for batches of symmetric positive-definite matrices.
If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
the decomposition has the form:
.. math::
A = U^TU
If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
the decomposition has the form:
.. math::
A = LL^T
If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
tensor will be composed of lower-triangular Cholesky factors of each of the individual
matrices.
Args:
input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric positive-definite matrices.
upper (bool, optional): flag that indicates whether to return a
upper or lower triangular matrix. Default: ``False``
out (Tensor, optional): the output matrix
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> a
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> l
tensor([[ 1.5528, 0.0000, 0.0000],
[-0.4821, 1.0592, 0.0000],
[ 0.9371, 0.5487, 0.7023]])
>>> torch.mm(l, l.t())
tensor([[ 2.4112, -0.7486, 1.4551],
[-0.7486, 1.3544, 0.1294],
[ 1.4551, 0.1294, 1.6724]])
>>> a = torch.randn(3, 2, 2)
>>> a = torch.matmul(a, a.transpose(-1, -2)) + 1e-03 # make symmetric positive-definite
>>> l = torch.cholesky(a)
>>> z = torch.matmul(l, l.transpose(-1, -2))
>>> torch.max(torch.abs(z - a)) # Max non-zero
tensor(2.3842e-07)
""")
# Docstring for torch.cholesky_solve. Fixes in the text: "is and lower
# triangular" -> "is lower triangular"; dropped "or not provided" from the
# ``upper=True`` branch (the documented default is ``False``, so an omitted
# argument takes the lower-triangular path); "zero of more" -> "zero or more".
add_docstr(torch.cholesky_solve, r"""
cholesky_solve(input, input2, upper=False, out=None) -> Tensor
Solves a linear system of equations with a positive semidefinite
matrix to be inverted given its Cholesky factor matrix :math:`u`.
If :attr:`upper` is ``False``, :math:`u` is lower triangular and `c` is
returned such that:
.. math::
c = (u u^T)^{{-1}} b
If :attr:`upper` is ``True``, :math:`u` is upper triangular
and `c` is returned such that:
.. math::
c = (u^T u)^{{-1}} b
`torch.cholesky_solve(b, u)` can take in 2D inputs `b, u` or inputs that are
batches of 2D matrices. If the inputs are batches, then returns
batched outputs `c`
Args:
input (Tensor): input matrix :math:`b` of size :math:`(*, m, k)`,
where :math:`*` is zero or more batch dimensions
input2 (Tensor): input matrix :math:`u` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions composed of
upper or lower triangular Cholesky factor
upper (bool, optional): whether to consider the Cholesky factor as a
lower or upper triangular matrix. Default: ``False``.
out (Tensor, optional): the output tensor for `c`
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) # make symmetric positive definite
>>> u = torch.cholesky(a)
>>> a
tensor([[ 0.7747, -1.9549, 1.3086],
[-1.9549, 6.7546, -5.4114],
[ 1.3086, -5.4114, 4.8733]])
>>> b = torch.randn(3, 2)
>>> b
tensor([[-0.6355, 0.9891],
[ 0.1974, 1.4706],
[-0.4115, -0.6225]])
>>> torch.cholesky_solve(b, u)
tensor([[ -8.1625, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
>>> torch.mm(a.inverse(), b)
tensor([[ -8.1626, 19.6097],
[ -5.8398, 14.2387],
[ -4.3771, 10.4173]])
""")
# Docstring for torch.cholesky_inverse. Fixes in the text: dropped
# "or not provided" from the ``upper=True`` branch (consistent with
# ``upper=False`` being the signature default); "a upper" -> "an upper".
add_docstr(torch.cholesky_inverse, r"""
cholesky_inverse(input, upper=False, out=None) -> Tensor
Computes the inverse of a symmetric positive-definite matrix :math:`A` using its
Cholesky factor :math:`u`: returns matrix ``inv``. The inverse is computed using
LAPACK routines ``dpotri`` and ``spotri`` (and the corresponding MAGMA routines).
If :attr:`upper` is ``False``, :math:`u` is lower triangular
such that the returned tensor is
.. math::
inv = (uu^{{T}})^{{-1}}
If :attr:`upper` is ``True``, :math:`u` is upper
triangular such that the returned tensor is
.. math::
inv = (u^T u)^{{-1}}
Args:
input (Tensor): the input 2-D tensor :math:`u`, an upper or lower triangular
Cholesky factor
upper (bool, optional): whether to return a lower (default) or upper triangular matrix
out (Tensor, optional): the output tensor for `inv`
Example::
>>> a = torch.randn(3, 3)
>>> a = torch.mm(a, a.t()) + 1e-05 * torch.eye(3) # make symmetric positive definite
>>> u = torch.cholesky(a)
>>> a
tensor([[ 0.9935, -0.6353, 1.5806],
[ -0.6353, 0.8769, -1.7183],
[ 1.5806, -1.7183, 10.6618]])
>>> torch.cholesky_inverse(u)
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
>>> a.inverse()
tensor([[ 1.9314, 1.2251, -0.0889],
[ 1.2251, 2.4439, 0.2122],
[-0.0889, 0.2122, 0.1412]])
""")
# Docstring attachments for clamp, complex conjugate, trigonometric, and
# cross-product ops; `{input}`/`{out}` placeholders come from `common_args`.
add_docstr(torch.clamp,
r"""
clamp(input, min, max, out=None) -> Tensor
Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return
a resulting tensor:
.. math::
y_i = \begin{cases}
\text{min} & \text{if } x_i < \text{min} \\
x_i & \text{if } \text{min} \leq x_i \leq \text{max} \\
\text{max} & \text{if } x_i > \text{max}
\end{cases}
""" + r"""
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min`
and :attr:`max` must be real numbers, otherwise they should be integers.
Args:
{input}
min (Number): lower-bound of the range to be clamped to
max (Number): upper-bound of the range to be clamped to
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.7120, 0.1734, -0.0478, -0.0922])
>>> torch.clamp(a, min=-0.5, max=0.5)
tensor([-0.5000, 0.1734, -0.0478, -0.0922])
.. function:: clamp(input, *, min, out=None) -> Tensor
Clamps all elements in :attr:`input` to be larger or equal :attr:`min`.
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value`
should be a real number, otherwise it should be an integer.
Args:
{input}
value (Number): minimal value of each element in the output
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.0299, -2.3184, 2.1593, -0.8883])
>>> torch.clamp(a, min=0.5)
tensor([ 0.5000, 0.5000, 2.1593, 0.5000])
.. function:: clamp(input, *, max, out=None) -> Tensor
Clamps all elements in :attr:`input` to be smaller or equal :attr:`max`.
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`value`
should be a real number, otherwise it should be an integer.
Args:
{input}
value (Number): maximal value of each element in the output
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.7753, -0.4702, -0.4599, 1.1899])
>>> torch.clamp(a, max=0.5)
tensor([ 0.5000, -0.4702, -0.4599, 0.5000])
""".format(**common_args))
add_docstr(torch.conj,
r"""
conj(input, out=None) -> Tensor
Computes the element-wise conjugate of the given :attr:`input` tensor.
.. math::
\text{out}_{i} = conj(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.conj(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
tensor([-1 - 1j, -2 - 2j, 3 + 3j])
""".format(**common_args))
add_docstr(torch.cos,
r"""
cos(input, out=None) -> Tensor
Returns a new tensor with the cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cos(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 1.4309, 1.2706, -0.8562, 0.9796])
>>> torch.cos(a)
tensor([ 0.1395, 0.2957, 0.6553, 0.5574])
""".format(**common_args))
add_docstr(torch.cosh,
r"""
cosh(input, out=None) -> Tensor
Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \cosh(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.1632, 1.1835, -0.6979, -0.7325])
>>> torch.cosh(a)
tensor([ 1.0133, 1.7860, 1.2536, 1.2805])
""".format(**common_args))
add_docstr(torch.cross,
r"""
cross(input, other, dim=-1, out=None) -> Tensor
Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
and :attr:`other`.
:attr:`input` and :attr:`other` must have the same size, and the size of their
:attr:`dim` dimension should be 3.
If :attr:`dim` is not given, it defaults to the first dimension found with the
size 3.
Args:
{input}
other (Tensor): the second input tensor
dim (int, optional): the dimension to take the cross-product in.
{out}
Example::
>>> a = torch.randn(4, 3)
>>> a
tensor([[-0.3956, 1.1455, 1.6895],
[-0.5849, 1.3672, 0.3599],
[-1.1626, 0.7180, -0.0521],
[-0.1339, 0.9902, -2.0225]])
>>> b = torch.randn(4, 3)
>>> b
tensor([[-0.0257, -1.4725, -1.2251],
[-1.1479, -0.7005, -1.9757],
[-1.3904, 0.3726, -1.1836],
[-0.9688, -0.7153, 0.2159]])
>>> torch.cross(a, b, dim=1)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
>>> torch.cross(a, b)
tensor([[ 1.0844, -0.5281, 0.6120],
[-2.4490, -1.5687, 1.9792],
[-0.8304, -1.3037, 0.5650],
[-1.2329, 1.9883, 1.0551]])
""".format(**common_args))
# Docstring for torch.logcumsumexp. Fix: the example output ended with a stray
# extra closing parenthesis ("]))" -> "])").
add_docstr(torch.logcumsumexp,
r"""
logcumsumexp(input, dim, out=None) -> Tensor
Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})
Args:
{input}
dim (int): the dimension to do the operation over
{out}
Example::
>>> a = torch.randn(10)
>>> torch.logcumsumexp(a, dim=0)
tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811,
1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])
""".format(**reduceops_common_args))
# Docstring attachment for the cumulative-maximum reduction; placeholders
# filled from `reduceops_common_args`.
add_docstr(torch.cummax,
r"""
cummax(input, dim, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.
.. math::
y_i = max(x_1, x_2, x_3, \dots, x_i)
Args:
{input}
dim (int): the dimension to do the operation over
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284,
1.9946, -0.8209])
>>> torch.cummax(a, dim=0)
torch.return_types.cummax(
values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696,
1.9946, 1.9946]),
indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
""".format(**reduceops_common_args))
# Docstring for torch.cummin. Fix: the text said "index location of each
# maximum value" — copied from cummax — but this op is the cumulative
# *minimum*, as the formula and example below show.
add_docstr(torch.cummin,
r"""
cummin(input, dim, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
.. math::
y_i = min(x_1, x_2, x_3, \dots, x_i)
Args:
{input}
dim (int): the dimension to do the operation over
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762,
0.9165, 1.6684])
>>> torch.cummin(a, dim=0)
torch.return_types.cummin(
values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
-1.3298, -1.3298]),
indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
""".format(**reduceops_common_args))
# Docstring attachments for cumulative reductions, non-zero counting,
# dequantization, and the diag family of constructors/extractors.
add_docstr(torch.cumprod,
r"""
cumprod(input, dim, out=None, dtype=None) -> Tensor
Returns the cumulative product of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
Args:
{input}
dim (int): the dimension to do the operation over
{dtype}
{out}
Example::
>>> a = torch.randn(10)
>>> a
tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126,
-0.2129, -0.4206, 0.1968])
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
0.0014, -0.0006, -0.0001])
>>> a[5] = 0.0
>>> torch.cumprod(a, dim=0)
tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
0.0000, -0.0000, -0.0000])
""".format(**reduceops_common_args))
add_docstr(torch.cumsum,
r"""
cumsum(input, dim, out=None, dtype=None) -> Tensor
Returns the cumulative sum of elements of :attr:`input` in the dimension
:attr:`dim`.
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements.
.. math::
y_i = x_1 + x_2 + x_3 + \dots + x_i
Args:
{input}
dim (int): the dimension to do the operation over
{dtype}
{out}
Example::
>>> a = torch.randn(10)
>>> a
tensor([-0.8286, -0.4890, 0.5155, 0.8443, 0.1865, -0.1752, -2.0595,
0.1850, -1.1571, -0.4243])
>>> torch.cumsum(a, dim=0)
tensor([-0.8286, -1.3175, -0.8020, 0.0423, 0.2289, 0.0537, -2.0058,
-1.8209, -2.9780, -3.4022])
""".format(**reduceops_common_args))
add_docstr(torch.count_nonzero,
r"""
count_nonzero(input, dim=None) -> Tensor
Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
If no dim is specified then all non-zeros in the tensor are counted.
Args:
{input}
dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
Example::
>>> x = torch.zeros(3,3)
>>> x[torch.randn(3,3) > 0.5] = 1
>>> x
tensor([[0., 1., 1.],
[0., 0., 0.],
[0., 0., 1.]])
>>> torch.count_nonzero(x)
tensor(3)
>>> torch.count_nonzero(x, dim=0)
tensor([0, 1, 2])
""".format(**reduceops_common_args))
add_docstr(torch.dequantize,
r"""
dequantize(tensor) -> Tensor
Given a quantized Tensor, dequantize it and return an fp32 Tensor
Args:
tensor (Tensor): A quantized Tensor
.. function:: dequantize(tensors) -> sequence of Tensors
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors
Args:
tensors (sequence of Tensors): A list of quantized Tensors
""")
add_docstr(torch.diag,
r"""
diag(input, diagonal=0, out=None) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
the diagonal elements of :attr:`input`.
The argument :attr:`diagonal` controls which diagonal to consider:
- If :attr:`diagonal` = 0, it is the main diagonal.
- If :attr:`diagonal` > 0, it is above the main diagonal.
- If :attr:`diagonal` < 0, it is below the main diagonal.
Args:
{input}
diagonal (int, optional): the diagonal to consider
{out}
.. seealso::
:func:`torch.diagonal` always returns the diagonal of its input.
:func:`torch.diagflat` always constructs a tensor with diagonal elements
specified by the input.
Examples:
Get the square matrix where the input vector is the diagonal::
>>> a = torch.randn(3)
>>> a
tensor([ 0.5950,-0.0872, 2.3298])
>>> torch.diag(a)
tensor([[ 0.5950, 0.0000, 0.0000],
[ 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 2.3298]])
>>> torch.diag(a, 1)
tensor([[ 0.0000, 0.5950, 0.0000, 0.0000],
[ 0.0000, 0.0000,-0.0872, 0.0000],
[ 0.0000, 0.0000, 0.0000, 2.3298],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
Get the k-th diagonal of a given matrix::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-0.4264, 0.0255,-0.1064],
[ 0.8795,-0.2429, 0.1374],
[ 0.1029,-0.6482,-1.6300]])
>>> torch.diag(a, 0)
tensor([-0.4264,-0.2429,-1.6300])
>>> torch.diag(a, 1)
tensor([ 0.0255, 0.1374])
""".format(**common_args))
add_docstr(torch.diag_embed,
r"""
diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
Creates a tensor whose diagonals of certain 2D planes (specified by
:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
To facilitate creating batched diagonal matrices, the 2D planes formed by
the last two dimensions of the returned tensor are chosen by default.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
The size of the new matrix will be calculated to make the specified diagonal
of the size of the last input dimension.
Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
and :attr:`dim2` matters. Exchanging them is equivalent to changing the
sign of :attr:`offset`.
Applying :meth:`torch.diagonal` to the output of this function with
the same arguments yields a matrix identical to input. However,
:meth:`torch.diagonal` has different default dimensions, so those
need to be explicitly specified.
Args:
{input} Must be at least 1-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: -2.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: -1.
Example::
>>> a = torch.randn(2, 3)
>>> torch.diag_embed(a)
tensor([[[ 1.5410, 0.0000, 0.0000],
[ 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -2.1788]],
[[ 0.5684, 0.0000, 0.0000],
[ 0.0000, -1.0845, 0.0000],
[ 0.0000, 0.0000, -1.3986]]])
>>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000],
[ 0.0000, 0.5684, 0.0000, 0.0000]],
[[ 0.0000, 0.0000, -0.2934, 0.0000],
[ 0.0000, 0.0000, -1.0845, 0.0000]],
[[ 0.0000, 0.0000, 0.0000, -2.1788],
[ 0.0000, 0.0000, 0.0000, -1.3986]],
[[ 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.0000]]])
""".format(**common_args))
add_docstr(torch.diagflat,
r"""
diagflat(input, offset=0) -> Tensor
- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
with the elements of :attr:`input` as the diagonal.
- If :attr:`input` is a tensor with more than one dimension, then returns a
2-D tensor with diagonal elements equal to a flattened :attr:`input`.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Args:
{input}
offset (int, optional): the diagonal to consider. Default: 0 (main
diagonal).
Examples::
>>> a = torch.randn(3)
>>> a
tensor([-0.2956, -0.9068, 0.1695])
>>> torch.diagflat(a)
tensor([[-0.2956, 0.0000, 0.0000],
[ 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.1695]])
>>> torch.diagflat(a, 1)
tensor([[ 0.0000, -0.2956, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.9068, 0.0000],
[ 0.0000, 0.0000, 0.0000, 0.1695],
[ 0.0000, 0.0000, 0.0000, 0.0000]])
>>> a = torch.randn(2, 2)
>>> a
tensor([[ 0.2094, -0.3018],
[-0.1516, 1.9342]])
>>> torch.diagflat(a)
tensor([[ 0.2094, 0.0000, 0.0000, 0.0000],
[ 0.0000, -0.3018, 0.0000, 0.0000],
[ 0.0000, 0.0000, -0.1516, 0.0000],
[ 0.0000, 0.0000, 0.0000, 1.9342]])
""".format(**common_args))
# Docstring for torch.diagonal. Fix: "with the its diagonal elements" ->
# "with its diagonal elements".
add_docstr(torch.diagonal,
r"""
diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
Returns a partial view of :attr:`input` with its diagonal elements
with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
at the end of the shape.
The argument :attr:`offset` controls which diagonal to consider:
- If :attr:`offset` = 0, it is the main diagonal.
- If :attr:`offset` > 0, it is above the main diagonal.
- If :attr:`offset` < 0, it is below the main diagonal.
Applying :meth:`torch.diag_embed` to the output of this function with
the same arguments yields a diagonal matrix with the diagonal entries
of the input. However, :meth:`torch.diag_embed` has different default
dimensions, so those need to be explicitly specified.
Args:
{input} Must be at least 2-dimensional.
offset (int, optional): which diagonal to consider. Default: 0
(main diagonal).
dim1 (int, optional): first dimension with respect to which to
take diagonal. Default: 0.
dim2 (int, optional): second dimension with respect to which to
take diagonal. Default: 1.
.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1.
Examples::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0854, 1.1431, -0.1752],
[ 0.8536, -0.0905, 0.0360],
[ 0.6927, -0.3735, -0.4945]])
>>> torch.diagonal(a, 0)
tensor([-1.0854, -0.0905, -0.4945])
>>> torch.diagonal(a, 1)
tensor([ 1.1431, 0.0360])
>>> x = torch.randn(2, 5, 4, 2)
>>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
tensor([[[-1.2631, 0.3755, -1.5977, -1.8172],
[-1.1065, 1.0401, -0.2235, -0.7938]],
[[-1.7325, -0.3081, 0.6166, 0.2335],
[ 1.0500, 0.7336, -0.3836, -1.1015]]])
""".format(**common_args))
# Docstring attachments for digamma, p-norm distance, division, dot product,
# and eigendecomposition.
add_docstr(torch.digamma,
r"""
digamma(input, out=None) -> Tensor
Computes the logarithmic derivative of the gamma function on `input`.
.. math::
\psi(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)}
Args:
input (Tensor): the tensor to compute the digamma function on
Example::
>>> a = torch.tensor([1, 0.5])
>>> torch.digamma(a)
tensor([-0.5772, -1.9635])
""")
add_docstr(torch.dist,
r"""
dist(input, other, p=2) -> Tensor
Returns the p-norm of (:attr:`input` - :attr:`other`)
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
Args:
{input}
other (Tensor): the Right-hand-side input tensor
p (float, optional): the norm to be computed
Example::
>>> x = torch.randn(4)
>>> x
tensor([-1.5393, -0.8675, 0.5916, 1.6321])
>>> y = torch.randn(4)
>>> y
tensor([ 0.0967, -1.0511, 0.6295, 0.8360])
>>> torch.dist(x, y, 3.5)
tensor(1.6727)
>>> torch.dist(x, y, 3)
tensor(1.6973)
>>> torch.dist(x, y, 0)
tensor(inf)
>>> torch.dist(x, y, 1)
tensor(2.6537)
""".format(**common_args))
add_docstr(torch.div,
r"""
div(input, other, out=None) -> Tensor
Divides each element of the input ``input`` with the scalar ``other`` and
returns a new resulting tensor.
.. warning::
Integer division using div is no longer supported, and in a future release
div will perform true division as in Python 3. Use :func:`torch.true_divide`
or :func:`torch.floor_divide` (// in Python), instead.
.. math::
\text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}}}
If the :class:`torch.dtype` of ``input`` and ``other`` differ, the
:class:`torch.dtype` of the result tensor is determined following rules
described in the type promotion :ref:`documentation <type-promotion-doc>`. If
``out`` is specified, the result must be :ref:`castable <type-promotion-doc>`
to the :class:`torch.dtype` of the specified output tensor. Integral division
by zero leads to undefined behavior.
Args:
{input}
other (Number): the number to be divided to each element of ``input``
Keyword args:
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
>>> torch.div(a, 0.5)
tensor([ 0.7620, 2.5548, -0.5944, -0.7439, 0.9275])
.. function:: div(input, other, out=None) -> Tensor
Each element of the tensor ``input`` is divided by each element of the tensor
``other``. The resulting tensor is returned.
.. math::
\text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}
The shapes of ``input`` and ``other`` must be :ref:`broadcastable
<broadcasting-semantics>`. If the :class:`torch.dtype` of ``input`` and
``other`` differ, the :class:`torch.dtype` of the result tensor is determined
following rules described in the type promotion :ref:`documentation
<type-promotion-doc>`. If ``out`` is specified, the result must be
:ref:`castable <type-promotion-doc>` to the :class:`torch.dtype` of the
specified output tensor. Integral division by zero leads to undefined behavior.
Args:
input (Tensor): the numerator tensor
other (Tensor): the denominator tensor
Keyword args:
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
[ 0.1815, -1.0111, 0.9805, -1.5923],
[ 0.1062, 1.4581, 0.7759, -1.2344],
[-0.1830, -0.0313, 1.1908, -1.4757]])
>>> b = torch.randn(4)
>>> b
tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
>>> torch.div(a, b)
tensor([[-0.4620, -6.6051, 0.5676, 1.2637],
[ 0.2260, -3.4507, -1.2086, 6.8988],
[ 0.1322, 4.9764, -0.9564, 5.3480],
[-0.2278, -0.1068, -1.4678, 6.3936]])
""".format(**common_args))
add_docstr(torch.dot,
r"""
dot(input, tensor) -> Tensor
Computes the dot product (inner product) of two tensors.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Example::
>>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
tensor(7)
""")
add_docstr(torch.eig,
r"""
eig(input, eigenvectors=False, out=None) -> (Tensor, Tensor)
Computes the eigenvalues and eigenvectors of a real square matrix.
.. note::
Since eigenvalues and eigenvectors might be complex, backward pass is supported only
for :func:`torch.symeig`
Args:
input (Tensor): the square matrix of shape :math:`(n \times n)` for which the eigenvalues and eigenvectors
will be computed
eigenvectors (bool): ``True`` to compute both eigenvalues and eigenvectors;
otherwise, only eigenvalues will be computed
out (tuple, optional): the output tensors
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape :math:`(n \times 2)`. Each row is an eigenvalue of ``input``,
where the first element is the real part and the second element is the imaginary part.
The eigenvalues are not necessarily ordered.
- **eigenvectors** (*Tensor*): If ``eigenvectors=False``, it's an empty tensor.
Otherwise, this tensor of shape :math:`(n \times n)` can be used to compute normalized (unit length)
eigenvectors of corresponding eigenvalues as follows.
If the corresponding `eigenvalues[j]` is a real number, column `eigenvectors[:, j]` is the eigenvector
corresponding to `eigenvalues[j]`.
If the corresponding `eigenvalues[j]` and `eigenvalues[j + 1]` form a complex conjugate pair, then the
true eigenvectors can be computed as
:math:`\text{true eigenvector}[j] = eigenvectors[:, j] + i \times eigenvectors[:, j + 1]`,
:math:`\text{true eigenvector}[j + 1] = eigenvectors[:, j] - i \times eigenvectors[:, j + 1]`.
""")
# Docstring for torch.eq. Fix: the `out` description said "Must be a
# `ByteTensor`", which contradicts the documented `BoolTensor` return type
# below; aligned it to `BoolTensor`.
add_docstr(torch.eq,
r"""
eq(input, other, out=None) -> Tensor
Computes element-wise equality
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
{out} Must be a `BoolTensor`
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location where comparison is true
Example::
>>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[ True, False],
[False, True]])
""".format(**common_args))
# Docstring attachments for tensor equality and the error-function /
# exponential family of elementwise ops.
add_docstr(torch.equal,
r"""
equal(input, other) -> bool
``True`` if two tensors have the same size and elements, ``False`` otherwise.
Example::
>>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
True
""")
add_docstr(torch.erf,
r"""
erf(input, out=None) -> Tensor
Computes the error function of each element. The error function is defined as follows:
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.erf(torch.tensor([0, -1., 10.]))
tensor([ 0.0000, -0.8427, 1.0000])
""".format(**common_args))
add_docstr(torch.erfc,
r"""
erfc(input, out=None) -> Tensor
Computes the complementary error function of each element of :attr:`input`.
The complementary error function is defined as follows:
.. math::
\mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.erfc(torch.tensor([0, -1., 10.]))
tensor([ 1.0000, 1.8427, 0.0000])
""".format(**common_args))
add_docstr(torch.erfinv,
r"""
erfinv(input, out=None) -> Tensor
Computes the inverse error function of each element of :attr:`input`.
The inverse error function is defined in the range :math:`(-1, 1)` as:
.. math::
\mathrm{erfinv}(\mathrm{erf}(x)) = x
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.erfinv(torch.tensor([0, 0.5, -1.]))
tensor([ 0.0000, 0.4769, -inf])
""".format(**common_args))
add_docstr(torch.exp,
r"""
exp(input, out=None) -> Tensor
Returns a new tensor with the exponential of the elements
of the input tensor :attr:`input`.
.. math::
y_{i} = e^{x_{i}}
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.exp(torch.tensor([0, math.log(2.)]))
tensor([ 1., 2.])
""".format(**common_args))
add_docstr(torch.expm1,
r"""
expm1(input, out=None) -> Tensor
Returns a new tensor with the exponential of the elements minus 1
of :attr:`input`.
.. math::
y_{i} = e^{x_{i}} - 1
""" + r"""
Args:
{input}
{out}
Example::
>>> torch.expm1(torch.tensor([0, math.log(2.)]))
tensor([ 0., 1.])
""".format(**common_args))
# Docstring for torch.eye: identity-matrix factory; uses the shared
# factory_common_args placeholders (dtype/layout/device/requires_grad).
add_docstr(torch.eye,
r"""
eye(n, m=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows
m (int, optional): the number of columns with default being :attr:`n`
{out}
{dtype}
{layout}
{device}
{requires_grad}
Returns:
Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
Example::
>>> torch.eye(3)
tensor([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""".format(**factory_common_args))
# Docstring for torch.floor: elementwise floor (largest integer <= x).
add_docstr(torch.floor,
r"""
floor(input, out=None) -> Tensor
Returns a new tensor with the floor of the elements of :attr:`input`,
the largest integer less than or equal to each element.
.. math::
\text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.8166, 1.5308, -0.2530, -0.2091])
>>> torch.floor(a)
tensor([-1., 1., -1., -1.])
""".format(**common_args))
# Docstring for torch.floor_divide. NOTE(review): the doubled braces in the
# math segment below sit in the *unformatted* first string (the .format call
# binds only to the second segment), so they reach the doc build as literal
# {{...}} -- in LaTeX those render as redundant grouping, so output is
# unaffected, but single braces would be cleaner. Left as-is (runtime string).
add_docstr(torch.floor_divide,
r"""
floor_divide(input, other, out=None) -> Tensor
Return the division of the inputs rounded down to the nearest integer. See :func:`torch.div`
for type promotion and broadcasting rules.
.. math::
\text{{out}}_i = \left\lfloor \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right\rfloor
""" + r"""
Args:
input (Tensor): the numerator tensor
other (Tensor or Scalar): the denominator
Keyword args:
{out}
Example::
>>> a = torch.tensor([4.0, 3.0])
>>> b = torch.tensor([2.0, 2.0])
>>> torch.floor_divide(a, b)
tensor([2.0, 1.0])
>>> torch.floor_divide(a, 1.4)
tensor([2.0, 2.0])
""".format(**common_args))
# Docstring for torch.fmod: elementwise remainder with the dividend's sign.
# Fix: "may contain both for integer and floating point numbers" was
# ungrammatical ("both for" -> "both").
add_docstr(torch.fmod,
r"""
fmod(input, other, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both integer and floating point
numbers. The remainder has the same sign as the dividend :attr:`input`.
When :attr:`other` is a tensor, the shapes of :attr:`input` and
:attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the dividend
other (Tensor or float): the divisor, which may be either a number or a tensor of the same shape as the dividend
{out}
Example::
>>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([-1., -0., -1., 1., 0., 1.])
>>> torch.fmod(torch.tensor([1., 2, 3, 4, 5]), 1.5)
tensor([ 1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
""".format(**common_args))
# Docstring for torch.frac: elementwise fractional part.
add_docstr(torch.frac,
r"""
frac(input, out=None) -> Tensor
Computes the fractional portion of each element in :attr:`input`.
.. math::
\text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
Example::
>>> torch.frac(torch.tensor([1, 2.5, -3.2]))
tensor([ 0.0000, 0.5000, -0.2000])
""")
# Docstring for torch.from_numpy: zero-copy ndarray -> Tensor bridge
# (the string documents the shared-memory and supported-dtype contract).
add_docstr(torch.from_numpy,
r"""
from_numpy(ndarray) -> Tensor
Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
The returned tensor and :attr:`ndarray` share the same memory. Modifications to
the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
tensor is not resizable.
It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
and ``numpy.bool``.
Example::
>>> a = numpy.array([1, 2, 3])
>>> t = torch.from_numpy(a)
>>> t
tensor([ 1, 2, 3])
>>> t[0] = -1
>>> a
array([-1, 2, 3])
""")
# Docstring for torch.flatten: collapse a contiguous dim range into one dim.
add_docstr(torch.flatten,
r"""
flatten(input, start_dim=0, end_dim=-1) -> Tensor
Flattens a contiguous range of dims in a tensor.
Args:
{input}
start_dim (int): the first dim to flatten
end_dim (int): the last dim to flatten
Example::
>>> t = torch.tensor([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
>>> torch.flatten(t)
tensor([1, 2, 3, 4, 5, 6, 7, 8])
>>> torch.flatten(t, start_dim=1)
tensor([[1, 2, 3, 4],
[5, 6, 7, 8]])
""".format(**common_args))
# Docstring for torch.gather: index-driven selection along one axis.
add_docstr(torch.gather,
r"""
gather(input, dim, index, out=None, sparse_grad=False) -> Tensor
Gathers values along an axis specified by `dim`.
For a 3-D tensor the output is specified by::
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
If :attr:`input` is an n-dimensional tensor with size
:math:`(x_0, x_1..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})`
and ``dim = i``, then :attr:`index` must be an :math:`n`-dimensional tensor with
size :math:`(x_0, x_1, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})` where :math:`y \geq 1`
and :attr:`out` will have the same size as :attr:`index`.
""" + r"""
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (LongTensor): the indices of elements to gather
out (Tensor, optional): the destination tensor
sparse_grad(bool,optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
Example::
>>> t = torch.tensor([[1,2],[3,4]])
>>> torch.gather(t, 1, torch.tensor([[0,0],[1,0]]))
tensor([[ 1, 1],
[ 4, 3]])
""")
# Docstring for torch.gcd: elementwise greatest common divisor (integer dtypes).
add_docstr(torch.gcd,
r"""
gcd(input, other, out=None) -> Tensor
Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`gcd(0, 0) = 0`.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.gcd(a, b)
tensor([1, 2, 5])
>>> c = torch.tensor([3])
>>> torch.gcd(a, c)
tensor([1, 1, 3])
""".format(**common_args))
# Docstring for torch.ge: elementwise >= comparison returning a BoolTensor.
add_docstr(torch.ge,
r"""
ge(input, other, out=None) -> Tensor
Computes :math:`\text{input} \geq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `BoolTensor`
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location where comparison is true
Example::
>>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, True], [False, True]])
""")
# Docstring for torch.geqrf: low-level wrapper over LAPACK's ?geqrf.
# NOTE(review): the software.intel.com URL in the string below appears stale
# (Intel retired that documentation tree); consider retargeting it at the
# netlib LAPACK geqrf reference. Not changed here since the URL is part of the
# runtime docstring.
add_docstr(torch.geqrf,
r"""
geqrf(input, out=None) -> (Tensor, Tensor)
This is a low-level function for calling LAPACK directly. This function
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ .
You'll generally want to use :func:`torch.qr` instead.
Computes a QR decomposition of :attr:`input`, but without constructing
:math:`Q` and :math:`R` as explicit separate matrices.
Rather, this directly calls the underlying LAPACK function `?geqrf`
which produces a sequence of 'elementary reflectors'.
See `LAPACK documentation for geqrf`_ for further details.
Args:
input (Tensor): the input matrix
out (tuple, optional): the output tuple of (Tensor, Tensor)
.. _LAPACK documentation for geqrf:
https://software.intel.com/en-us/node/521004
""")
# Docstring for torch.ger: outer product of two 1-D vectors (no broadcasting).
add_docstr(torch.ger,
r"""
ger(input, vec2, out=None) -> Tensor
Outer product of :attr:`input` and :attr:`vec2`.
If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): 1-D input vector
vec2 (Tensor): 1-D input vector
out (Tensor, optional): optional output matrix
Example::
>>> v1 = torch.arange(1., 5.)
>>> v2 = torch.arange(1., 4.)
>>> torch.ger(v1, v2)
tensor([[ 1., 2., 3.],
[ 2., 4., 6.],
[ 3., 6., 9.],
[ 4., 8., 12.]])
""")
# Docstring for torch.solve: linear system AX = B with LU factorization.
# Fix: the signature line read "torch.solve(...)"; every other docstring in
# this file writes the bare function name, so drop the "torch." prefix.
add_docstr(torch.solve,
r"""
solve(input, A, out=None) -> (Tensor, Tensor)
This function returns the solution to the system of linear
equations represented by :math:`AX = B` and the LU factorization of
A, in order as a namedtuple `solution, LU`.
`LU` contains `L` and `U` factors for LU factorization of `A`.
`torch.solve(B, A)` can take in 2D inputs `B, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then returns
batched outputs `solution, LU`.
.. note::
Irrespective of the original strides, the returned matrices
`solution` and `LU` will be transposed, i.e. with strides like
`B.contiguous().transpose(-1, -2).stride()` and
`A.contiguous().transpose(-1, -2).stride()` respectively.
Args:
input (Tensor): input matrix :math:`B` of size :math:`(*, m, k)` , where :math:`*`
is zero or more batch dimensions.
A (Tensor): input square matrix of size :math:`(*, m, m)`, where
:math:`*` is zero or more batch dimensions.
out ((Tensor, Tensor), optional): optional output tuple.
Example::
>>> A = torch.tensor([[6.80, -2.11, 5.66, 5.97, 8.23],
[-6.05, -3.30, 5.36, -4.44, 1.08],
[-0.45, 2.58, -2.70, 0.27, 9.04],
[8.32, 2.71, 4.35, -7.17, 2.14],
[-9.67, -5.14, -7.26, 6.08, -6.87]]).t()
>>> B = torch.tensor([[4.02, 6.19, -8.22, -7.57, -3.03],
[-1.56, 4.00, -8.67, 1.75, 2.86],
[9.81, -4.09, -4.57, -8.61, 8.99]]).t()
>>> X, LU = torch.solve(B, A)
>>> torch.dist(B, torch.mm(A, X))
tensor(1.00000e-06 *
7.0977)
>>> # Batched solver example
>>> A = torch.randn(2, 3, 1, 4, 4)
>>> B = torch.randn(2, 3, 1, 4, 6)
>>> X, LU = torch.solve(B, A)
>>> torch.dist(B, A.matmul(X))
tensor(1.00000e-06 *
3.6386)
""")
# Docstring for torch.get_default_dtype: query the default float dtype.
add_docstr(torch.get_default_dtype,
r"""
get_default_dtype() -> torch.dtype
Get the current default floating point :class:`torch.dtype`.
Example::
>>> torch.get_default_dtype() # initial default for floating point is torch.float32
torch.float32
>>> torch.set_default_dtype(torch.float64)
>>> torch.get_default_dtype() # default is now changed to torch.float64
torch.float64
>>> torch.set_default_tensor_type(torch.FloatTensor) # setting tensor type also affects this
>>> torch.get_default_dtype() # changed to torch.float32, the dtype for torch.FloatTensor
torch.float32
""")
# Docstring for torch.get_num_threads: intra-op CPU thread count.
add_docstr(torch.get_num_threads,
r"""
get_num_threads() -> int
Returns the number of threads used for parallelizing CPU operations
""")
# Docstring for torch.get_num_interop_threads: inter-op CPU thread count.
add_docstr(torch.get_num_interop_threads,
r"""
get_num_interop_threads() -> int
Returns the number of threads used for inter-op parallelism on CPU
(e.g. in JIT interpreter)
""")
# Docstring for torch.gt: elementwise > comparison returning a BoolTensor.
add_docstr(torch.gt,
r"""
gt(input, other, out=None) -> Tensor
Computes :math:`\text{input} > \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `BoolTensor`
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location where comparison is true
Example::
>>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [False, False]])
""")
# Docstring for torch.histc: fixed-width histogram over [min, max].
# NOTE(review): min/max are documented as int but the example passes a float
# tensor; presumably they accept any Scalar -- confirm before changing.
add_docstr(torch.histc,
r"""
histc(input, bins=100, min=0, max=0, out=None) -> Tensor
Computes the histogram of a tensor.
The elements are sorted into equal width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.
Elements lower than min and higher than max are ignored.
Args:
{input}
bins (int): number of histogram bins
min (int): lower end of the range (inclusive)
max (int): upper end of the range (inclusive)
{out}
Returns:
Tensor: Histogram represented as a tensor
Example::
>>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
tensor([ 0., 2., 1., 0.])
""".format(**common_args))
# Docstring for torch.index_select: pick rows/columns along one dim by index.
add_docstr(torch.index_select,
r"""
index_select(input, dim, index, out=None) -> Tensor
Returns a new tensor which indexes the :attr:`input` tensor along dimension
:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
The returned tensor has the same number of dimensions as the original tensor
(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length
of :attr:`index`; other dimensions have the same size as in the original tensor.
.. note:: The returned tensor does **not** use the same storage as the original
tensor. If :attr:`out` has a different shape than expected, we
silently change it to the correct shape, reallocating the underlying
storage if necessary.
Args:
{input}
dim (int): the dimension in which we index
index (LongTensor): the 1-D tensor containing the indices to index
{out}
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-0.4664, 0.2647, -0.1228, -1.1068],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, 0, indices)
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> torch.index_select(x, 1, indices)
tensor([[ 0.1427, -0.5414],
[-0.4664, -0.1228],
[-1.1734, 0.7230]])
""".format(**common_args))
# Docstring for torch.inverse: (batched) square-matrix inversion.
add_docstr(torch.inverse,
r"""
inverse(input, out=None) -> Tensor
Takes the inverse of the square matrix :attr:`input`. :attr:`input` can be batches
of 2D square tensors, in which case this function would return a tensor composed of
individual inverses.
.. note::
Irrespective of the original strides, the returned tensors will be
transposed, i.e. with strides like `input.contiguous().transpose(-2, -1).stride()`
Args:
input (Tensor): the input tensor of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions
{out}
Example::
>>> x = torch.rand(4, 4)
>>> y = torch.inverse(x)
>>> z = torch.mm(x, y)
>>> z
tensor([[ 1.0000, -0.0000, -0.0000, 0.0000],
[ 0.0000, 1.0000, 0.0000, 0.0000],
[ 0.0000, 0.0000, 1.0000, 0.0000],
[ 0.0000, -0.0000, -0.0000, 1.0000]])
>>> torch.max(torch.abs(z - torch.eye(4))) # Max non-zero
tensor(1.1921e-07)
>>> # Batched inverse example
>>> x = torch.randn(2, 3, 4, 4)
>>> y = torch.inverse(x)
>>> z = torch.matmul(x, y)
>>> torch.max(torch.abs(z - torch.eye(4).expand_as(x))) # Max non-zero
tensor(1.9073e-06)
""".format(**common_args))
# Docstring for torch.isinf: elementwise +/-infinity test.
# Fixes: added the signature line every neighboring docstring has, normalized
# "Arguments:" to the file-wide "Args:", and repaired the malformed
# double-backtick span in the Returns line (it wrapped prose, not a literal).
add_docstr(torch.isinf,
r"""
isinf(tensor) -> Tensor
Returns a new tensor with boolean elements representing if each element is `+/-INF` or not.
Complex values are infinite when their real and/or imaginary part is infinite.
Args:
tensor (Tensor): A tensor to check
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location of `+/-INF` elements and False otherwise
Example::
>>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([False, True, False, True, False])
""")
# Docstring for torch.isclose: elementwise approximate-equality test.
add_docstr(torch.isclose,
r"""
isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
Returns a new tensor with boolean elements representing if each element of
:attr:`input` is "close" to the corresponding element of :attr:`other`.
Closeness is defined as:
.. math::
\lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
""" + r"""
where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
and/or :attr:`other` are nonfinite they are close if and only if
they are equal, with NaNs being considered equal to each other when
:attr:`equal_nan` is True.
Args:
input (Tensor): first tensor to compare
other (Tensor): second tensor to compare
atol (float, optional): absolute tolerance. Default: 1e-08
rtol (float, optional): relative tolerance. Default: 1e-05
equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
Examples::
>>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
tensor([ True, False, False])
>>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
tensor([True, True])
""")
# Docstring for torch.isfinite: elementwise finiteness test.
# Fixes (mirroring torch.isinf): added the missing signature line, normalized
# "Arguments:" to "Args:", and repaired the malformed double-backtick span in
# the Returns line.
add_docstr(torch.isfinite,
r"""
isfinite(tensor) -> Tensor
Returns a new tensor with boolean elements representing if each element is `finite` or not.
Real values are finite when they are not NaN, negative infinity, or infinity.
Complex values are finite when both their real and imaginary parts are finite.
Args:
tensor (Tensor): A tensor to check
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location of finite elements and False otherwise
Example::
>>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
tensor([True, False, True, False, False])
""")
# Docstring for torch.isnan: elementwise NaN test.
# Fixes: added the signature line its neighbors have and normalized
# "Arguments:" to the file-wide "Args:".
add_docstr(torch.isnan,
r"""
isnan(input) -> Tensor
Returns a new tensor with boolean elements representing if each element is `NaN` or not.
Complex values are considered `NaN` when either their real and/or imaginary part is NaN.
Args:
input (Tensor): A tensor to check
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location of `NaN` elements.
Example::
>>> torch.isnan(torch.tensor([1, float('nan'), 2]))
tensor([False, True, False])
""")
# Docstring for torch.is_floating_point: dtype predicate for float tensors.
add_docstr(torch.is_floating_point,
r"""
is_floating_point(input) -> (bool)
Returns True if the data type of :attr:`input` is a floating point data type i.e.,
one of ``torch.float64``, ``torch.float32`` and ``torch.float16``.
Args:
input (Tensor): the PyTorch tensor to test
""")
# Docstring for torch.is_complex: dtype predicate for complex tensors.
add_docstr(torch.is_complex,
r"""
is_complex(input) -> (bool)
Returns True if the data type of :attr:`input` is a complex data type i.e.,
one of ``torch.complex64``, and ``torch.complex128``.
Args:
input (Tensor): the PyTorch tensor to test
""")
# Docstring for torch.is_nonzero: single-element truthiness test (raises on
# tensors with numel != 1).
add_docstr(torch.is_nonzero,
r"""
is_nonzero(input) -> (bool)
Returns True if the :attr:`input` is a single element tensor which is not equal to zero
after type conversions.
i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
``torch.tensor([False])``.
Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case
of sparse tensors).
Args:
input (Tensor): the PyTorch tensor to test
Example::
>>> torch.is_nonzero(torch.tensor([0.]))
False
>>> torch.is_nonzero(torch.tensor([1.5]))
True
>>> torch.is_nonzero(torch.tensor([False]))
False
>>> torch.is_nonzero(torch.tensor([3]))
True
>>> torch.is_nonzero(torch.tensor([1, 3, 5]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with more than one value is ambiguous
>>> torch.is_nonzero(torch.tensor([]))
Traceback (most recent call last):
...
RuntimeError: bool value of Tensor with no values is ambiguous
""")
# Docstring for torch.kthvalue: k-th smallest value (and index) along a dim.
add_docstr(torch.kthvalue,
r"""
kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
smallest element of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each element found.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
are the same size as :attr:`input`, except in the dimension :attr:`dim` where
they are of size 1. Otherwise, :attr:`dim` is squeezed
(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
Args:
{input}
k (int): k for the k-th smallest element
dim (int, optional): the dimension to find the kth value along
{keepdim}
out (tuple, optional): the output tuple of (Tensor, LongTensor)
can be optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.kthvalue(x, 4)
torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
>>> x=torch.arange(1.,7.).resize_(2,3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.]])
>>> torch.kthvalue(x, 2, 0, True)
torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
""".format(**single_dim_common))
# Docstring for torch.lcm: elementwise least common multiple (integer dtypes).
add_docstr(torch.lcm,
r"""
lcm(input, other, out=None) -> Tensor
Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
Both :attr:`input` and :attr:`other` must have integer types.
.. note::
This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([5, 10, 15])
>>> b = torch.tensor([3, 4, 5])
>>> torch.lcm(a, b)
tensor([15, 20, 15])
>>> c = torch.tensor([3])
>>> torch.lcm(a, c)
tensor([15, 30, 15])
""".format(**common_args))
# Docstring for torch.le: elementwise <= comparison returning a BoolTensor.
add_docstr(torch.le,
r"""
le(input, other, out=None) -> Tensor
Computes :math:`\text{input} \leq \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `BoolTensor`
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location where comparison is true
Example::
>>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[True, False], [True, True]])
""")
# Docstring for torch.lerp: linear interpolation between two tensors.
# Fix: the signature line was missing the "-> Tensor" return annotation that
# every other elementwise-op docstring in this file carries.
add_docstr(torch.lerp,
r"""
lerp(input, end, weight, out=None) -> Tensor
Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
.. math::
\text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
""" + r"""
The shapes of :attr:`start` and :attr:`end` must be
:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the tensor with the starting points
end (Tensor): the tensor with the ending points
weight (float or tensor): the weight for the interpolation formula
{out}
Example::
>>> start = torch.arange(1., 5.)
>>> end = torch.empty(4).fill_(10)
>>> start
tensor([ 1., 2., 3., 4.])
>>> end
tensor([ 10., 10., 10., 10.])
>>> torch.lerp(start, end, 0.5)
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
>>> torch.lerp(start, end, torch.full_like(start, 0.5))
tensor([ 5.5000, 6.0000, 6.5000, 7.0000])
""".format(**common_args))
# Docstring for torch.lgamma: elementwise log-gamma.
add_docstr(torch.lgamma,
r"""
lgamma(input, out=None) -> Tensor
Computes the logarithm of the gamma function on :attr:`input`.
.. math::
\text{out}_{i} = \log \Gamma(\text{input}_{i})
""" + """
Args:
{input}
{out}
Example::
>>> a = torch.arange(0.5, 2, 0.5)
>>> torch.lgamma(a)
tensor([ 0.5724, 0.0000, -0.1208])
""".format(**common_args))
# Docstring for torch.linspace: evenly spaced 1-D points in [start, end].
add_docstr(torch.linspace,
r"""
linspace(start, end, steps=100, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a one-dimensional tensor of :attr:`steps`
equally spaced points between :attr:`start` and :attr:`end`.
The output tensor is 1-D of size :attr:`steps`.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): number of points to sample between :attr:`start`
and :attr:`end`. Default: ``100``.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000])
>>> torch.linspace(-10, 10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=5)
tensor([-10., -5., 0., 5., 10.])
>>> torch.linspace(start=-10, end=10, steps=1)
tensor([-10.])
""".format(**factory_common_args))
# Docstring for torch.log: elementwise natural logarithm.
add_docstr(torch.log,
r"""
log(input, out=None) -> Tensor
Returns a new tensor with the natural logarithm of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{e} (x_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])
>>> torch.log(a)
tensor([ nan, nan, nan, nan, nan])
""".format(**common_args))
# Docstring for torch.log10: elementwise base-10 logarithm.
add_docstr(torch.log10,
r"""
log10(input, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 10 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{10} (x_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251])
>>> torch.log10(a)
tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
""".format(**common_args))
# Docstring for torch.log1p: elementwise log(1 + x), accurate near zero.
add_docstr(torch.log1p,
r"""
log1p(input, out=None) -> Tensor
Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
.. math::
y_i = \log_{e} (x_i + 1)
""" + r"""
.. note:: This function is more accurate than :func:`torch.log` for small
values of :attr:`input`
Args:
{input}
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492])
>>> torch.log1p(a)
tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225])
""".format(**common_args))
# Docstring for torch.log2: elementwise base-2 logarithm.
add_docstr(torch.log2,
r"""
log2(input, out=None) -> Tensor
Returns a new tensor with the logarithm to the base 2 of the elements
of :attr:`input`.
.. math::
y_{i} = \log_{2} (x_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490])
>>> torch.log2(a)
tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
""".format(**common_args))
# Docstring for torch.logaddexp: numerically stable log(exp(x) + exp(y)).
add_docstr(torch.logaddexp,
r"""
logaddexp(input, other, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs.
Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
in statistics where the calculated probabilities of events may be so small as to
exceed the range of normal floating point numbers. In such cases the logarithm
of the calculated probability is stored. This function allows adding
probabilities stored in such a fashion.
This op should be disambiguated with :func:`torch.logsumexp` which performs a
reduction on a single tensor.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
Example::
>>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
tensor([-0.3069, -0.6867, -0.8731])
>>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
tensor([-1., -2., -3.])
>>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
""".format(**common_args))
# Docstring for torch.logaddexp2: base-2 variant of logaddexp.
add_docstr(torch.logaddexp2,
r"""
logaddexp2(input, other, out=None) -> Tensor
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
:func:`torch.logaddexp` for more details.
Args:
{input}
other (Tensor): the second input tensor
Keyword arguments:
{out}
""".format(**common_args))
# Docstring for torch.logical_and: elementwise boolean AND (nonzero == True).
add_docstr(torch.logical_and,
r"""
logical_and(input, other, out=None) -> Tensor
Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute AND with
{out}
Example::
>>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, False])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_and(a, b)
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b.double())
tensor([False, False, True, False])
>>> torch.logical_and(a.double(), b)
tensor([False, False, True, False])
>>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([False, False, True, False])
""".format(**common_args))
# Docstring for torch.logical_not: elementwise boolean NOT.
add_docstr(torch.logical_not,
r"""
logical_not(input, out=None) -> Tensor
Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool
dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
Args:
{input}
{out}
Example::
>>> torch.logical_not(torch.tensor([True, False]))
tensor([False, True])
>>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
tensor([ True, False, False])
>>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
tensor([1, 0, 0], dtype=torch.int16)
""".format(**common_args))
# Docstring for torch.logical_or: elementwise boolean OR (nonzero == True).
add_docstr(torch.logical_or,
r"""
logical_or(input, other, out=None) -> Tensor
Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute OR with
{out}
Example::
>>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([ True, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_or(a, b)
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b.double())
tensor([ True, True, True, False])
>>> torch.logical_or(a.double(), b)
tensor([ True, True, True, False])
>>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, True, False])
""".format(**common_args))
# Docstring for torch.logical_xor: elementwise boolean XOR (nonzero == True).
add_docstr(torch.logical_xor,
r"""
logical_xor(input, other, out=None) -> Tensor
Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
treated as ``True``.
Args:
{input}
other (Tensor): the tensor to compute XOR with
{out}
Example::
>>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
tensor([False, False, True])
>>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
>>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
>>> torch.logical_xor(a, b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b.double())
tensor([ True, True, False, False])
>>> torch.logical_xor(a.double(), b)
tensor([ True, True, False, False])
>>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
tensor([ True, True, False, False])
""".format(**common_args))
# Docstring for torch.logspace: logarithmically spaced 1-D points. The doubled
# braces here are required: the whole string goes through .format(), which
# collapses them to single braces for the math markup.
add_docstr(torch.logspace,
r"""
logspace(start, end, steps=100, base=10.0, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a one-dimensional tensor of :attr:`steps` points
logarithmically spaced with base :attr:`base` between
:math:`{{\text{{base}}}}^{{\text{{start}}}}` and :math:`{{\text{{base}}}}^{{\text{{end}}}}`.
The output tensor is 1-D of size :attr:`steps`.
Args:
start (float): the starting value for the set of points
end (float): the ending value for the set of points
steps (int): number of points to sample between :attr:`start`
and :attr:`end`. Default: ``100``.
base (float): base of the logarithm function. Default: ``10.0``.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.logspace(start=-10, end=10, steps=5)
tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
>>> torch.logspace(start=0.1, end=1.0, steps=5)
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
>>> torch.logspace(start=0.1, end=1.0, steps=1)
tensor([1.2589])
>>> torch.logspace(start=2, end=2, steps=1, base=2)
tensor([4.0])
""".format(**factory_common_args))
add_docstr(torch.logsumexp,
r"""
logsumexp(input, dim, keepdim=False, out=None)
Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.
For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
.. math::
\text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
{out}
Example::
>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([ 0.8442, 1.4322, 0.8711])
""".format(**multi_dim_common))
# Registers the docstring for torch.lstsq (least-squares / least-norm solver).
# No .format() call: this raw string contains literal single braces in its
# LaTeX (\begin{array}), so it must not be passed through str.format.
add_docstr(torch.lstsq,
r"""
lstsq(input, A, out=None) -> Tensor
Computes the solution to the least squares and least norm problems for a full
rank matrix :math:`A` of size :math:`(m \times n)` and a matrix :math:`B` of
size :math:`(m \times k)`.
If :math:`m \geq n`, :func:`lstsq` solves the least-squares problem:
.. math::
\begin{array}{ll}
\min_X & \|AX-B\|_2.
\end{array}
If :math:`m < n`, :func:`lstsq` solves the least-norm problem:
.. math::
\begin{array}{ll}
\min_X & \|X\|_2 & \text{subject to} & AX = B.
\end{array}
Returned tensor :math:`X` has shape :math:`(\max(m, n) \times k)`. The first :math:`n`
rows of :math:`X` contains the solution. If :math:`m \geq n`, the residual sum of squares
for the solution in each column is given by the sum of squares of elements in the
remaining :math:`m - n` rows of that column.
.. note::
The case when :math:`m < n` is not supported on the GPU.
Args:
input (Tensor): the matrix :math:`B`
A (Tensor): the :math:`m` by :math:`n` matrix :math:`A`
out (tuple, optional): the optional destination tensor
Returns:
(Tensor, Tensor): A namedtuple (solution, QR) containing:
- **solution** (*Tensor*): the least squares solution
- **QR** (*Tensor*): the details of the QR factorization
.. note::
The returned matrices will always be transposed, irrespective of the strides
of the input matrices. That is, they will have stride `(1, m)` instead of
`(m, 1)`.
Example::
>>> A = torch.tensor([[1., 1, 1],
[2, 3, 4],
[3, 5, 2],
[4, 2, 5],
[5, 4, 3]])
>>> B = torch.tensor([[-10., -3],
[ 12, 14],
[ 14, 12],
[ 16, 16],
[ 18, 16]])
>>> X, _ = torch.lstsq(B, A)
>>> X
tensor([[ 2.0000, 1.0000],
[ 1.0000, 1.0000],
[ 1.0000, 2.0000],
[ 10.9635, 4.8501],
[ 8.9332, 5.2418]])
""")
# Registers docstrings for torch.lt (element-wise less-than comparison) and
# torch.lu_solve (solve Ax = b from a torch.lu factorization).
add_docstr(torch.lt,
r"""
lt(input, other, out=None) -> Tensor
Computes :math:`\text{input} < \text{other}` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `BoolTensor`
Returns:
Tensor: A `torch.BoolTensor` containing a True at each location where comparison is true
Example::
>>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, False], [True, False]])
""")
add_docstr(torch.lu_solve,
r"""
lu_solve(input, LU_data, LU_pivots, out=None) -> Tensor
Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
LU factorization of A from :meth:`torch.lu`.
Arguments:
b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
is zero or more batch dimensions.
LU_data (Tensor): the pivoted LU factorization of A from :meth:`torch.lu` of size :math:`(*, m, m)`,
where :math:`*` is zero or more batch dimensions.
LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`torch.lu` of size :math:`(*, m)`,
where :math:`*` is zero or more batch dimensions.
The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
:attr:`LU_data`.
{out}
Example::
>>> A = torch.randn(2, 3, 3)
>>> b = torch.randn(2, 3, 1)
>>> A_LU = torch.lu(A)
>>> x = torch.lu_solve(b, *A_LU)
>>> torch.norm(torch.bmm(A, x) - b)
tensor(1.00000e-07 *
2.8312)
""".format(**common_args))
# Registers docstrings for torch.masked_select, torch.matrix_rank and
# torch.matrix_power.
add_docstr(torch.masked_select,
r"""
masked_select(input, mask, out=None) -> Tensor
Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
the boolean mask :attr:`mask` which is a `BoolTensor`.
The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. note:: The returned tensor does **not** use the same storage
as the original tensor
Args:
{input}
mask (BoolTensor): the tensor containing the binary mask to index with
{out}
Example::
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297, 0.3477],
[-1.2035, 1.2252, 0.5002, 0.6248],
[ 0.1307, -2.0608, 0.1244, 2.0139]])
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
[False, True, True, True],
[False, False, False, True]])
>>> torch.masked_select(x, mask)
tensor([ 1.2252, 0.5002, 0.6248, 2.0139])
""".format(**common_args))
add_docstr(torch.matrix_rank,
r"""
matrix_rank(input, tol=None, symmetric=False) -> Tensor
Returns the numerical rank of a 2-D tensor. The method to compute the
matrix rank is done using SVD by default. If :attr:`symmetric` is ``True``,
then :attr:`input` is assumed to be symmetric, and the computation of the
rank is done by obtaining the eigenvalues.
:attr:`tol` is the threshold below which the singular values (or the eigenvalues
when :attr:`symmetric` is ``True``) are considered to be 0. If :attr:`tol` is not
specified, :attr:`tol` is set to ``S.max() * max(S.size()) * eps`` where `S` is the
singular values (or the eigenvalues when :attr:`symmetric` is ``True``), and ``eps``
is the epsilon value for the datatype of :attr:`input`.
Args:
input (Tensor): the input 2-D tensor
tol (float, optional): the tolerance value. Default: ``None``
symmetric(bool, optional): indicates whether :attr:`input` is symmetric.
Default: ``False``
Example::
>>> a = torch.eye(10)
>>> torch.matrix_rank(a)
tensor(10)
>>> b = torch.eye(10)
>>> b[0, 0] = 0
>>> torch.matrix_rank(b)
tensor(9)
""")
add_docstr(torch.matrix_power,
r"""
matrix_power(input, n) -> Tensor
Returns the matrix raised to the power :attr:`n` for square matrices.
For batch of matrices, each individual matrix is raised to the power :attr:`n`.
If :attr:`n` is negative, then the inverse of the matrix (if invertible) is
raised to the power :attr:`n`. For a batch of matrices, the batched inverse
(if invertible) is raised to the power :attr:`n`. If :attr:`n` is 0, then an identity matrix
is returned.
Args:
{input}
n (int): the power to raise the matrix to
Example::
>>> a = torch.randn(2, 2, 2)
>>> a
tensor([[[-1.9975, -1.9610],
[ 0.9592, -2.3364]],
[[-1.2534, -1.3429],
[ 0.4153, -1.4664]]])
>>> torch.matrix_power(a, 3)
tensor([[[ 3.9392, -23.9916],
[ 11.7357, -0.2070]],
[[ 0.2468, -6.7168],
[ 2.0774, -0.8187]]])
""".format(**common_args))
# Registers the docstring for torch.max. One string documents all three
# overloads: reduction over all elements, reduction along a dim (returning a
# (values, indices) namedtuple), and element-wise maximum of two tensors.
add_docstr(torch.max,
r"""
max(input) -> Tensor
Returns the maximum value of all elements in the ``input`` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``max(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6763, 0.7445, -2.2369]])
>>> torch.max(a)
tensor(0.7445)
.. function:: max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each maximum value found
(argmax).
.. warning::
``indices`` does not necessarily contain the first occurrence of each
maximal value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
If ``keepdim`` is ``True``, the output tensors are of the same size
as ``input`` except in the dimension ``dim`` where they are of size 1.
Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than ``input``.
Args:
{input}
{dim}
{keepdim} Default: ``False``.
out (tuple, optional): the result tuple of two output tensors (max, max_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.2360, -0.2942, -0.1222, 0.8475],
[ 1.1949, -1.1127, -2.2379, -0.6702],
[ 1.5717, -0.9207, 0.1297, -1.8768],
[-0.6172, 1.0036, -0.6060, -0.2432]])
>>> torch.max(a, 1)
torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
.. function:: max(input, other, out=None) -> Tensor
Each element of the tensor ``input`` is compared with the corresponding
element of the tensor ``other`` and an element-wise maximum is taken.
The shapes of ``input`` and ``other`` don't need to match,
but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. math::
\text{{out}}_i = \max(\text{{tensor}}_i, \text{{other}}_i)
.. note:: When the shapes do not match, the shape of the returned output tensor
follows the :ref:`broadcasting rules <broadcasting-semantics>`.
Args:
{input}
other (Tensor): the second input tensor
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.2942, -0.7416, 0.2653, -0.1584])
>>> b = torch.randn(4)
>>> b
tensor([ 0.8722, -1.7421, -0.4141, -0.5055])
>>> torch.max(a, b)
tensor([ 0.8722, -0.7416, 0.2653, -0.1584])
""".format(**single_dim_common))
# Registers docstrings for torch.argmax (full-tensor and per-dim overloads)
# and torch.mean (full-tensor and per-dim overloads).
add_docstr(torch.argmax,
r"""
argmax(input) -> LongTensor
Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
Args:
{input}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a)
tensor(0)
.. function:: argmax(input, dim, keepdim=False) -> LongTensor
Returns the indices of the maximum values of a tensor across a dimension.
This is the second value returned by :meth:`torch.max`. See its
documentation for the exact semantics of this method.
Args:
{input}
{dim} If ``None``, the argmax of the flattened input is returned.
{keepdim} Ignored if ``dim=None``.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[ 0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195]])
>>> torch.argmax(a, dim=1)
tensor([ 0, 2, 0, 1])
""".format(**single_dim_common))
add_docstr(torch.mean,
r"""
mean(input) -> Tensor
Returns the mean value of all elements in the :attr:`input` tensor.
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.2294, -0.5481, 1.3288]])
>>> torch.mean(a)
tensor(0.3367)
.. function:: mean(input, dim, keepdim=False, out=None) -> Tensor
Returns the mean value of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3841, 0.6320, 0.4254, -0.7384],
[-0.9644, 1.0131, -0.6549, -1.4279],
[-0.2951, -1.3350, -0.7694, 0.5600],
[ 1.0842, -0.9580, 0.3623, 0.2343]])
>>> torch.mean(a, 1)
tensor([-0.0163, -0.5085, -0.4599, 0.1807])
>>> torch.mean(a, 1, True)
tensor([[-0.0163],
[-0.5085],
[-0.4599],
[ 0.1807]])
""".format(**multi_dim_common))
# Registers the docstring for torch.median (full-tensor and per-dim
# overloads). Fix: the ``out`` tuple description previously said
# "(max, max_indices)" — a copy-paste from torch.max; median returns
# (values, indices), matching the namedtuple described above it.
add_docstr(torch.median,
r"""
median(input) -> Tensor
Returns the median value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``median(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 1.5219, -1.5212, 0.2202]])
>>> torch.median(a)
tensor(0.2202)
.. function:: median(input, dim=-1, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the median
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each median value found.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size
as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the outputs tensor having 1 fewer dimension than :attr:`input`.
.. warning::
``indices`` does not necessarily contain the first occurrence of each
median value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
Args:
{input}
{dim}
{keepdim}
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randn(4, 5)
>>> a
tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131],
[ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270],
[-0.2751, 0.7303, 0.2192, 0.3321, 0.2488],
[ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]])
>>> torch.median(a, 1)
torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3]))
""".format(**single_dim_common))
# Registers docstrings for torch.min (three overloads, mirroring torch.max)
# and torch.argmin (full-tensor and per-dim overloads).
add_docstr(torch.min,
r"""
min(input) -> Tensor
Returns the minimum value of all elements in the :attr:`input` tensor.
.. warning::
This function produces deterministic (sub)gradients unlike ``min(dim=0)``
Args:
{input}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.6750, 1.0857, 1.7197]])
>>> torch.min(a)
tensor(0.6750)
.. function:: min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`. And ``indices`` is the index location of each minimum value found
(argmin).
.. warning::
``indices`` does not necessarily contain the first occurrence of each
minimal value found, unless it is unique.
The exact implementation details are device-specific.
Do not expect the same result when run on CPU and GPU in general.
For the same reason do not expect the gradients to be deterministic.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensors having 1 fewer dimension than :attr:`input`.
Args:
{input}
{dim}
{keepdim}
out (tuple, optional): the tuple of two output tensors (min, min_indices)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.6248, 1.1334, -1.1899, -0.2803],
[-1.4644, -0.2635, -0.3651, 0.6134],
[ 0.2457, 0.0384, 1.0128, 0.7015],
[-0.1153, 2.9849, 2.1458, 0.5788]])
>>> torch.min(a, 1)
torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
.. function:: min(input, other, out=None) -> Tensor
Each element of the tensor :attr:`input` is compared with the corresponding
element of the tensor :attr:`other` and an element-wise minimum is taken.
The resulting tensor is returned.
The shapes of :attr:`input` and :attr:`other` don't need to match,
but they must be :ref:`broadcastable <broadcasting-semantics>`.
.. math::
\text{{out}}_i = \min(\text{{tensor}}_i, \text{{other}}_i)
.. note:: When the shapes do not match, the shape of the returned output tensor
follows the :ref:`broadcasting rules <broadcasting-semantics>`.
Args:
{input}
other (Tensor): the second input tensor
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.8137, -1.1740, -0.6460, 0.6308])
>>> b = torch.randn(4)
>>> b
tensor([-0.1369, 0.1555, 0.4019, -0.1929])
>>> torch.min(a, b)
tensor([-0.1369, -1.1740, -0.6460, -0.1929])
""".format(**single_dim_common))
add_docstr(torch.argmin,
r"""
argmin(input) -> LongTensor
Returns the indices of the minimum value of all elements in the :attr:`input` tensor.
This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.
Args:
{input}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
[ 1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[ 1.7809, -1.2960, 0.9384, 0.1438]])
>>> torch.argmin(a)
tensor(13)
.. function:: argmin(input, dim, keepdim=False, out=None) -> LongTensor
Returns the indices of the minimum values of a tensor across a dimension.
This is the second value returned by :meth:`torch.min`. See its
documentation for the exact semantics of this method.
Args:
{input}
{dim} If ``None``, the argmin of the flattened input is returned.
{keepdim} Ignored if ``dim=None``.
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.1139, 0.2254, -0.1381, 0.3687],
[ 1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[ 1.7809, -1.2960, 0.9384, 0.1438]])
>>> torch.argmin(a, dim=1)
tensor([ 2, 1, 3, 1])
""".format(**single_dim_common))
# Registers the docstring for torch.mm (non-broadcasting 2-D matrix multiply).
add_docstr(torch.mm,
r"""
mm(input, mat2, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
For broadcasting matrix products, see :func:`torch.matmul`.
Args:
input (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
{out}
Example::
>>> mat1 = torch.randn(2, 3)
>>> mat2 = torch.randn(3, 3)
>>> torch.mm(mat1, mat2)
tensor([[ 0.4851, 0.5037, -0.3633],
[-0.0760, -3.6705, 2.4784]])
""".format(**common_args))
# Registers the docstring for torch.matmul (general, broadcasting matrix
# product). Fix: "batched matrix multiple" was a typo for "batched matrix
# multiply" (matching the same phrase two sentences earlier).
add_docstr(torch.matmul,
r"""
matmul(input, other, out=None) -> Tensor
Matrix product of two tensors.
The behavior depends on the dimensionality of the tensors as follows:
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is 2-dimensional,
a 1 is prepended to its dimension for the purpose of the matrix multiply.
After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is 1-dimensional,
the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first
argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
batched matrix multiply and removed after. If the second argument is 1-dimensional, a
1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
must be broadcastable). For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
tensor, :attr:`out` will be an :math:`(j \times k \times n \times p)` tensor.
.. note::
The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
Arguments:
input (Tensor): the first tensor to be multiplied
other (Tensor): the second tensor to be multiplied
{out}
Example::
>>> # vector x vector
>>> tensor1 = torch.randn(3)
>>> tensor2 = torch.randn(3)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([])
>>> # matrix x vector
>>> tensor1 = torch.randn(3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([3])
>>> # batched matrix x broadcasted vector
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3])
>>> # batched matrix x batched matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(10, 4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
>>> # batched matrix x broadcasted matrix
>>> tensor1 = torch.randn(10, 3, 4)
>>> tensor2 = torch.randn(4, 5)
>>> torch.matmul(tensor1, tensor2).size()
torch.Size([10, 3, 5])
""".format(**common_args))
# Registers the docstring for torch.mode (per-dim most-frequent value).
add_docstr(torch.mode,
r"""
mode(input, dim=-1, keepdim=False, out=None) -> (Tensor, LongTensor)
Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, i.e. a value which appears most often
in that row, and ``indices`` is the index location of each mode value found.
By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
If :attr:`keepdim` is ``True``, the output tensors are of the same size as
:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
in the output tensors having 1 fewer dimension than :attr:`input`.
.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
Args:
{input}
{dim}
{keepdim}
out (tuple, optional): the result tuple of two output tensors (values, indices)
Example::
>>> a = torch.randint(10, (5,))
>>> a
tensor([6, 5, 1, 0, 2])
>>> b = a + (torch.randn(50, 1) * 5).long()
>>> torch.mode(b, 0)
torch.return_types.mode(values=tensor([6, 5, 1, 0, 2]), indices=tensor([2, 2, 2, 2, 2]))
""".format(**single_dim_common))
# Registers the docstring for torch.mul (scalar and element-wise overloads).
# Fix: the scalar overload's Args documented a parameter named ``value``,
# but the signature on the first line and the math both use ``other``;
# renamed the Args entry to ``other`` for consistency.
add_docstr(torch.mul,
r"""
mul(input, other, out=None)
Multiplies each element of the input :attr:`input` with the scalar
:attr:`other` and returns a new resulting tensor.
.. math::
\text{out}_i = \text{other} \times \text{input}_i
""" + r"""
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, :attr:`other`
should be a real number, otherwise it should be an integer
Args:
{input}
other (Number): the number to be multiplied to each element of :attr:`input`
{out}
Example::
>>> a = torch.randn(3)
>>> a
tensor([ 0.2015, -0.4255, 2.6087])
>>> torch.mul(a, 100)
tensor([ 20.1494, -42.5491, 260.8663])
.. function:: mul(input, other, out=None)
Each element of the tensor :attr:`input` is multiplied by the corresponding
element of the Tensor :attr:`other`. The resulting tensor is returned.
The shapes of :attr:`input` and :attr:`other` must be
:ref:`broadcastable <broadcasting-semantics>`.
.. math::
\text{out}_i = \text{input}_i \times \text{other}_i
""" + r"""
Args:
input (Tensor): the first multiplicand tensor
other (Tensor): the second multiplicand tensor
{out}
Example::
>>> a = torch.randn(4, 1)
>>> a
tensor([[ 1.1207],
[-0.3137],
[ 0.0700],
[ 0.8378]])
>>> b = torch.randn(1, 4)
>>> b
tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]])
>>> torch.mul(a, b)
tensor([[ 0.5767, 0.1363, -0.5877, 2.5083],
[-0.1614, -0.0382, 0.1645, -0.7021],
[ 0.0360, 0.0085, -0.0367, 0.1567],
[ 0.4312, 0.1019, -0.4394, 1.8753]])
""".format(**common_args))
# Registers docstrings for torch.multinomial (sampling from per-row
# categorical distributions) and torch.mv (matrix-vector product).
add_docstr(torch.multinomial,
r"""
multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
Returns a tensor where each row contains :attr:`num_samples` indices sampled
from the multinomial probability distribution located in the corresponding row
of tensor :attr:`input`.
.. note::
The rows of :attr:`input` do not need to sum to one (in which case we use
the values as weights), but must be non-negative, finite and have
a non-zero sum.
Indices are ordered from left to right according to when each was sampled
(first samples are placed in first column).
If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
If :attr:`input` is a matrix with `m` rows, :attr:`out` is an matrix of shape
:math:`(m \times \text{{num\_samples}})`.
If replacement is ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
.. note::
When drawn without replacement, :attr:`num_samples` must be lower than
number of non-zero elements in :attr:`input` (or the min number of non-zero
elements in each row of :attr:`input` if it is a matrix).
Args:
input (Tensor): the input tensor containing probabilities
num_samples (int): number of samples to draw
replacement (bool, optional): whether to draw with replacement or not
{generator}
{out}
Example::
>>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
>>> torch.multinomial(weights, 2)
tensor([1, 2])
>>> torch.multinomial(weights, 4) # ERROR!
RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False,
not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320
>>> torch.multinomial(weights, 4, replacement=True)
tensor([ 2, 1, 1, 1])
""".format(**common_args))
add_docstr(torch.mv,
r"""
mv(input, vec, out=None) -> Tensor
Performs a matrix-vector product of the matrix :attr:`input` and the vector
:attr:`vec`.
If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
Args:
input (Tensor): matrix to be multiplied
vec (Tensor): vector to be multiplied
{out}
Example::
>>> mat = torch.randn(2, 3)
>>> vec = torch.randn(3)
>>> torch.mv(mat, vec)
tensor([ 1.0404, -0.6361])
""".format(**common_args))
# Registers the docstring for torch.mvlgamma (multivariate log-gamma).
# Fix: removed the stray unmatched ")" that followed the hyperlink target
# ("...gamma_function>`_)" had no matching opening parenthesis).
add_docstr(torch.mvlgamma,
r"""
mvlgamma(input, p) -> Tensor
Computes the `multivariate log-gamma function
<https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension
:math:`p` element-wise, given by
.. math::
\log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right)
where :math:`C = \log(\pi) \times \frac{p (p - 1)}{4}` and :math:`\Gamma(\cdot)` is the Gamma function.
All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise an error would be thrown.
Args:
input (Tensor): the tensor to compute the multivariate log-gamma function
p (int): the number of dimensions
Example::
>>> a = torch.empty(2, 3).uniform_(1, 2)
>>> a
tensor([[1.6835, 1.8474, 1.1929],
[1.0475, 1.7162, 1.4180]])
>>> torch.mvlgamma(a, 2)
tensor([[0.3928, 0.4007, 0.7586],
[1.0311, 0.3901, 0.5049]])
""")
# Registers the docstring for torch.narrow. Fix: ``start`` and ``length``
# were mis-described as "the starting dimension" / "the distance to the
# ending dimension"; they are actually the starting index within ``dim``
# and the number of elements kept, as the examples below demonstrate
# (narrow(x, 0, 0, 2) keeps rows 0..1; narrow(x, 1, 1, 2) keeps cols 1..2).
add_docstr(torch.narrow,
r"""
narrow(input, dim, start, length) -> Tensor
Returns a new tensor that is a narrowed version of :attr:`input` tensor. The
dimension :attr:`dim` is input from :attr:`start` to :attr:`start + length`. The
returned tensor and :attr:`input` tensor share the same underlying storage.
Args:
input (Tensor): the tensor to narrow
dim (int): the dimension along which to narrow
start (int): the index of the element to start the narrowed dimension from
length (int): the length of the narrowed dimension
Example::
>>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> torch.narrow(x, 0, 0, 2)
tensor([[ 1, 2, 3],
[ 4, 5, 6]])
>>> torch.narrow(x, 1, 1, 2)
tensor([[ 2, 3],
[ 5, 6],
[ 8, 9]])
""")
# Registers docstrings for torch.ne (element-wise not-equal), torch.neg
# (element-wise negation) and torch.nonzero (indices of nonzero elements,
# with both as_tuple=False and as_tuple=True behaviors documented).
add_docstr(torch.ne,
r"""
ne(input, other, out=None) -> Tensor
Computes :math:`input \neq other` element-wise.
The second argument can be a number or a tensor whose shape is
:ref:`broadcastable <broadcasting-semantics>` with the first argument.
Args:
input (Tensor): the tensor to compare
other (Tensor or float): the tensor or value to compare
out (Tensor, optional): the output tensor that must be a `BoolTensor`
Returns:
Tensor: A ``torch.BoolTensor`` containing a True at each location where comparison is true.
Example::
>>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
tensor([[False, True], [True, False]])
""")
add_docstr(torch.neg,
r"""
neg(input, out=None) -> Tensor
Returns a new tensor with the negative of the elements of :attr:`input`.
.. math::
\text{out} = -1 \times \text{input}
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(5)
>>> a
tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940])
>>> torch.neg(a)
tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940])
""".format(**common_args))
add_docstr(torch.nonzero,
r"""
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
.. note::
:func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
2-D tensor where each row is the index for a nonzero value.
:func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
contains nonzero indices for a certain dimension.
See below for more details on the two behaviors.
**When** :attr:`as_tuple` **is ``False`` (default)**:
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
**When** :attr:`as_tuple` **is ``True``**:
Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input` .
If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.
Args:
{input}
out (LongTensor, optional): the output tensor containing indices
Returns:
LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.
Example::
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
tensor([[ 0],
[ 1],
[ 2],
[ 4]])
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.0, 0.0, 1.2, 0.0],
[0.0, 0.0, 0.0,-0.4]]))
tensor([[ 0, 0],
[ 1, 1],
[ 2, 2],
[ 3, 3]])
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4]),)
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.0, 0.0, 1.2, 0.0],
[0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
>>> torch.nonzero(torch.tensor(5), as_tuple=True)
(tensor([0]),)
""".format(**common_args))
# Registers the docstring for torch.normal. One string documents all four
# overloads: per-element (mean tensor, std tensor), (scalar mean, std tensor),
# (mean tensor, scalar std), and (scalar mean, scalar std, explicit size).
add_docstr(torch.normal,
r"""
normal(mean, std, *, generator=None, out=None) -> Tensor
Returns a tensor of random numbers drawn from separate normal distributions
whose mean and standard deviation are given.
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution
The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution
The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor need to be the same.
.. note:: When the shapes do not match, the shape of :attr:`mean`
is used as the shape for the returned output tensor
Args:
mean (Tensor): the tensor of per-element means
std (Tensor): the tensor of per-element standard deviations
{generator}
{out}
Example::
>>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134,
8.0505, 8.1408, 9.0563, 10.0566])
.. function:: normal(mean=0.0, std, out=None) -> Tensor
Similar to the function above, but the means are shared among all drawn
elements.
Args:
mean (float, optional): the mean for all distributions
std (Tensor): the tensor of per-element standard deviations
{out}
Example::
>>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303])
.. function:: normal(mean, std=1.0, out=None) -> Tensor
Similar to the function above, but the standard-deviations are shared among
all drawn elements.
Args:
mean (Tensor): the tensor of per-element means
std (float, optional): the standard deviation for all distributions
out (Tensor, optional): the output tensor
Example::
>>> torch.normal(mean=torch.arange(1., 6.))
tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361])
.. function:: normal(mean, std, size, *, out=None) -> Tensor
Similar to the function above, but the means and standard deviations are shared
among all drawn elements. The resulting tensor has size given by :attr:`size`.
Args:
mean (float): the mean for all distributions
std (float): the standard deviation for all distributions
size (int...): a sequence of integers defining the shape of the output tensor.
{out}
Example::
>>> torch.normal(2, 3, size=(1, 4))
tensor([[-1.3987, -1.9544, 3.6048, 0.7909]])
""".format(**common_args))
add_docstr(torch.numel,
r"""
numel(input) -> int
Returns the total number of elements in the :attr:`input` tensor.
Args:
{input}
Example::
>>> a = torch.randn(1, 2, 3, 4, 5)
>>> torch.numel(a)
120
>>> a = torch.zeros(4,4)
>>> torch.numel(a)
16
""".format(**common_args))
add_docstr(torch.ones,
r"""
ones(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `1`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.ones(2, 3)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> torch.ones(5)
tensor([ 1., 1., 1., 1., 1.])
""".format(**factory_common_args))
add_docstr(torch.ones_like,
r"""
ones_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `1`, with the same size as
:attr:`input`. ``torch.ones_like(input)`` is equivalent to
``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.ones_like(input, out=output)`` is equivalent to
``torch.ones(input.size(), out=output)``.
Args:
{input}
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> input = torch.empty(2, 3)
>>> torch.ones_like(input)
tensor([[ 1., 1., 1.],
[ 1., 1., 1.]])
""".format(**factory_like_common_args))
add_docstr(torch.orgqr,
r"""
orgqr(input, input2) -> Tensor
Computes the orthogonal matrix `Q` of a QR factorization, from the `(input, input2)`
tuple returned by :func:`torch.geqrf`.
This directly calls the underlying LAPACK function `?orgqr`.
See `LAPACK documentation for orgqr`_ for further details.
Args:
input (Tensor): the `a` from :func:`torch.geqrf`.
input2 (Tensor): the `tau` from :func:`torch.geqrf`.
.. _LAPACK documentation for orgqr:
https://software.intel.com/en-us/mkl-developer-reference-c-orgqr
""")
add_docstr(torch.ormqr,
r"""
ormqr(input, input2, input3, left=True, transpose=False) -> Tensor
Multiplies `mat` (given by :attr:`input3`) by the orthogonal `Q` matrix of the QR factorization
formed by :func:`torch.geqrf` that is represented by `(a, tau)` (given by (:attr:`input`, :attr:`input2`)).
This directly calls the underlying LAPACK function `?ormqr`.
See `LAPACK documentation for ormqr`_ for further details.
Args:
input (Tensor): the `a` from :func:`torch.geqrf`.
input2 (Tensor): the `tau` from :func:`torch.geqrf`.
input3 (Tensor): the matrix to be multiplied.
.. _LAPACK documentation for ormqr:
https://software.intel.com/en-us/mkl-developer-reference-c-ormqr
""")
add_docstr(torch.poisson,
r"""
poisson(input, *, generator=None) -> Tensor
Returns a tensor of the same size as :attr:`input` with each element
sampled from a Poisson distribution with rate parameter given by the corresponding
element in :attr:`input` i.e.,
.. math::
\text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)
Args:
input (Tensor): the input tensor containing the rates of the Poisson distribution
{generator}
Example::
>>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5
>>> torch.poisson(rates)
tensor([[9., 1., 3., 5.],
[8., 6., 6., 0.],
[0., 4., 5., 3.],
[2., 1., 4., 2.]])
""".format(**common_args))
add_docstr(torch.polygamma,
r"""
polygamma(n, input, out=None) -> Tensor
Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`.
:math:`n \geq 0` is called the order of the polygamma function.
.. math::
\psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x)
.. note::
This function is not implemented for :math:`n \geq 2`.
""" + """
Args:
n (int): the order of the polygamma function
{input}
{out}
Example::
>>> a = torch.tensor([1, 0.5])
>>> torch.polygamma(1, a)
tensor([1.64493, 4.9348])
""".format(**common_args))
add_docstr(torch.pow,
r"""
pow(input, exponent, out=None) -> Tensor
Takes the power of each element in :attr:`input` with :attr:`exponent` and
returns a tensor with the result.
:attr:`exponent` can be either a single ``float`` number or a `Tensor`
with the same number of elements as :attr:`input`.
When :attr:`exponent` is a scalar value, the operation applied is:
.. math::
\text{out}_i = x_i ^ \text{exponent}
When :attr:`exponent` is a tensor, the operation applied is:
.. math::
\text{out}_i = x_i ^ {\text{exponent}_i}
""" + r"""
When :attr:`exponent` is a tensor, the shapes of :attr:`input`
and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
{input}
exponent (float or tensor): the exponent value
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.4331, 1.2475, 0.6834, -0.2791])
>>> torch.pow(a, 2)
tensor([ 0.1875, 1.5561, 0.4670, 0.0779])
>>> exp = torch.arange(1., 5.)
>>> a = torch.arange(1., 5.)
>>> a
tensor([ 1., 2., 3., 4.])
>>> exp
tensor([ 1., 2., 3., 4.])
>>> torch.pow(a, exp)
tensor([ 1., 4., 27., 256.])
.. function:: pow(self, exponent, out=None) -> Tensor
:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
The returned tensor :attr:`out` is of the same shape as :attr:`exponent`
The operation applied is:
.. math::
\text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}
Args:
self (float): the scalar base value for the power operation
exponent (Tensor): the exponent tensor
{out}
Example::
>>> exp = torch.arange(1., 5.)
>>> base = 2
>>> torch.pow(base, exp)
tensor([ 2., 4., 8., 16.])
""".format(**common_args))
add_docstr(torch.prod,
r"""
prod(input, dtype=None) -> Tensor
Returns the product of all elements in the :attr:`input` tensor.
Args:
{input}
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8020, 0.5428, -1.5854]])
>>> torch.prod(a)
tensor(0.6902)
.. function:: prod(input, dim, keepdim=False, dtype=None) -> Tensor
Returns the product of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
{dtype}
Example::
>>> a = torch.randn(4, 2)
>>> a
tensor([[ 0.5261, -0.3837],
[ 1.1857, -0.2498],
[-1.1646, 0.0705],
[ 1.1131, -1.0629]])
>>> torch.prod(a, 1)
tensor([-0.2018, -0.2962, -0.0821, -1.1831])
""".format(**single_dim_common))
add_docstr(torch.promote_types,
r"""
promote_types(type1, type2) -> dtype
Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
not smaller nor of lower kind than either `type1` or `type2`. See type promotion
:ref:`documentation <type-promotion-doc>` for more information on the type
promotion logic.
Args:
type1 (:class:`torch.dtype`)
type2 (:class:`torch.dtype`)
Example::
    >>> torch.promote_types(torch.int32, torch.float32)
torch.float32
>>> torch.promote_types(torch.uint8, torch.long)
torch.long
""")
add_docstr(torch.qr,
r"""
qr(input, some=True, out=None) -> (Tensor, Tensor)
Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
.. note:: precision may be lost if the magnitudes of the elements of :attr:`input`
are large
.. note:: While it should always give you a valid decomposition, it may not
give you the same one across platforms - it will depend on your
LAPACK implementation.
Args:
input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
batch dimensions consisting of matrices of dimension :math:`m \times n`.
some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
complete QR decomposition.
out (tuple, optional): tuple of `Q` and `R` tensors
satisfying :code:`input = torch.matmul(Q, R)`.
The dimensions of `Q` and `R` are :math:`(*, m, k)` and :math:`(*, k, n)`
        respectively, where :math:`k = \min(m, n)` if :attr:`some` is ``True`` and
:math:`k = m` otherwise.
Example::
>>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
>>> q, r = torch.qr(a)
>>> q
tensor([[-0.8571, 0.3943, 0.3314],
[-0.4286, -0.9029, -0.0343],
[ 0.2857, -0.1714, 0.9429]])
>>> r
tensor([[ -14.0000, -21.0000, 14.0000],
[ 0.0000, -175.0000, 70.0000],
[ 0.0000, 0.0000, -35.0000]])
>>> torch.mm(q, r).round()
tensor([[ 12., -51., 4.],
[ 6., 167., -68.],
[ -4., 24., -41.]])
>>> torch.mm(q.t(), q).round()
tensor([[ 1., 0., 0.],
[ 0., 1., -0.],
[ 0., -0., 1.]])
>>> a = torch.randn(3, 4, 5)
>>> q, r = torch.qr(a, some=False)
>>> torch.allclose(torch.matmul(q, r), a)
True
>>> torch.allclose(torch.matmul(q.transpose(-2, -1), q), torch.eye(5))
True
""")
add_docstr(torch.rad2deg,
r"""
rad2deg(input, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in radians to degrees.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
>>> torch.rad2deg(a)
tensor([[ 180.0233, -180.0233],
[ 359.9894, -359.9894],
[ 89.9544, -89.9544]])
""".format(**common_args))
add_docstr(torch.deg2rad,
r"""
deg2rad(input, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input`
converted from angles in degrees to radians.
Args:
{input}
Keyword arguments:
{out}
Example::
>>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
>>> torch.deg2rad(a)
tensor([[ 3.1416, -3.1416],
[ 6.2832, -6.2832],
[ 1.5708, -1.5708]])
""".format(**common_args))
add_docstr(torch.rand,
r"""
rand(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.rand(4)
tensor([ 0.5204, 0.2503, 0.3525, 0.5673])
>>> torch.rand(2, 3)
tensor([[ 0.8237, 0.5781, 0.6879],
[ 0.3816, 0.7249, 0.0998]])
""".format(**factory_common_args))
add_docstr(torch.rand_like,
r"""
rand_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a uniform distribution on the interval :math:`[0, 1)`.
``torch.rand_like(input)`` is equivalent to
``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(**factory_like_common_args))
add_docstr(torch.randint,
"""
randint(low=0, high, size, \\*, generator=None, out=None, \
dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random integers generated uniformly
between :attr:`low` (inclusive) and :attr:`high` (exclusive).
The shape of the tensor is defined by the variable argument :attr:`size`.
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
size (tuple): a tuple defining the shape of the output tensor.
{generator}
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.randint(3, 5, (3,))
tensor([4, 3, 4])
>>> torch.randint(10, (2, 2))
tensor([[0, 2],
[5, 5]])
>>> torch.randint(3, 10, (2, 2))
tensor([[4, 5],
[6, 7]])
""".format(**factory_common_args))
add_docstr(torch.randint_like,
"""
randint_like(input, low=0, high, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same shape as Tensor :attr:`input` filled with
random integers generated uniformly between :attr:`low` (inclusive) and
:attr:`high` (exclusive).
.. note::
With the global dtype default (``torch.float32``), this function returns
a tensor with dtype ``torch.int64``.
Args:
{input}
low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
high (int): One above the highest integer to be drawn from the distribution.
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(**factory_like_common_args))
add_docstr(torch.randn,
r"""
randn(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal
distribution).
.. math::
\text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
The shape of the tensor is defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.randn(4)
tensor([-2.1436, 0.9966, 2.3426, -0.6366])
>>> torch.randn(2, 3)
tensor([[ 1.5954, 2.8929, -1.0923],
[ 1.1719, -0.4709, -0.1996]])
""".format(**factory_common_args))
add_docstr(torch.randn_like,
r"""
randn_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` that is filled with
random numbers from a normal distribution with mean 0 and variance 1.
``torch.randn_like(input)`` is equivalent to
``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(**factory_like_common_args))
add_docstr(torch.randperm,
r"""
randperm(n, out=None, dtype=torch.int64, layout=torch.strided, device=None, requires_grad=False) -> LongTensor
Returns a random permutation of integers from ``0`` to ``n - 1``.
Args:
n (int): the upper bound (exclusive)
{out}
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: ``torch.int64``.
{layout}
{device}
{requires_grad}
Example::
>>> torch.randperm(4)
tensor([2, 1, 0, 3])
""".format(**factory_common_args))
add_docstr(torch.tensor,
r"""
tensor(data, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Constructs a tensor with :attr:`data`.
.. warning::
:func:`torch.tensor` always copies :attr:`data`. If you have a Tensor
``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
or :func:`torch.Tensor.detach`.
If you have a NumPy ``ndarray`` and want to avoid a copy, use
:func:`torch.as_tensor`.
.. warning::
When data is a tensor `x`, :func:`torch.tensor` reads out 'the data' from whatever it is passed,
and constructs a leaf variable. Therefore ``torch.tensor(x)`` is equivalent to ``x.clone().detach()``
and ``torch.tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
The equivalents using ``clone()`` and ``detach()`` are recommended.
Args:
{data}
{dtype}
{device}
{requires_grad}
{pin_memory}
Example::
>>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
tensor([[ 0.1000, 1.2000],
[ 2.2000, 3.1000],
[ 4.9000, 5.2000]])
>>> torch.tensor([0, 1]) # Type inference on data
tensor([ 0, 1])
>>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
dtype=torch.float64,
device=torch.device('cuda:0')) # creates a torch.cuda.DoubleTensor
tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0')
>>> torch.tensor(3.14159) # Create a scalar (zero-dimensional tensor)
tensor(3.1416)
>>> torch.tensor([]) # Create an empty tensor (of size (0,))
tensor([])
""".format(**factory_data_common_args))
add_docstr(torch.range,
r"""
range(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\text{out}_{i+1} = \text{out}_i + \text{step}.
""" + r"""
.. warning::
This function is deprecated in favor of :func:`torch.arange`.
Args:
start (float): the starting value for the set of points. Default: ``0``.
end (float): the ending value for the set of points
step (float): the gap between each pair of adjacent points. Default: ``1``.
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
        arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.range(1, 4)
tensor([ 1., 2., 3., 4.])
>>> torch.range(1, 4, 0.5)
tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000])
""".format(**factory_common_args))
add_docstr(torch.arange,
r"""
arange(start=0, end, step=1, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
with values from the interval ``[start, end)`` taken with common difference
:attr:`step` beginning from `start`.
Note that non-integer :attr:`step` is subject to floating point rounding errors when
comparing against :attr:`end`; to avoid inconsistency, we advise adding a small epsilon to :attr:`end`
in such cases.
.. math::
\text{out}_{{i+1}} = \text{out}_{i} + \text{step}
""" + r"""
Args:
start (Number): the starting value for the set of points. Default: ``0``.
end (Number): the ending value for the set of points
step (Number): the gap between each pair of adjacent points. Default: ``1``.
{out}
{dtype} If `dtype` is not given, infer the data type from the other input
        arguments. If any of `start`, `end`, or `step` are floating-point, the
`dtype` is inferred to be the default dtype, see
:meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
be `torch.int64`.
{layout}
{device}
{requires_grad}
Example::
>>> torch.arange(5)
tensor([ 0, 1, 2, 3, 4])
>>> torch.arange(1, 4)
tensor([ 1, 2, 3])
>>> torch.arange(1, 2.5, 0.5)
tensor([ 1.0000, 1.5000, 2.0000])
""".format(**factory_common_args))
add_docstr(torch.remainder,
r"""
remainder(input, other, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both integer and floating point
numbers. The remainder has the same sign as the divisor :attr:`other`.
When :attr:`other` is a tensor, the shapes of :attr:`input` and
:attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
Args:
input (Tensor): the dividend
other (Tensor or float): the divisor that may be either a number or a
Tensor of the same shape as the dividend
{out}
Example::
>>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
tensor([ 1., 0., 1., 1., 0., 1.])
>>> torch.remainder(torch.tensor([1., 2, 3, 4, 5]), 1.5)
tensor([ 1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
.. seealso::
:func:`torch.fmod`, which computes the element-wise remainder of
division equivalently to the C library function ``fmod()``.
""".format(**common_args))
add_docstr(torch.renorm,
r"""
renorm(input, p, dim, maxnorm, out=None) -> Tensor
Returns a tensor where each sub-tensor of :attr:`input` along dimension
:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`
.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged
Args:
{input}
p (float): the power for the norm computation
dim (int): the dimension to slice over to get the sub-tensors
maxnorm (float): the maximum norm to keep each sub-tensor under
{out}
Example::
>>> x = torch.ones(3, 3)
>>> x[1].fill_(2)
tensor([ 2., 2., 2.])
>>> x[2].fill_(3)
tensor([ 3., 3., 3.])
>>> x
tensor([[ 1., 1., 1.],
[ 2., 2., 2.],
[ 3., 3., 3.]])
>>> torch.renorm(x, 1, 0, 5)
tensor([[ 1.0000, 1.0000, 1.0000],
[ 1.6667, 1.6667, 1.6667],
[ 1.6667, 1.6667, 1.6667]])
""".format(**common_args))
add_docstr(torch.reshape,
r"""
reshape(input, shape) -> Tensor
Returns a tensor with the same data and number of elements as :attr:`input`,
but with the specified shape. When possible, the returned tensor will be a view
of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.
See :meth:`torch.Tensor.view` on when it is possible to return a view.
A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`input`.
Args:
input (Tensor): the tensor to be reshaped
shape (tuple of ints): the new shape
Example::
>>> a = torch.arange(4.)
>>> torch.reshape(a, (2, 2))
tensor([[ 0., 1.],
[ 2., 3.]])
>>> b = torch.tensor([[0, 1], [2, 3]])
>>> torch.reshape(b, (-1,))
tensor([ 0, 1, 2, 3])
""")
add_docstr(torch.result_type,
r"""
result_type(tensor1, tensor2) -> dtype
Returns the :class:`torch.dtype` that would result from performing an arithmetic
operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
for more information on the type promotion logic.
Args:
tensor1 (Tensor or Number): an input tensor or number
tensor2 (Tensor or Number): an input tensor or number
Example::
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
torch.float32
>>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
torch.uint8
""")
add_docstr(torch.round,
r"""
round(input, out=None) -> Tensor
Returns a new tensor with each of the elements of :attr:`input` rounded
to the closest integer.
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9920, 0.6077, 0.9734, -1.0362])
>>> torch.round(a)
tensor([ 1., 1., 1., -1.])
""".format(**common_args))
add_docstr(torch.rsqrt,
r"""
rsqrt(input, out=None) -> Tensor
Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.0370, 0.2970, 1.5420, -0.9105])
>>> torch.rsqrt(a)
tensor([ nan, 1.8351, 0.8053, nan])
""".format(**common_args))
add_docstr(torch.set_flush_denormal,
r"""
set_flush_denormal(mode) -> bool
Enables or disables the flushing of denormal floating point numbers on CPU, depending on :attr:`mode`.
Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is only supported on x86 architectures supporting SSE3.
Args:
mode (bool): Controls whether to enable flush denormal mode or not
Example::
>>> torch.set_flush_denormal(True)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor([ 0.], dtype=torch.float64)
>>> torch.set_flush_denormal(False)
True
>>> torch.tensor([1e-323], dtype=torch.float64)
tensor(9.88131e-324 *
[ 1.0000], dtype=torch.float64)
""")
add_docstr(torch.set_num_threads,
r"""
set_num_threads(int)
Sets the number of threads used for intraop parallelism on CPU.
WARNING:
To ensure that the correct number of threads is used, set_num_threads
must be called before running eager, JIT or autograd code.
""")
add_docstr(torch.set_num_interop_threads,
r"""
set_num_interop_threads(int)
Sets the number of threads used for interop parallelism
(e.g. in JIT interpreter) on CPU.
WARNING: Can only be called once and before any inter-op parallel work
is started (e.g. JIT execution).
""")
add_docstr(torch.sigmoid,
r"""
sigmoid(input, out=None) -> Tensor
Returns a new tensor with the sigmoid of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}}
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.9213, 1.0887, -0.8858, -1.7683])
>>> torch.sigmoid(a)
tensor([ 0.7153, 0.7481, 0.2920, 0.1458])
""".format(**common_args))
add_docstr(torch.logit,
r"""
logit(input, eps=None, out=None) -> Tensor
Returns a new tensor with the logit of the elements of :attr:`input`.
:attr:`input` is clamped to [eps, 1 - eps] when eps is not None.
When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yield NaN.
.. math::
y_{i} = \ln(\frac{z_{i}}{1 - z_{i}}) \\
z_{i} = \begin{cases}
x_{i} & \text{if eps is None} \\
\text{eps} & \text{if } x_{i} < \text{eps} \\
x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\
1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps}
\end{cases}
""" + r"""
Args:
{input}
eps (float, optional): the epsilon for input clamp bound. Default: ``None``
{out}
Example::
>>> a = torch.rand(5)
>>> a
tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516])
>>> torch.logit(a, eps=1e-6)
tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261])
""".format(**common_args))
add_docstr(torch.sign,
r"""
sign(input, out=None) -> Tensor
Returns a new tensor with the signs of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.tensor([0.7, -1.2, 0., 2.3])
>>> a
tensor([ 0.7000, -1.2000, 0.0000, 2.3000])
>>> torch.sign(a)
tensor([ 1., -1., 0., 1.])
""".format(**common_args))
add_docstr(torch.sin,
r"""
sin(input, out=None) -> Tensor
Returns a new tensor with the sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-0.5461, 0.1347, -2.7266, -0.2746])
>>> torch.sin(a)
tensor([-0.5194, 0.1343, -0.4032, -0.2711])
""".format(**common_args))
add_docstr(torch.sinh,
r"""
sinh(input, out=None) -> Tensor
Returns a new tensor with the hyperbolic sine of the elements of
:attr:`input`.
.. math::
\text{out}_{i} = \sinh(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.5380, -0.8632, -0.1265, 0.9399])
>>> torch.sinh(a)
tensor([ 0.5644, -0.9744, -0.1268, 1.0845])
""".format(**common_args))
add_docstr(torch.sort,
r"""
sort(input, dim=-1, descending=False, out=None) -> (Tensor, LongTensor)
Sorts the elements of the :attr:`input` tensor along a given dimension
in ascending order by value.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`descending` is ``True`` then the elements are sorted in descending
order by value.
A namedtuple of (values, indices) is returned, where the `values` are the
sorted values and `indices` are the indices of the elements in the original
`input` tensor.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
be optionally given to be used as output buffers
Example::
>>> x = torch.randn(3, 4)
>>> sorted, indices = torch.sort(x)
>>> sorted
tensor([[-0.2162, 0.0608, 0.6719, 2.3332],
[-0.5793, 0.0061, 0.6058, 0.9497],
[-0.5071, 0.3343, 0.9553, 1.0960]])
>>> indices
tensor([[ 1, 0, 2, 3],
[ 3, 1, 0, 2],
[ 0, 3, 1, 2]])
>>> sorted, indices = torch.sort(x, 0)
>>> sorted
tensor([[-0.5071, -0.2162, 0.6719, -0.5793],
[ 0.0608, 0.0061, 0.9497, 0.3343],
[ 0.6058, 0.9553, 1.0960, 2.3332]])
>>> indices
tensor([[ 2, 0, 0, 1],
[ 0, 1, 1, 2],
[ 1, 2, 2, 0]])
""".format(**common_args))
add_docstr(torch.argsort,
r"""
argsort(input, dim=-1, descending=False) -> LongTensor
Returns the indices that sort a tensor along a given dimension in ascending
order by value.
This is the second value returned by :meth:`torch.sort`. See its documentation
for the exact semantics of this method.
Args:
{input}
dim (int, optional): the dimension to sort along
descending (bool, optional): controls the sorting order (ascending or descending)
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0785, 1.5267, -0.8521, 0.4065],
[ 0.1598, 0.0788, -0.0745, -1.2700],
[ 1.2208, 1.0722, -0.7064, 1.2564],
[ 0.0669, -0.2318, -0.8229, -0.9280]])
>>> torch.argsort(a, dim=1)
tensor([[2, 0, 3, 1],
[3, 2, 1, 0],
[2, 1, 0, 3],
[3, 2, 1, 0]])
""".format(**common_args))
add_docstr(torch.sparse_coo_tensor,
r"""
sparse_coo_tensor(indices, values, size=None, dtype=None, device=None, requires_grad=False) -> Tensor
Constructs a sparse tensor in COO(rdinate) format with non-zero elements at the given :attr:`indices`
with the given :attr:`values`. A sparse tensor can be `uncoalesced`; in that case, there are duplicate
coordinates in the indices, and the value at that index is the sum of all duplicate value entries:
`torch.sparse`_.
Args:
indices (array_like): Initial data for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
internally. The indices are the coordinates of the non-zero values in the matrix, and thus
should be two-dimensional where the first dimension is the number of tensor dimensions and
the second dimension is the number of non-zero values.
values (array_like): Initial values for the tensor. Can be a list, tuple,
NumPy ``ndarray``, scalar, and other types.
size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
provided the size will be inferred as the minimum size big enough to hold all non-zero
elements.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if None, infers data type from :attr:`values`.
device (:class:`torch.device`, optional): the desired device of returned tensor.
Default: if None, uses the current device for the default tensor type
(see :func:`torch.set_default_tensor_type`). :attr:`device` will be the CPU
for CPU tensor types and the current CUDA device for CUDA tensor types.
{requires_grad}
Example::
>>> i = torch.tensor([[0, 1, 1],
[2, 0, 2]])
>>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
>>> torch.sparse_coo_tensor(i, v, [2, 4])
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 4), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v) # Shape inference
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
size=(2, 3), nnz=3, layout=torch.sparse_coo)
>>> torch.sparse_coo_tensor(i, v, [2, 4],
dtype=torch.float64,
device=torch.device('cuda:0'))
tensor(indices=tensor([[0, 1, 1],
[2, 0, 2]]),
values=tensor([3., 4., 5.]),
device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
layout=torch.sparse_coo)
# Create an empty sparse tensor with the following invariants:
# 1. sparse_dim + dense_dim = len(SparseTensor.shape)
# 2. SparseTensor._indices().shape = (sparse_dim, nnz)
# 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
#
# For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
# sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
>>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0,)),
size=(1,), nnz=0, layout=torch.sparse_coo)
# and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
# sparse_dim = 1
>>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
tensor(indices=tensor([], size=(1, 0)),
values=tensor([], size=(0, 2)),
size=(1, 2), nnz=0, layout=torch.sparse_coo)
.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
""".format(**factory_common_args))
# Docstring for torch.sqrt; {input}/{out} placeholders are filled from common_args.
add_docstr(torch.sqrt,
r"""
sqrt(input, out=None) -> Tensor
Returns a new tensor with the square-root of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}}
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.sqrt(a)
tensor([ nan, 1.0112, 0.2883, 0.6933])
""".format(**common_args))
# Docstring for torch.square; placeholders filled from common_args.
add_docstr(torch.square,
r"""
square(input, out=None) -> Tensor
Returns a new tensor with the square of the elements of :attr:`input`.
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-2.0755, 1.0226, 0.0831, 0.4806])
>>> torch.square(a)
tensor([ 4.3077, 1.0457, 0.0069, 0.2310])
""".format(**common_args))
# Docstring for torch.squeeze; placeholders filled from common_args.
add_docstr(torch.squeeze,
r"""
squeeze(input, dim=None, out=None) -> Tensor
Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dim` is given, a squeeze operation is done only in the given
dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
will also remove the batch dimension, which can lead to unexpected
errors.
Args:
{input}
dim (int, optional): if given, the input will be squeezed only in
this dimension
{out}
Example::
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])
>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])
>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
""".format(**common_args))
# Docstring for torch.std (both overloads); placeholders filled from multi_dim_common.
add_docstr(torch.std,
r"""
std(input, unbiased=True) -> Tensor
Returns the standard-deviation of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.8166, -1.3802, -0.3560]])
>>> torch.std(a)
tensor(0.5130)
.. function:: std(input, dim, unbiased=True, keepdim=False, out=None) -> Tensor
Returns the standard-deviation of each row of the :attr:`input` tensor in the
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
{dim}
unbiased (bool): whether to use the unbiased estimation or not
{keepdim}
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.2035, 1.2959, 1.8101, -0.4644],
[ 1.5027, -0.3270, 0.5905, 0.6538],
[-1.5745, 1.3330, -0.5596, -0.6548],
[ 0.1264, -0.5080, 1.6420, 0.1992]])
>>> torch.std(a, dim=1)
tensor([ 1.0311, 0.7477, 1.2204, 0.9087])
""".format(**multi_dim_common))
# Docstring for torch.std_mean (both overloads); placeholders filled from multi_dim_common.
add_docstr(torch.std_mean,
r"""
std_mean(input, unbiased=True) -> (Tensor, Tensor)
Returns the standard-deviation and mean of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[0.3364, 0.3591, 0.9462]])
>>> torch.std_mean(a)
(tensor(0.3457), tensor(0.5472))
.. function:: std_mean(input, dim, unbiased=True, keepdim=False) -> (Tensor, Tensor)
Returns the standard-deviation and mean of each row of the :attr:`input` tensor in the
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
{dim}
unbiased (bool): whether to use the unbiased estimation or not
{keepdim}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.5648, -0.5984, -1.2676, -1.4471],
[ 0.9267, 1.0612, 1.1050, -0.6014],
[ 0.0154, 1.9301, 0.0125, -1.0904],
[-1.9711, -0.7748, -1.3840, 0.5067]])
>>> torch.std_mean(a, 1)
(tensor([0.9110, 0.8197, 1.2552, 1.0608]), tensor([-0.6871, 0.6229, 0.2169, -0.9058]))
""".format(**multi_dim_common))
# Docstring for torch.sum (both overloads); placeholders filled from multi_dim_common.
add_docstr(torch.sum,
r"""
sum(input, dtype=None) -> Tensor
Returns the sum of all elements in the :attr:`input` tensor.
Args:
{input}
{dtype}
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[ 0.1133, -0.9567, 0.2958]])
>>> torch.sum(a)
tensor(-0.5475)
.. function:: sum(input, dim, keepdim=False, dtype=None) -> Tensor
Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
{keepdim_details}
Args:
{input}
{dim}
{keepdim}
{dtype}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[ 0.0569, -0.2475, 0.0737, -0.3429],
[-0.2993, 0.9138, 0.9337, -1.6864],
[ 0.1132, 0.7892, -0.1003, 0.5688],
[ 0.3637, -0.9906, -0.4752, -1.5197]])
>>> torch.sum(a, 1)
tensor([-0.4598, -0.1381, 1.3708, -2.6217])
>>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
>>> torch.sum(b, (2, 1))
tensor([ 435., 1335., 2235., 3135.])
""".format(**multi_dim_common))
# Docstring for torch.svd (plain raw string, no .format placeholders).
# Fix: "of a input real matrix" -> "of an input real matrix".
add_docstr(torch.svd,
r"""
svd(input, some=True, compute_uv=True, out=None) -> (Tensor, Tensor, Tensor)
This function returns a namedtuple ``(U, S, V)`` which is the singular value
decomposition of an input real matrix or batches of real matrices :attr:`input` such that
:math:`input = U \times diag(S) \times V^T`.
If :attr:`some` is ``True`` (default), the method returns the reduced singular value decomposition
i.e., if the last two dimensions of :attr:`input` are ``m`` and ``n``, then the returned
`U` and `V` matrices will contain only :math:`min(n, m)` orthonormal columns.
If :attr:`compute_uv` is ``False``, the returned `U` and `V` matrices will be zero matrices
of shape :math:`(m \times m)` and :math:`(n \times n)` respectively. :attr:`some` will be ignored here.
.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
then the singular values of each matrix in the batch is returned in descending order.
.. note:: The implementation of SVD on CPU uses the LAPACK routine `?gesdd` (a divide-and-conquer
algorithm) instead of `?gesvd` for speed. Analogously, the SVD on GPU uses the MAGMA routine
`gesdd` as well.
.. note:: Irrespective of the original strides, the returned matrix `U`
will be transposed, i.e. with strides :code:`U.contiguous().transpose(-2, -1).stride()`
.. note:: Extra care needs to be taken when backward through `U` and `V`
outputs. Such operation is really only stable when :attr:`input` is
full rank with all distinct singular values. Otherwise, ``NaN`` can
appear as the gradients are not properly defined. Also, notice that
double backward will usually do an additional backward through `U` and
`V` even if the original backward is only on `S`.
.. note:: When :attr:`some` = ``False``, the gradients on :code:`U[..., :, min(m, n):]`
and :code:`V[..., :, min(m, n):]` will be ignored in backward as those vectors
can be arbitrary bases of the subspaces.
.. note:: When :attr:`compute_uv` = ``False``, backward cannot be performed since `U` and `V`
from the forward pass is required for the backward operation.
Args:
input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
batch dimensions consisting of :math:`m \times n` matrices.
some (bool, optional): controls the shape of returned `U` and `V`
compute_uv (bool, optional): option whether to compute `U` and `V` or not
out (tuple, optional): the output tuple of tensors
Example::
>>> a = torch.randn(5, 3)
>>> a
tensor([[ 0.2364, -0.7752, 0.6372],
[ 1.7201, 0.7394, -0.0504],
[-0.3371, -1.0584, 0.5296],
[ 0.3550, -0.4022, 1.5569],
[ 0.2445, -0.0158, 1.1414]])
>>> u, s, v = torch.svd(a)
>>> u
tensor([[ 0.4027, 0.0287, 0.5434],
[-0.1946, 0.8833, 0.3679],
[ 0.4296, -0.2890, 0.5261],
[ 0.6604, 0.2717, -0.2618],
[ 0.4234, 0.2481, -0.4733]])
>>> s
tensor([2.3289, 2.0315, 0.7806])
>>> v
tensor([[-0.0199, 0.8766, 0.4809],
[-0.5080, 0.4054, -0.7600],
[ 0.8611, 0.2594, -0.4373]])
>>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
tensor(8.6531e-07)
>>> a_big = torch.randn(7, 5, 3)
>>> u, s, v = torch.svd(a_big)
>>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.transpose(-2, -1)))
tensor(2.6503e-06)
""")
# Docstring for torch.symeig (plain raw string, no .format placeholders).
add_docstr(torch.symeig,
r"""
symeig(input, eigenvectors=False, upper=True, out=None) -> (Tensor, Tensor)
This function returns eigenvalues and eigenvectors
of a real symmetric matrix :attr:`input` or a batch of real symmetric matrices,
represented by a namedtuple (eigenvalues, eigenvectors).
This function calculates all eigenvalues (and vectors) of :attr:`input`
such that :math:`\text{input} = V \text{diag}(e) V^T`.
The boolean argument :attr:`eigenvectors` defines computation of
both eigenvectors and eigenvalues or eigenvalues only.
If it is ``False``, only eigenvalues are computed. If it is ``True``,
both eigenvalues and eigenvectors are computed.
Since the input matrix :attr:`input` is supposed to be symmetric,
only the upper triangular portion is used by default.
If :attr:`upper` is ``False``, then lower triangular portion is used.
.. note:: The eigenvalues are returned in ascending order. If :attr:`input` is a batch of matrices,
then the eigenvalues of each matrix in the batch is returned in ascending order.
.. note:: Irrespective of the original strides, the returned matrix `V` will
be transposed, i.e. with strides `V.contiguous().transpose(-1, -2).stride()`.
.. note:: Extra care needs to be taken when backward through outputs. Such
operation is really only stable when all eigenvalues are distinct.
Otherwise, ``NaN`` can appear as the gradients are not properly defined.
Args:
input (Tensor): the input tensor of size :math:`(*, n, n)` where `*` is zero or more
batch dimensions consisting of symmetric matrices.
eigenvectors(boolean, optional): controls whether eigenvectors have to be computed
upper(boolean, optional): controls whether to consider upper-triangular or lower-triangular region
out (tuple, optional): the output tuple of (Tensor, Tensor)
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape :math:`(*, m)`. The eigenvalues in ascending order.
- **eigenvectors** (*Tensor*): Shape :math:`(*, m, m)`.
If ``eigenvectors=False``, it's an empty tensor.
Otherwise, this tensor contains the orthonormal eigenvectors of the ``input``.
Examples::
>>> a = torch.randn(5, 5)
>>> a = a + a.t() # To make a symmetric
>>> a
tensor([[-5.7827, 4.4559, -0.2344, -1.7123, -1.8330],
[ 4.4559, 1.4250, -2.8636, -3.2100, -0.1798],
[-0.2344, -2.8636, 1.7112, -5.5785, 7.1988],
[-1.7123, -3.2100, -5.5785, -2.6227, 3.1036],
[-1.8330, -0.1798, 7.1988, 3.1036, -5.1453]])
>>> e, v = torch.symeig(a, eigenvectors=True)
>>> e
tensor([-13.7012, -7.7497, -2.3163, 5.2477, 8.1050])
>>> v
tensor([[ 0.1643, 0.9034, -0.0291, 0.3508, 0.1817],
[-0.2417, -0.3071, -0.5081, 0.6534, 0.4026],
[-0.5176, 0.1223, -0.0220, 0.3295, -0.7798],
[-0.4850, 0.2695, -0.5773, -0.5840, 0.1337],
[ 0.6415, -0.0447, -0.6381, -0.0193, -0.4230]])
>>> a_big = torch.randn(5, 2, 2)
>>> a_big = a_big + a_big.transpose(-2, -1) # To make a_big symmetric
>>> e, v = a_big.symeig(eigenvectors=True)
>>> torch.allclose(torch.matmul(v, torch.matmul(e.diag_embed(), v.transpose(-2, -1))), a_big)
True
""")
# Docstring for torch.t; placeholders filled from common_args.
add_docstr(torch.t,
r"""
t(input) -> Tensor
Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0
and 1.
0-D and 1-D tensors are returned as is. When input is a 2-D tensor this
is equivalent to ``transpose(input, 0, 1)``.
Args:
{input}
Example::
>>> x = torch.randn(())
>>> x
tensor(0.1995)
>>> torch.t(x)
tensor(0.1995)
>>> x = torch.randn(3)
>>> x
tensor([ 2.4320, -0.4608, 0.7702])
>>> torch.t(x)
tensor([ 2.4320, -0.4608, 0.7702])
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.4875, 0.9158, -0.5872],
[ 0.3938, -0.6929, 0.6932]])
>>> torch.t(x)
tensor([[ 0.4875, 0.3938],
[ 0.9158, -0.6929],
[-0.5872, 0.6932]])
""".format(**common_args))
# Docstring for torch.flip; placeholders filled from common_args.
add_docstr(torch.flip,
r"""
flip(input, dims) -> Tensor
Reverse the order of a n-D tensor along given axis in dims.
Args:
{input}
dims (a list or tuple): axis to flip on
Example::
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]]])
>>> torch.flip(x, [0, 1])
tensor([[[ 6, 7],
[ 4, 5]],
[[ 2, 3],
[ 0, 1]]])
""".format(**common_args))
# Docstring for torch.fliplr; .format is a no-op here (no placeholders used).
add_docstr(torch.fliplr,
r"""
fliplr(input) -> Tensor
Flip array in the left/right direction, returning a new tensor.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Note:
Equivalent to input[:,::-1]. Requires the array to be at least 2-D.
Args:
input (Tensor): Must be at least 2-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.fliplr(x)
tensor([[1, 0],
[3, 2]])
""".format(**common_args))
# Docstring for torch.flipud; .format is a no-op here (no placeholders used).
add_docstr(torch.flipud,
r"""
flipud(input) -> Tensor
Flip array in the up/down direction, returning a new tensor.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Note:
Equivalent to input[::-1,...]. Requires the array to be at least 1-D.
Args:
input (Tensor): Must be at least 1-dimensional.
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.flipud(x)
tensor([[2, 3],
[0, 1]])
""".format(**common_args))
# Docstring for torch.roll; placeholders filled from common_args.
add_docstr(torch.roll,
r"""
roll(input, shifts, dims=None) -> Tensor
Roll the tensor along the given dimension(s). Elements that are shifted beyond the
last position are re-introduced at the first position. If a dimension is not
specified, the tensor will be flattened before rolling and then restored
to the original shape.
Args:
{input}
shifts (int or tuple of ints): The number of places by which the elements
of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
the same size, and each dimension will be rolled by the corresponding
value
dims (int or tuple of ints): Axis along which to roll
Example::
>>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
>>> x
tensor([[1, 2],
[3, 4],
[5, 6],
[7, 8]])
>>> torch.roll(x, 1, 0)
tensor([[7, 8],
[1, 2],
[3, 4],
[5, 6]])
>>> torch.roll(x, -1, 0)
tensor([[3, 4],
[5, 6],
[7, 8],
[1, 2]])
>>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
tensor([[6, 5],
[8, 7],
[2, 1],
[4, 3]])
""".format(**common_args))
# Docstring for torch.rot90; placeholders filled from common_args.
add_docstr(torch.rot90,
r"""
rot90(input, k, dims) -> Tensor
Rotate a n-D tensor by 90 degrees in the plane specified by dims axis.
Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
Args:
{input}
k (int): number of times to rotate
dims (a list or tuple): axis to rotate
Example::
>>> x = torch.arange(4).view(2, 2)
>>> x
tensor([[0, 1],
[2, 3]])
>>> torch.rot90(x, 1, [0, 1])
tensor([[1, 3],
[0, 2]])
>>> x = torch.arange(8).view(2, 2, 2)
>>> x
tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> torch.rot90(x, 1, [1, 2])
tensor([[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]])
""".format(**common_args))
# Docstring for torch.take; placeholders filled from common_args.
# Fix: the Args entry documented a parameter "indices", but the signature
# line declares the parameter as "index" -- use the real name.
add_docstr(torch.take,
r"""
take(input, index) -> Tensor
Returns a new tensor with the elements of :attr:`input` at the given indices.
The input tensor is treated as if it were viewed as a 1-D tensor. The result
takes the same shape as the indices.
Args:
{input}
index (LongTensor): the indices into tensor
Example::
>>> src = torch.tensor([[4, 3, 5],
[6, 7, 8]])
>>> torch.take(src, torch.tensor([0, 2, 5]))
tensor([ 4, 5, 8])
""".format(**common_args))
# Docstring for torch.tan; placeholders filled from common_args.
add_docstr(torch.tan,
r"""
tan(input, out=None) -> Tensor
Returns a new tensor with the tangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([-1.2027, -1.7687, 0.4412, -1.3856])
>>> torch.tan(a)
tensor([-2.5930, 4.9859, 0.4722, -5.3366])
""".format(**common_args))
# Docstring for torch.tanh; placeholders filled from common_args.
add_docstr(torch.tanh,
r"""
tanh(input, out=None) -> Tensor
Returns a new tensor with the hyperbolic tangent of the elements
of :attr:`input`.
.. math::
\text{out}_{i} = \tanh(\text{input}_{i})
""" + r"""
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 0.8986, -0.7279, 1.1745, 0.2611])
>>> torch.tanh(a)
tensor([ 0.7156, -0.6218, 0.8257, 0.2553])
""".format(**common_args))
# Docstring for torch.topk; placeholders filled from common_args.
add_docstr(torch.topk,
r"""
topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)
Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
a given dimension.
If :attr:`dim` is not given, the last dimension of the `input` is chosen.
If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
A namedtuple of `(values, indices)` is returned, where the `indices` are the indices
of the elements in the original `input` tensor.
The boolean option :attr:`sorted` if ``True``, will make sure that the returned
`k` elements are themselves sorted
Args:
{input}
k (int): the k in "top-k"
dim (int, optional): the dimension to sort along
largest (bool, optional): controls whether to return largest or
smallest elements
sorted (bool, optional): controls whether to return the elements
in sorted order
out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
optionally given to be used as output buffers
Example::
>>> x = torch.arange(1., 6.)
>>> x
tensor([ 1., 2., 3., 4., 5.])
>>> torch.topk(x, 3)
torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
""".format(**common_args))
# Docstring for torch.trace (plain raw string, no .format placeholders).
add_docstr(torch.trace,
r"""
trace(input) -> Tensor
Returns the sum of the elements of the diagonal of the input 2-D matrix.
Example::
>>> x = torch.arange(1., 10.).view(3, 3)
>>> x
tensor([[ 1., 2., 3.],
[ 4., 5., 6.],
[ 7., 8., 9.]])
>>> torch.trace(x)
tensor(15.)
""")
# Docstring for torch.transpose; placeholders filled from common_args.
# Fix: "shares it's underlying storage" -> "shares its underlying storage".
add_docstr(torch.transpose,
r"""
transpose(input, dim0, dim1) -> Tensor
Returns a tensor that is a transposed version of :attr:`input`.
The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
The resulting :attr:`out` tensor shares its underlying storage with the
:attr:`input` tensor, so changing the content of one would change the content
of the other.
Args:
{input}
dim0 (int): the first dimension to be transposed
dim1 (int): the second dimension to be transposed
Example::
>>> x = torch.randn(2, 3)
>>> x
tensor([[ 1.0028, -0.9893, 0.5809],
[-0.1669, 0.7299, 0.4942]])
>>> torch.transpose(x, 0, 1)
tensor([[ 1.0028, -0.1669],
[-0.9893, 0.7299],
[ 0.5809, 0.4942]])
""".format(**common_args))
# Docstring for torch.triangular_solve (plain raw string, no .format placeholders).
# Fix: "zero of more batch dimensions" -> "zero or more batch dimensions".
add_docstr(torch.triangular_solve,
r"""
triangular_solve(input, A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor)
Solves a system of equations with a triangular coefficient matrix :math:`A`
and multiple right-hand sides :math:`b`.
In particular, solves :math:`AX = b` and assumes :math:`A` is upper-triangular
with the default keyword arguments.
`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then returns
batched outputs `X`
Args:
input (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
:math:`*` is zero or more batch dimensions (:math:`b`)
A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
where :math:`*` is zero or more batch dimensions
upper (bool, optional): whether to solve the upper-triangular system
of equations (default) or the lower-triangular system of equations. Default: ``True``.
transpose (bool, optional): whether :math:`A` should be transposed before
being sent into the solver. Default: ``False``.
unitriangular (bool, optional): whether :math:`A` is unit triangular.
If True, the diagonal elements of :math:`A` are assumed to be
1 and not referenced from :math:`A`. Default: ``False``.
Returns:
A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
(or whatever variant of the system of equations, depending on the keyword arguments.)
Examples::
>>> A = torch.randn(2, 2).triu()
>>> A
tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]])
>>> b = torch.randn(2, 3)
>>> b
tensor([[-0.0210, 2.3513, -1.5492],
[ 1.5429, 0.7403, -1.0243]])
>>> torch.triangular_solve(b, A)
torch.return_types.triangular_solve(
solution=tensor([[ 1.7841, 2.9046, -2.5405],
[ 1.9320, 0.9270, -1.2826]]),
cloned_coefficient=tensor([[ 1.1527, -1.0753],
[ 0.0000, 0.7986]]))
""")
# Docstring for torch.tril; placeholders filled from common_args.
add_docstr(torch.tril,
r"""
tril(input, diagonal=0, out=None) -> Tensor
Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
""" + r"""
Args:
{input}
diagonal (int, optional): the diagonal to consider
{out}
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[-1.0813, -0.8619, 0.7105],
[ 0.0935, 0.1380, 2.2112],
[-0.3409, -0.9828, 0.0289]])
>>> torch.tril(a)
tensor([[-1.0813, 0.0000, 0.0000],
[ 0.0935, 0.1380, 0.0000],
[-0.3409, -0.9828, 0.0289]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461],
[ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]])
>>> torch.tril(b, diagonal=1)
tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]])
>>> torch.tril(b, diagonal=-1)
tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000],
[-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]])
""".format(**common_args))
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
# Docstring for torch.tril_indices; placeholders filled from factory_common_args.
add_docstr(torch.tril_indices,
r"""
tril_indices(row, col, offset=0, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the lower triangular part of a :attr:`row`-by-
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The lower triangular part of the matrix is defined as the elements on and
below the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and below the main diagonal are
retained. A positive value includes just as many diagonals above the main
diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
""" + r"""
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
{device}
layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.
Example::
>>> a = torch.tril_indices(3, 3)
>>> a
tensor([[0, 1, 1, 2, 2, 2],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, -1)
>>> a
tensor([[1, 2, 2, 3, 3, 3],
[0, 0, 1, 0, 1, 2]])
>>> a = torch.tril_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
[0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
""".format(**factory_common_args))
# Docstring for torch.triu; placeholders filled from common_args.
add_docstr(torch.triu,
r"""
triu(input, diagonal=0, out=None) -> Tensor
Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`diagonal` controls which diagonal to consider. If
:attr:`diagonal` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
:math:`d_{1}, d_{2}` are the dimensions of the matrix.
""" + r"""
Args:
{input}
diagonal (int, optional): the diagonal to consider
{out}
Example::
>>> a = torch.randn(3, 3)
>>> a
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.3480, -0.5211, -0.4573]])
>>> torch.triu(a)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.0000, -1.0680, 0.6602],
[ 0.0000, 0.0000, -0.4573]])
>>> torch.triu(a, diagonal=1)
tensor([[ 0.0000, 0.5207, 2.0049],
[ 0.0000, 0.0000, 0.6602],
[ 0.0000, 0.0000, 0.0000]])
>>> torch.triu(a, diagonal=-1)
tensor([[ 0.2309, 0.5207, 2.0049],
[ 0.2072, -1.0680, 0.6602],
[ 0.0000, -0.5211, -0.4573]])
>>> b = torch.randn(4, 6)
>>> b
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=1)
tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]])
>>> torch.triu(b, diagonal=-1)
tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235],
[-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857],
[ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410],
[ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]])
""".format(**common_args))
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
# as common args.
# Docstring for torch.triu_indices; placeholders filled from factory_common_args.
add_docstr(torch.triu_indices,
r"""
triu_indices(row, col, offset=0, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
Returns the indices of the upper triangular part of a :attr:`row` by
:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
coordinates of all indices and the second row contains column coordinates.
Indices are ordered based on rows and then columns.
The upper triangular part of the matrix is defined as the elements on and
above the diagonal.
The argument :attr:`offset` controls which diagonal to consider. If
:attr:`offset` = 0, all elements on and above the main diagonal are
retained. A positive value excludes just as many diagonals above the main
diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal are the set of indices
:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
.. note::
When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
prevent overflow during calculation.
""" + r"""
Args:
row (``int``): number of rows in the 2-D matrix.
col (``int``): number of columns in the 2-D matrix.
offset (``int``): diagonal offset from the main diagonal.
Default: if not provided, 0.
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, ``torch.long``.
{device}
layout (:class:`torch.layout`, optional): currently only support ``torch.strided``.
Example::
>>> a = torch.triu_indices(3, 3)
>>> a
tensor([[0, 0, 0, 1, 1, 2],
[0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, -1)
>>> a
tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
[0, 1, 2, 0, 1, 2, 1, 2, 2]])
>>> a = torch.triu_indices(4, 3, 1)
>>> a
tensor([[0, 0, 1],
[1, 2, 2]])
""".format(**factory_common_args))
# Docstring for torch.true_divide; placeholders filled from common_args.
# Doubled braces ({{...}}) escape str.format inside the :math: expression.
add_docstr(torch.true_divide,
r"""
true_divide(dividend, divisor) -> Tensor
Performs "true division" that always computes the division
in floating point. Analogous to division in Python 3 and equivalent to
:func:`torch.div` except when both inputs have bool or integer scalar types,
in which case they are cast to the default (floating) scalar type before the division.
.. math::
\text{{out}}_i = \frac{{\text{{dividend}}_i}}{{\text{{divisor}}}}
Args:
dividend (Tensor): the dividend
divisor (Tensor or Scalar): the divisor
Keyword args:
{out}
Example::
>>> dividend = torch.tensor([5, 3], dtype=torch.int)
>>> divisor = torch.tensor([3, 2], dtype=torch.int)
>>> torch.true_divide(dividend, divisor)
tensor([1.6667, 1.5000])
>>> torch.true_divide(dividend, 2)
tensor([2.5000, 1.5000])
""".format(**common_args))
# Docstring for torch.trunc; placeholders filled from common_args.
add_docstr(torch.trunc,
r"""
trunc(input, out=None) -> Tensor
Returns a new tensor with the truncated integer values of
the elements of :attr:`input`.
Args:
{input}
{out}
Example::
>>> a = torch.randn(4)
>>> a
tensor([ 3.4742, 0.5466, -0.8008, -0.9079])
>>> torch.trunc(a)
tensor([ 3., 0., -0., -0.])
""".format(**common_args))
# Docstring for torch.unsqueeze; placeholders filled from common_args.
add_docstr(torch.unsqueeze,
r"""
unsqueeze(input, dim) -> Tensor
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.dim() + 1``.
Args:
{input}
dim (int): the index at which to insert the singleton dimension
Example::
>>> x = torch.tensor([1, 2, 3, 4])
>>> torch.unsqueeze(x, 0)
tensor([[ 1, 2, 3, 4]])
>>> torch.unsqueeze(x, 1)
tensor([[ 1],
[ 2],
[ 3],
[ 4]])
""".format(**common_args))
# Docstring for torch.var (both overloads); placeholders filled from multi_dim_common.
add_docstr(torch.var,
r"""
var(input, unbiased=True) -> Tensor
Returns the variance of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``False``, then the variance will be calculated via the
biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[-0.3425, -1.2636, -0.4864]])
>>> torch.var(a)
tensor(0.2455)
.. function:: var(input, dim, keepdim=False, unbiased=True, out=None) -> Tensor
Returns the variance of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
{keepdim_details}
If :attr:`unbiased` is ``False``, then the variance will be calculated via the
biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
{dim}
{keepdim}
unbiased (bool): whether to use the unbiased estimation or not
{out}
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-0.3567, 1.7385, -1.3042, 0.7423],
[ 1.3436, -0.1015, -0.9834, -0.8438],
[ 0.6056, 0.1089, -0.3112, -1.4085],
[-0.7700, 0.6074, -0.1469, 0.7777]])
>>> torch.var(a, 1)
tensor([ 1.7444, 1.1363, 0.7356, 0.5112])
""".format(**multi_dim_common))
add_docstr(torch.var_mean,
r"""
var_mean(input, unbiased=True) -> (Tensor, Tensor)
Returns the variance and mean of all elements in the :attr:`input` tensor.
If :attr:`unbiased` is ``False``, then the variance will be calculated via the
biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(1, 3)
>>> a
tensor([[0.0146, 0.4258, 0.2211]])
>>> torch.var_mean(a)
(tensor(0.0423), tensor(0.2205))
.. function:: var_mean(input, dim, keepdim=False, unbiased=True) -> (Tensor, Tensor)
Returns the variance and mean of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`.
{keepdim_details}
If :attr:`unbiased` is ``False``, then the variance will be calculated via the
biased estimator. Otherwise, Bessel's correction will be used.
Args:
{input}
{dim}
{keepdim}
unbiased (bool): whether to use the unbiased estimation or not
Example::
>>> a = torch.randn(4, 4)
>>> a
tensor([[-1.5650, 2.0415, -0.1024, -0.5790],
[ 0.2325, -2.6145, -1.6428, -0.3537],
[-0.2159, -1.1069, 1.2882, -1.3265],
[-0.6706, -1.5893, 0.6827, 1.6727]])
>>> torch.var_mean(a, 1)
(tensor([2.3174, 1.6403, 1.4092, 2.0791]), tensor([-0.0512, -1.0946, -0.3403, 0.0239]))
""".format(**multi_dim_common))
# --- Tensor factory functions -------------------------------------------------
# The following add_docstr calls document the dense-tensor constructors; they
# all substitute shared parameter descriptions from factory_common_args /
# factory_like_common_args via str.format.
# torch.zeros — new tensor of the given shape, filled with 0.
add_docstr(torch.zeros,
r"""
zeros(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor filled with the scalar value `0`, with the shape defined
by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.zeros(2, 3)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
>>> torch.zeros(5)
tensor([ 0., 0., 0., 0., 0.])
""".format(**factory_common_args))
# torch.zeros_like — zeros with the size/dtype/layout/device of `input`.
add_docstr(torch.zeros_like,
r"""
zeros_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns a tensor filled with the scalar value `0`, with the same size as
:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
.. warning::
As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
the old ``torch.zeros_like(input, out=output)`` is equivalent to
``torch.zeros(input.size(), out=output)``.
Args:
{input}
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> input = torch.empty(2, 3)
>>> torch.zeros_like(input)
tensor([[ 0., 0., 0.],
[ 0., 0., 0.]])
""".format(**factory_like_common_args))
# torch.empty — new tensor of the given shape with uninitialized storage.
add_docstr(torch.empty,
r"""
empty(*size, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with uninitialized data. The shape of the tensor is
defined by the variable argument :attr:`size`.
Args:
size (int...): a sequence of integers defining the shape of the output tensor.
Can be a variable number of arguments or a collection like a list or tuple.
{out}
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
{memory_format}
Example::
>>> torch.empty(2, 3)
tensor(1.00000e-08 *
[[ 6.3984, 0.0000, 0.0000],
[ 0.0000, 0.0000, 0.0000]])
""".format(**factory_common_args))
# torch.empty_like — uninitialized tensor matching `input`'s metadata.
add_docstr(torch.empty_like,
r"""
empty_like(input, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
Returns an uninitialized tensor with the same size as :attr:`input`.
``torch.empty_like(input)`` is equivalent to
``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
Example::
>>> torch.empty((2,3), dtype=torch.int64)
tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13],
[ 7.5751e+18, 7.1428e+18, 7.5955e+18]])
""".format(**factory_like_common_args))
# torch.empty_strided — uninitialized tensor with explicit (possibly
# overlapping) strides; the docstring warns about aliased elements.
add_docstr(torch.empty_strided,
r"""
empty_strided(size, stride, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
Returns a tensor filled with uninitialized data. The shape and strides of the tensor is
defined by the variable argument :attr:`size` and :attr:`stride` respectively.
``torch.empty_strided(size, stride)`` is equivalent to
``torch.empty(size).as_strided(size, stride)``.
.. warning::
More than one element of the created tensor may refer to a single memory
location. As a result, in-place operations (especially ones that are
vectorized) may result in incorrect behavior. If you need to write to
the tensors, please clone them first.
Args:
size (tuple of ints): the shape of the output tensor
stride (tuple of ints): the strides of the output tensor
{dtype}
{layout}
{device}
{requires_grad}
{pin_memory}
Example::
>>> a = torch.empty_strided((2, 3), (1, 2))
>>> a
tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
[0.0000e+00, 0.0000e+00, 3.0705e-41]])
>>> a.stride()
(1, 2)
>>> a.size()
torch.Size([2, 3])
""".format(**factory_common_args))
# torch.full — new tensor of the given shape, filled with `fill_value`.
add_docstr(torch.full,
r"""
full(size, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
Returns a tensor of size :attr:`size` filled with :attr:`fill_value`.
.. warning::
Providing a bool or integral :attr:`fill_value` without setting
the optional :attr:`dtype` or :attr:`out` arguments is currently unsupported.
In PyTorch 1.7, when :attr:`dtype` and :attr:`out` are not set
a bool :attr:`fill_value` will return a tensor of torch.bool dtype,
and an integral :attr:`fill_value` will return a tensor of torch.long dtype.
Args:
size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
shape of the output tensor.
fill_value: the number to fill the output tensor with.
{out}
{dtype}
{layout}
{device}
{requires_grad}
Example::
>>> torch.full((2, 3), 3.141592)
tensor([[ 3.1416, 3.1416, 3.1416],
[ 3.1416, 3.1416, 3.1416]])
""".format(**factory_common_args))
# torch.full_like — `fill_value`-filled tensor matching `input`'s metadata.
# Note: a plain (non-raw) string here; the trailing backslash continues the
# signature line inside the docstring.
add_docstr(torch.full_like,
"""
full_like(input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
memory_format=torch.preserve_format) -> Tensor
Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
``torch.full_like(input, fill_value)`` is equivalent to
``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
Args:
{input}
fill_value: the number to fill the output tensor with.
{dtype}
{layout}
{device}
{requires_grad}
{memory_format}
""".format(**factory_like_common_args))
# torch.det — determinant of a square matrix or a batch of square matrices.
# (Fixed grammar typo in the backward-stability note: "unstable in when" ->
# "unstable when".)
add_docstr(torch.det,
r"""
det(input) -> Tensor
Calculates determinant of a square matrix or batches of square matrices.
.. note::
Backward through :meth:`det` internally uses SVD results when :attr:`input` is
not invertible. In this case, double backward through :meth:`det` will be
unstable when :attr:`input` doesn't have distinct singular values. See
:meth:`~torch.svd` for details.
Arguments:
input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
batch dimensions.
Example::
>>> A = torch.randn(3, 3)
>>> torch.det(A)
tensor(3.7641)
>>> A = torch.randn(3, 2, 2)
>>> A
tensor([[[ 0.9254, -0.6213],
[-0.5787, 1.6843]],
[[ 0.3242, -0.9665],
[ 0.4539, -0.0887]],
[[ 1.1336, -0.4025],
[-0.7089, 0.9032]]])
>>> A.det()
tensor([1.1990, 0.4099, 0.7386])
""")
# torch.where — ternary element-wise select: x where condition is True, else y;
# all three arguments must be broadcastable. The one-argument overload is an
# alias for torch.nonzero(condition, as_tuple=True).
add_docstr(torch.where,
r"""
where(condition, x, y) -> Tensor
Return a tensor of elements selected from either :attr:`x` or :attr:`y`, depending on :attr:`condition`.
The operation is defined as:
.. math::
\text{out}_i = \begin{cases}
\text{x}_i & \text{if } \text{condition}_i \\
\text{y}_i & \text{otherwise} \\
\end{cases}
.. note::
The tensors :attr:`condition`, :attr:`x`, :attr:`y` must be :ref:`broadcastable <broadcasting-semantics>`.
Arguments:
condition (BoolTensor): When True (nonzero), yield x, otherwise yield y
x (Tensor): values selected at indices where :attr:`condition` is ``True``
y (Tensor): values selected at indices where :attr:`condition` is ``False``
Returns:
Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`x`, :attr:`y`
Example::
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, x, y)
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
.. function:: where(condition) -> tuple of LongTensor
``torch.where(condition)`` is identical to
``torch.nonzero(condition, as_tuple=True)``.
.. note::
See also :func:`torch.nonzero`.
""")
# torch.logdet — log-determinant of a square matrix or batch of square matrices.
# Fixes in this docstring:
#  * "zero log determinant" -> "zero determinant" (logdet is -inf when the
#    determinant is 0, i.e. when det(input) == 0, not when logdet == 0);
#  * grammar typo "unstable in when" -> "unstable when";
#  * the example displayed a 3x2x2 batched `A` without ever defining it — the
#    `>>> A = torch.randn(3, 2, 2)` line (present in the parallel torch.det
#    example) was missing and has been restored.
add_docstr(torch.logdet,
r"""
logdet(input) -> Tensor
Calculates log determinant of a square matrix or batches of square matrices.
.. note::
Result is ``-inf`` if :attr:`input` has zero determinant, and is ``nan`` if
:attr:`input` has negative determinant.
.. note::
Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
is not invertible. In this case, double backward through :meth:`logdet` will
be unstable when :attr:`input` doesn't have distinct singular values. See
:meth:`~torch.svd` for details.
Arguments:
input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
batch dimensions.
Example::
>>> A = torch.randn(3, 3)
>>> torch.det(A)
tensor(0.2611)
>>> torch.logdet(A)
tensor(-1.3430)
>>> A = torch.randn(3, 2, 2)
>>> A
tensor([[[ 0.9254, -0.6213],
[-0.5787, 1.6843]],
[[ 0.3242, -0.9665],
[ 0.4539, -0.0887]],
[[ 1.1336, -0.4025],
[-0.7089, 0.9032]]])
>>> A.det()
tensor([1.1990, 0.4099, 0.7386])
>>> A.det().log()
tensor([ 0.1815, -0.8917, -0.3031])
""")
# torch.slogdet — sign and log|det| of a square matrix or batch of square
# matrices; numerically safer than logdet for negative/zero determinants.
# (Fixed grammar typo "unstable in when" -> "unstable when".)
add_docstr(torch.slogdet,
r"""
slogdet(input) -> (Tensor, Tensor)
Calculates the sign and log absolute value of the determinant(s) of a square matrix or batches of square matrices.
.. note::
If ``input`` has zero determinant, this returns ``(0, -inf)``.
.. note::
Backward through :meth:`slogdet` internally uses SVD results when :attr:`input`
is not invertible. In this case, double backward through :meth:`slogdet`
will be unstable when :attr:`input` doesn't have distinct singular values.
See :meth:`~torch.svd` for details.
Arguments:
input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
batch dimensions.
Returns:
A namedtuple (sign, logabsdet) containing the sign of the determinant, and the log
value of the absolute determinant.
Example::
>>> A = torch.randn(3, 3)
>>> A
tensor([[ 0.0032, -0.2239, -1.1219],
[-0.6690, 0.1161, 0.4053],
[-1.6218, -0.9273, -0.0082]])
>>> torch.det(A)
tensor(-0.7576)
>>> torch.logdet(A)
tensor(nan)
>>> torch.slogdet(A)
torch.return_types.slogdet(sign=tensor(-1.), logabsdet=tensor(-0.2776))
""")
# torch.pinverse — Moore-Penrose pseudo-inverse via SVD; supports batched
# input and an `rcond` cutoff for small singular values.
add_docstr(torch.pinverse,
r"""
pinverse(input, rcond=1e-15) -> Tensor
Calculates the pseudo-inverse (also known as the Moore-Penrose inverse) of a 2D tensor.
Please look at `Moore-Penrose inverse`_ for more details
.. note::
This method is implemented using the Singular Value Decomposition.
.. note::
The pseudo-inverse is not necessarily a continuous function in the elements of the matrix `[1]`_.
Therefore, derivatives are not always existent, and exist for a constant rank only `[2]`_.
However, this method is backprop-able due to the implementation by using SVD results, and
could be unstable. Double-backward will also be unstable due to the usage of SVD internally.
See :meth:`~torch.svd` for more details.
Arguments:
input (Tensor): The input tensor of size :math:`(*, m, n)` where :math:`*` is zero or more batch dimensions
rcond (float): A floating point value to determine the cutoff for small singular values.
Default: 1e-15
Returns:
The pseudo-inverse of :attr:`input` of dimensions :math:`(*, n, m)`
Example::
>>> input = torch.randn(3, 5)
>>> input
tensor([[ 0.5495, 0.0979, -1.4092, -0.1128, 0.4132],
[-1.1143, -0.3662, 0.3042, 1.6374, -0.9294],
[-0.3269, -0.5745, -0.0382, -0.5922, -0.6759]])
>>> torch.pinverse(input)
tensor([[ 0.0600, -0.1933, -0.2090],
[-0.0903, -0.0817, -0.4752],
[-0.7124, -0.1631, -0.2272],
[ 0.1356, 0.3933, -0.5023],
[-0.0308, -0.1725, -0.5216]])
>>> # Batched pinverse example
>>> a = torch.randn(2,6,3)
>>> b = torch.pinverse(a)
>>> torch.matmul(b, a)
tensor([[[ 1.0000e+00, 1.6391e-07, -1.1548e-07],
[ 8.3121e-08, 1.0000e+00, -2.7567e-07],
[ 3.5390e-08, 1.4901e-08, 1.0000e+00]],
[[ 1.0000e+00, -8.9407e-08, 2.9802e-08],
[-2.2352e-07, 1.0000e+00, 1.1921e-07],
[ 0.0000e+00, 8.9407e-08, 1.0000e+00]]])
.. _Moore-Penrose inverse: https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
.. _[1]: https://epubs.siam.org/doi/10.1137/0117004
.. _[2]: https://www.jstor.org/stable/2156365
""")
# --- Legacy (pre-torch.fft-module) spectral transforms ------------------------
# fft/ifft are complex<->complex, rfft/irfft are real<->complex; complex values
# are encoded as a trailing dimension of size 2 (real, imag).
# (Fixed spelling typo "halfed" -> "halved" in the rfft and irfft docstrings.)
# torch.fft — complex-to-complex DFT over the last `signal_ndim` dimensions.
add_docstr(torch.fft,
r"""
fft(input, signal_ndim, normalized=False) -> Tensor
Complex-to-complex Discrete Fourier Transform
This method computes the complex-to-complex discrete Fourier transform.
Ignoring the batch dimensions, it computes the following expression:
.. math::
X[\omega_1, \dots, \omega_d] =
\sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
e^{-j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
where :math:`d` = :attr:`signal_ndim` is number of dimensions for the
signal, and :math:`N_i` is the size of signal dimension :math:`i`.
This method supports 1D, 2D and 3D complex-to-complex transforms, indicated
by :attr:`signal_ndim`. :attr:`input` must be a tensor with last dimension
of size 2, representing the real and imaginary components of complex
numbers, and should have at least ``signal_ndim + 1`` dimensions with optionally
arbitrary number of leading batch dimensions. If :attr:`normalized` is set to
``True``, this normalizes the result by dividing it with
:math:`\sqrt{\prod_{i=1}^K N_i}` so that the operator is unitary.
Returns the real and the imaginary parts together as one tensor of the same
shape of :attr:`input`.
The inverse of this function is :func:`~torch.ifft`.
.. note::
For CUDA tensors, an LRU cache is used for cuFFT plans to speed up
repeatedly running FFT methods on tensors of same geometry with same
configuration. See :ref:`cufft-plan-cache` for more details on how to
monitor and control the cache.
.. warning::
Due to limited dynamic range of half datatype, performing this operation in half
precision may cause the first element of result to overflow for certain inputs.
.. warning::
For CPU tensors, this method is currently only available with MKL. Use
:func:`torch.backends.mkl.is_available` to check if MKL is installed.
Arguments:
input (Tensor): the input tensor of at least :attr:`signal_ndim` ``+ 1``
dimensions
signal_ndim (int): the number of dimensions in each signal.
:attr:`signal_ndim` can only be 1, 2 or 3
normalized (bool, optional): controls whether to return normalized results.
Default: ``False``
Returns:
Tensor: A tensor containing the complex-to-complex Fourier transform result
Example::
>>> # unbatched 2D FFT
>>> x = torch.randn(4, 3, 2)
>>> torch.fft(x, 2)
tensor([[[-0.0876, 1.7835],
[-2.0399, -2.9754],
[ 4.4773, -5.0119]],
[[-1.5716, 2.7631],
[-3.8846, 5.2652],
[ 0.2046, -0.7088]],
[[ 1.9938, -0.5901],
[ 6.5637, 6.4556],
[ 2.9865, 4.9318]],
[[ 7.0193, 1.1742],
[-1.3717, -2.1084],
[ 2.0289, 2.9357]]])
>>> # batched 1D FFT
>>> torch.fft(x, 1)
tensor([[[ 1.8385, 1.2827],
[-0.1831, 1.6593],
[ 2.4243, 0.5367]],
[[-0.9176, -1.5543],
[-3.9943, -2.9860],
[ 1.2838, -2.9420]],
[[-0.8854, -0.6860],
[ 2.4450, 0.0808],
[ 1.3076, -0.5768]],
[[-0.1231, 2.7411],
[-0.3075, -1.7295],
[-0.5384, -2.0299]]])
>>> # arbitrary number of batch dimensions, 2D FFT
>>> x = torch.randn(3, 3, 5, 5, 2)
>>> y = torch.fft(x, 2)
>>> y.shape
torch.Size([3, 3, 5, 5, 2])
""")
# torch.ifft — inverse of torch.fft (includes the 1/prod(N_i) factor).
add_docstr(torch.ifft,
r"""
ifft(input, signal_ndim, normalized=False) -> Tensor
Complex-to-complex Inverse Discrete Fourier Transform
This method computes the complex-to-complex inverse discrete Fourier
transform. Ignoring the batch dimensions, it computes the following
expression:
.. math::
X[\omega_1, \dots, \omega_d] =
\frac{1}{\prod_{i=1}^d N_i} \sum_{n_1=0}^{N_1-1} \dots \sum_{n_d=0}^{N_d-1} x[n_1, \dots, n_d]
e^{\ j\ 2 \pi \sum_{i=0}^d \frac{\omega_i n_i}{N_i}},
where :math:`d` = :attr:`signal_ndim` is number of dimensions for the
signal, and :math:`N_i` is the size of signal dimension :math:`i`.
The argument specifications are almost identical with :func:`~torch.fft`.
However, if :attr:`normalized` is set to ``True``, this instead returns the
results multiplied by :math:`\sqrt{\prod_{i=1}^d N_i}`, to become a unitary
operator. Therefore, to invert a :func:`~torch.fft`, the :attr:`normalized`
argument should be set identically for :func:`~torch.fft`.
Returns the real and the imaginary parts together as one tensor of the same
shape of :attr:`input`.
The inverse of this function is :func:`~torch.fft`.
.. note::
For CUDA tensors, an LRU cache is used for cuFFT plans to speed up
repeatedly running FFT methods on tensors of same geometry with same
configuration. See :ref:`cufft-plan-cache` for more details on how to
monitor and control the cache.
.. warning::
Due to limited dynamic range of half datatype, performing this operation in half
precision may cause the first element of result to overflow for certain inputs.
.. warning::
For CPU tensors, this method is currently only available with MKL. Use
:func:`torch.backends.mkl.is_available` to check if MKL is installed.
Arguments:
input (Tensor): the input tensor of at least :attr:`signal_ndim` ``+ 1``
dimensions
signal_ndim (int): the number of dimensions in each signal.
:attr:`signal_ndim` can only be 1, 2 or 3
normalized (bool, optional): controls whether to return normalized results.
Default: ``False``
Returns:
Tensor: A tensor containing the complex-to-complex inverse Fourier transform result
Example::
>>> x = torch.randn(3, 3, 2)
>>> x
tensor([[[ 1.2766, 1.3680],
[-0.8337, 2.0251],
[ 0.9465, -1.4390]],
[[-0.1890, 1.6010],
[ 1.1034, -1.9230],
[-0.9482, 1.0775]],
[[-0.7708, -0.8176],
[-0.1843, -0.2287],
[-1.9034, -0.2196]]])
>>> y = torch.fft(x, 2)
>>> torch.ifft(y, 2) # recover x
tensor([[[ 1.2766, 1.3680],
[-0.8337, 2.0251],
[ 0.9465, -1.4390]],
[[-0.1890, 1.6010],
[ 1.1034, -1.9230],
[-0.9482, 1.0775]],
[[-0.7708, -0.8176],
[-0.1843, -0.2287],
[-1.9034, -0.2196]]])
""")
# torch.rfft — real-to-complex DFT; `onesided=True` keeps only the
# non-redundant half of the spectrum (conjugate symmetry).
add_docstr(torch.rfft,
r"""
rfft(input, signal_ndim, normalized=False, onesided=True) -> Tensor
Real-to-complex Discrete Fourier Transform
This method computes the real-to-complex discrete Fourier transform. It is
mathematically equivalent with :func:`~torch.fft` with differences only in
formats of the input and output.
This method supports 1D, 2D and 3D real-to-complex transforms, indicated
by :attr:`signal_ndim`. :attr:`input` must be a tensor with at least
``signal_ndim`` dimensions with optionally arbitrary number of leading batch
dimensions. If :attr:`normalized` is set to ``True``, this normalizes the result
by dividing it with :math:`\sqrt{\prod_{i=1}^K N_i}` so that the operator is
unitary, where :math:`N_i` is the size of signal dimension :math:`i`.
The real-to-complex Fourier transform results follow conjugate symmetry:
.. math::
X[\omega_1, \dots, \omega_d] = X^*[N_1 - \omega_1, \dots, N_d - \omega_d],
where the index arithmetic is computed modulus the size of the corresponding
dimension, :math:`\ ^*` is the conjugate operator, and
:math:`d` = :attr:`signal_ndim`. :attr:`onesided` flag controls whether to avoid
redundancy in the output results. If set to ``True`` (default), the output will
not be full complex result of shape :math:`(*, 2)`, where :math:`*` is the shape
of :attr:`input`, but instead the last dimension will be halved as of size
:math:`\lfloor \frac{N_d}{2} \rfloor + 1`.
The inverse of this function is :func:`~torch.irfft`.
.. note::
For CUDA tensors, an LRU cache is used for cuFFT plans to speed up
repeatedly running FFT methods on tensors of same geometry with same
configuration. See :ref:`cufft-plan-cache` for more details on how to
monitor and control the cache.
.. warning::
Due to limited dynamic range of half datatype, performing this operation in half
precision may cause the first element of result to overflow for certain inputs.
.. warning::
For CPU tensors, this method is currently only available with MKL. Use
:func:`torch.backends.mkl.is_available` to check if MKL is installed.
Arguments:
input (Tensor): the input tensor of at least :attr:`signal_ndim` dimensions
signal_ndim (int): the number of dimensions in each signal.
:attr:`signal_ndim` can only be 1, 2 or 3
normalized (bool, optional): controls whether to return normalized results.
Default: ``False``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
Returns:
Tensor: A tensor containing the real-to-complex Fourier transform result
Example::
>>> x = torch.randn(5, 5)
>>> torch.rfft(x, 2).shape
torch.Size([5, 3, 2])
>>> torch.rfft(x, 2, onesided=False).shape
torch.Size([5, 5, 2])
""")
# torch.irfft — complex-to-real inverse DFT; `signal_sizes` disambiguates the
# original length lost by one-sided storage.
add_docstr(torch.irfft,
r"""
irfft(input, signal_ndim, normalized=False, onesided=True, signal_sizes=None) -> Tensor
Complex-to-real Inverse Discrete Fourier Transform
This method computes the complex-to-real inverse discrete Fourier transform.
It is mathematically equivalent with :func:`ifft` with differences only in
formats of the input and output.
The argument specifications are almost identical with :func:`~torch.ifft`.
Similar to :func:`~torch.ifft`, if :attr:`normalized` is set to ``True``,
this normalizes the result by multiplying it with
:math:`\sqrt{\prod_{i=1}^K N_i}` so that the operator is unitary, where
:math:`N_i` is the size of signal dimension :math:`i`.
.. note::
Due to the conjugate symmetry, :attr:`input` do not need to contain the full
complex frequency values. Roughly half of the values will be sufficient, as
is the case when :attr:`input` is given by :func:`~torch.rfft` with
``rfft(signal, onesided=True)``. In such case, set the :attr:`onesided`
argument of this method to ``True``. Moreover, the original signal shape
information can sometimes be lost, optionally set :attr:`signal_sizes` to be
the size of the original signal (without the batch dimensions if in batched
mode) to recover it with correct shape.
Therefore, to invert an :func:`~torch.rfft`, the :attr:`normalized` and
:attr:`onesided` arguments should be set identically for :func:`~torch.irfft`,
and preferably a :attr:`signal_sizes` is given to avoid size mismatch. See the
example below for a case of size mismatch.
See :func:`~torch.rfft` for details on conjugate symmetry.
The inverse of this function is :func:`~torch.rfft`.
.. warning::
Generally speaking, input to this function should contain values
following conjugate symmetry. Note that even if :attr:`onesided` is
``True``, often symmetry on some part is still needed. When this
requirement is not satisfied, the behavior of :func:`~torch.irfft` is
undefined. Since :func:`torch.autograd.gradcheck` estimates numerical
Jacobian with point perturbations, :func:`~torch.irfft` will almost
certainly fail the check.
.. note::
For CUDA tensors, an LRU cache is used for cuFFT plans to speed up
repeatedly running FFT methods on tensors of same geometry with same
configuration. See :ref:`cufft-plan-cache` for more details on how to
monitor and control the cache.
.. warning::
Due to limited dynamic range of half datatype, performing this operation in half
precision may cause the first element of result to overflow for certain inputs.
.. warning::
For CPU tensors, this method is currently only available with MKL. Use
:func:`torch.backends.mkl.is_available` to check if MKL is installed.
Arguments:
input (Tensor): the input tensor of at least :attr:`signal_ndim` ``+ 1``
dimensions
signal_ndim (int): the number of dimensions in each signal.
:attr:`signal_ndim` can only be 1, 2 or 3
normalized (bool, optional): controls whether to return normalized results.
Default: ``False``
onesided (bool, optional): controls whether :attr:`input` was halved to avoid
redundancy, e.g., by :func:`rfft`. Default: ``True``
signal_sizes (list or :class:`torch.Size`, optional): the size of the original
signal (without batch dimension). Default: ``None``
Returns:
Tensor: A tensor containing the complex-to-real inverse Fourier transform result
Example::
>>> x = torch.randn(4, 4)
>>> torch.rfft(x, 2, onesided=True).shape
torch.Size([4, 3, 2])
>>>
>>> # notice that with onesided=True, output size does not determine the original signal size
>>> x = torch.randn(4, 5)
>>> torch.rfft(x, 2, onesided=True).shape
torch.Size([4, 3, 2])
>>>
>>> # now we use the original shape to recover x
>>> x
tensor([[-0.8992, 0.6117, -1.6091, -0.4155, -0.8346],
[-2.1596, -0.0853, 0.7232, 0.1941, -0.0789],
[-2.0329, 1.1031, 0.6869, -0.5042, 0.9895],
[-0.1884, 0.2858, -1.5831, 0.9917, -0.8356]])
>>> y = torch.rfft(x, 2, onesided=True)
>>> torch.irfft(y, 2, onesided=True, signal_sizes=x.shape) # recover x
tensor([[-0.8992, 0.6117, -1.6091, -0.4155, -0.8346],
[-2.1596, -0.0853, 0.7232, 0.1941, -0.0789],
[-2.0329, 1.1031, 0.6869, -0.5042, 0.9895],
[-0.1884, 0.2858, -1.5831, 0.9917, -0.8356]])
""")
# --- Window functions ---------------------------------------------------------
# Each docstring is assembled from a plain-string signature part plus raw-string
# body parts, then formatted with factory_common_args.
# (Fixed an unbalanced trailing ")" in the "...periodic=False)[:-1])" equivalence
# line of all four window docstrings.)
# torch.hann_window — Hann window; periodic=True drops the duplicated endpoint.
add_docstr(torch.hann_window,
"""
hann_window(window_length, periodic=True, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Hann window function.
.. math::
w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
\sin^2 \left( \frac{\pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hann_window(L, periodic=True)`` equal to
``torch.hann_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
# torch.hamming_window — generalized Hann with tunable alpha/beta coefficients.
add_docstr(torch.hamming_window,
"""
hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Hamming window function.
.. math::
w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.hamming_window(L, periodic=True)`` equal to
``torch.hamming_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
.. note::
This is a generalized version of :meth:`torch.hann_window`.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
alpha (float, optional): The coefficient :math:`\alpha` in the equation above
beta (float, optional): The coefficient :math:`\beta` in the equation above
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
# torch.bartlett_window — triangular (Bartlett) window.
add_docstr(torch.bartlett_window,
"""
bartlett_window(window_length, periodic=True, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Bartlett window function.
.. math::
w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
\frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
\end{cases},
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.bartlett_window(L, periodic=True)`` equal to
``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
# torch.blackman_window — Blackman window (three-term cosine sum).
add_docstr(torch.blackman_window,
"""
blackman_window(window_length, periodic=True, dtype=None, \
layout=torch.strided, device=None, requires_grad=False) -> Tensor
""" + r"""
Blackman window function.
.. math::
w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
where :math:`N` is the full window size.
The input :attr:`window_length` is a positive integer controlling the
returned window size. :attr:`periodic` flag determines whether the returned
window trims off the last duplicate value from the symmetric window and is
ready to be used as a periodic window with functions like
:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
``torch.blackman_window(L, periodic=True)`` equal to
``torch.blackman_window(L + 1, periodic=False)[:-1]``.
.. note::
If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
""" + r"""
Arguments:
window_length (int): the size of returned window
periodic (bool, optional): If True, returns a window to be used as periodic
function. If False, return a symmetric window.
{dtype} Only floating point types are supported.
layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
``torch.strided`` (dense layout) is supported.
{device}
{requires_grad}
Returns:
Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
""".format(**factory_common_args))
add_docstr(torch.vander,
"""
vander(x, N=None, increasing=False) -> Tensor
""" + r"""
Generates a Vandermonde matrix.
The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.
Arguments:
x (Tensor): 1-D input tensor.
N (int, optional): Number of columns in the output. If N is not specified,
a square array is returned :math:`(N = len(x))`.
increasing (bool, optional): Order of the powers of the columns. If True,
the powers increase from left to right, if False (the default) they are reversed.
Returns:
Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
are :math:`x^0, x^1, ..., x^{{(N-1)}}`.
Example::
>>> x = torch.tensor([1, 2, 3, 5])
>>> torch.vander(x)
tensor([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> torch.vander(x, N=3)
tensor([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> torch.vander(x, N=3, increasing=True)
tensor([[ 1, 1, 1],
[ 1, 2, 4],
[ 1, 3, 9],
[ 1, 5, 25]])
""".format(**factory_common_args))
# --- torch.unbind -----------------------------------------------------------
# Fix: continuation lines of a multi-line call inside a doctest must be
# prefixed with "..." rather than ">>>"; otherwise doctest parses each line as
# an independent (syntactically broken) statement.
add_docstr(torch.unbind,
r"""
unbind(input, dim=0) -> seq
Removes a tensor dimension.
Returns a tuple of all slices along a given dimension, already without it.
Arguments:
input (Tensor): the tensor to unbind
dim (int): dimension to remove
Example::
>>> torch.unbind(torch.tensor([[1, 2, 3],
...                            [4, 5, 6],
...                            [7, 8, 9]]))
(tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
""")
# --- torch.combinations -----------------------------------------------------
# Mirrors itertools.combinations / combinations_with_replacement on a 1-D tensor.
add_docstr(torch.combinations,
r"""
combinations(input, r=2, with_replacement=False) -> seq
Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
python's `itertools.combinations` when `with_replacement` is set to `False`, and
`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
Arguments:
input (Tensor): 1D vector.
r (int, optional): number of elements to combine
with_replacement (boolean, optional): whether to allow duplication in combination
Returns:
Tensor: A tensor equivalent to converting all the input tensors into lists, do
`itertools.combinations` or `itertools.combinations_with_replacement` on these
lists, and finally convert the resulting list into tensor.
Example::
>>> a = [1, 2, 3]
>>> list(itertools.combinations(a, r=2))
[(1, 2), (1, 3), (2, 3)]
>>> list(itertools.combinations(a, r=3))
[(1, 2, 3)]
>>> list(itertools.combinations_with_replacement(a, r=2))
[(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
>>> tensor_a = torch.tensor(a)
>>> torch.combinations(tensor_a)
tensor([[1, 2],
[1, 3],
[2, 3]])
>>> torch.combinations(tensor_a, r=3)
tensor([[1, 2, 3]])
>>> torch.combinations(tensor_a, with_replacement=True)
tensor([[1, 1],
[1, 2],
[1, 3],
[2, 2],
[2, 3],
[3, 3]])
""")
# --- torch.trapz ------------------------------------------------------------
# Two overloads documented in one string: explicit sample points `x`, and a
# uniform spacing `dx` (the `.. function::` directive below).
add_docstr(torch.trapz,
r"""
trapz(y, x, *, dim=-1) -> Tensor
Estimate :math:`\int y\,dx` along `dim`, using the trapezoid rule.
Arguments:
y (Tensor): The values of the function to integrate
x (Tensor): The points at which the function `y` is sampled.
If `x` is not in ascending order, intervals on which it is decreasing
contribute negatively to the estimated integral (i.e., the convention
:math:`\int_a^b f = -\int_b^a f` is followed).
dim (int): The dimension along which to integrate.
By default, use the last dimension.
Returns:
A Tensor with the same shape as the input, except with `dim` removed.
Each element of the returned tensor represents the estimated integral
:math:`\int y\,dx` along `dim`.
Example::
>>> y = torch.randn((2, 3))
>>> y
tensor([[-2.1156, 0.6857, -0.2700],
[-1.2145, 0.5540, 2.0431]])
>>> x = torch.tensor([[1, 3, 4], [1, 2, 3]])
>>> torch.trapz(y, x)
tensor([-1.2220, 0.9683])
.. function:: trapz(y, *, dx=1, dim=-1) -> Tensor
As above, but the sample points are spaced uniformly at a distance of `dx`.
Arguments:
y (Tensor): The values of the function to integrate
dx (float): The distance between points at which `y` is sampled.
dim (int): The dimension along which to integrate.
By default, use the last dimension.
Returns:
A Tensor with the same shape as the input, except with `dim` removed.
Each element of the returned tensor represents the estimated integral
:math:`\int y\,dx` along `dim`.
""")
# --- torch.repeat_interleave ------------------------------------------------
# Shared {input} blurb comes from ``common_args`` (not factory_common_args).
add_docstr(torch.repeat_interleave,
r"""
repeat_interleave(input, repeats, dim=None) -> Tensor
Repeat elements of a tensor.
.. warning::
This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
Args:
{input}
repeats (Tensor or int): The number of repetitions for each element.
repeats is broadcasted to fit the shape of the given axis.
dim (int, optional): The dimension along which to repeat values.
By default, use the flattened input array, and return a flat output
array.
Returns:
Tensor: Repeated tensor which has the same shape as input, except along the
given axis.
Example::
>>> x = torch.tensor([1, 2, 3])
>>> x.repeat_interleave(2)
tensor([1, 1, 2, 2, 3, 3])
>>> y = torch.tensor([[1, 2], [3, 4]])
>>> torch.repeat_interleave(y, 2)
tensor([1, 1, 2, 2, 3, 3, 4, 4])
>>> torch.repeat_interleave(y, 3, dim=1)
tensor([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
tensor([[1, 2],
[3, 4],
[3, 4]])
.. function:: repeat_interleave(repeats) -> Tensor
If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
`1` appears `n2` times, `2` appears `n3` times, etc.
""".format(**common_args))
# --- torch.quantize_per_tensor ----------------------------------------------
add_docstr(torch.quantize_per_tensor,
r"""
quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
Converts a float tensor to quantized tensor with given scale and zero point.
Arguments:
input (Tensor): float tensor to quantize
scale (float): scale to apply in quantization formula
zero_point (int): offset in integer value that maps to float zero
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor
Example::
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8,
quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
>>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
tensor([ 0, 10, 20, 30], dtype=torch.uint8)
""")
# --- torch.quantize_per_channel ---------------------------------------------
add_docstr(torch.quantize_per_channel,
r"""
quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
Converts a float tensor to per-channel quantized tensor with given scales and zero points.
Arguments:
input (Tensor): float tensor to quantize
scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)``
axis (int): dimension on which apply per-channel quantization
dtype (:class:`torch.dtype`): the desired data type of returned tensor.
Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
Returns:
Tensor: A newly quantized tensor
Example::
>>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
tensor([[-1., 0.],
[ 1., 2.]], size=(2, 2), dtype=torch.quint8,
quantization_scheme=torch.per_channel_affine,
scale=tensor([0.1000, 0.0100], dtype=torch.float64),
zero_point=tensor([10, 0]), axis=0)
>>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
tensor([[ 0, 10],
[100, 200]], dtype=torch.uint8)
""")
# --- torch.Generator --------------------------------------------------------
# Fix: grammar typo "An torch.Generator object" -> "A torch.Generator object".
add_docstr(torch.Generator,
r"""
Generator(device='cpu') -> Generator
Creates and returns a generator object which manages the state of the algorithm that
produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
functions.
Arguments:
device (:class:`torch.device`, optional): the desired device for the generator.
Returns:
Generator: A torch.Generator object.
Example::
>>> g_cpu = torch.Generator()
>>> g_cuda = torch.Generator(device='cuda')
""")
# --- torch.Generator.set_state / get_state ----------------------------------
# The two halves of RNG-state round-tripping: get_state snapshots the state as
# a ByteTensor and set_state restores it.
add_docstr(torch.Generator.set_state,
r"""
Generator.set_state(new_state) -> void
Sets the Generator state.
Arguments:
new_state (torch.ByteTensor): The desired state.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu_other = torch.Generator()
>>> g_cpu.set_state(g_cpu_other.get_state())
""")
add_docstr(torch.Generator.get_state,
r"""
Generator.get_state() -> Tensor
Returns the Generator state as a ``torch.ByteTensor``.
Returns:
Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
to restore a Generator to a specific point in time.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.get_state()
""")
# --- torch.Generator.manual_seed --------------------------------------------
# Fix: grammar typo "An torch.Generator object" -> "A torch.Generator object".
add_docstr(torch.Generator.manual_seed,
r"""
Generator.manual_seed(seed) -> Generator
Sets the seed for generating random numbers. Returns a `torch.Generator` object.
It is recommended to set a large seed, i.e. a number that has a good balance of 0
and 1 bits. Avoid having many 0 bits in the seed.
Arguments:
seed (int): The desired seed.
Returns:
Generator: A torch.Generator object.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.manual_seed(2147483647)
""")
# --- torch.Generator.initial_seed / seed / device ---------------------------
# initial_seed reports the seed in use; seed() draws a fresh non-deterministic
# seed; device is a read-only property.
add_docstr(torch.Generator.initial_seed,
r"""
Generator.initial_seed() -> int
Returns the initial seed for generating random numbers.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.initial_seed()
2147483647
""")
add_docstr(torch.Generator.seed,
r"""
Generator.seed() -> int
Gets a non-deterministic random number from std::random_device or the current
time and uses it to seed a Generator.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.seed()
1516516984916
""")
add_docstr(torch.Generator.device,
r"""
Generator.device -> device
Gets the current device of the generator.
Example::
>>> g_cpu = torch.Generator()
>>> g_cpu.device
device(type='cpu')
""")
# --- torch.searchsorted -----------------------------------------------------
# Fixes: "one pass the last index" -> "one past the last index", and
# "eg." -> "e.g.".
add_docstr(torch.searchsorted,
r"""
searchsorted(sorted_sequence, values, out_int32=False, right=False, out=None) -> Tensor
Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
corresponding values in :attr:`values` were inserted before the indices, the order of the
corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
Return a new tensor with the same size as :attr:`values`. If :attr:`right` is False (default),
then the left boundary of :attr:`sorted_sequence` is closed. More formally, the returned index
satisfies the following rules:
.. list-table::
:widths: 12 10 78
:header-rows: 1
* - :attr:`sorted_sequence`
- :attr:`right`
- *returned index satisfies*
* - 1-D
- False
- ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
* - 1-D
- True
- ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
* - N-D
- False
- ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
* - N-D
- True
- ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
Args:
sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
dimension.
values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index found, return 0 for non-numerical value
(e.g. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence`
(one past the last index of the *innermost* dimension). In other words, if False,
gets the lower bound index for each value in :attr:`values` on the corresponding
*innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
bound index instead. Default value is False.
out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
.. note:: If your use case is always 1-D sorted sequence, :func:`torch.bucketize` is preferred,
because it has fewer dimension checks resulting in slightly better performance.
Example::
>>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
>>> sorted_sequence
tensor([[ 1, 3, 5, 7, 9],
[ 2, 4, 6, 8, 10]])
>>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> values
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.searchsorted(sorted_sequence, values)
tensor([[1, 3, 4],
[1, 2, 4]])
>>> torch.searchsorted(sorted_sequence, values, right=True)
tensor([[2, 3, 5],
[1, 3, 4]])
>>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
>>> sorted_sequence_1d
tensor([1, 3, 5, 7, 9])
>>> torch.searchsorted(sorted_sequence_1d, values)
tensor([[1, 3, 4],
[1, 3, 4]])
""")
# --- torch.bucketize --------------------------------------------------------
# Fixes: "one pass the last index" -> "one past the last index", and
# "eg." -> "e.g.".
add_docstr(torch.bucketize,
r"""
bucketize(input, boundaries, out_int32=False, right=False, out=None) -> Tensor
Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
as :attr:`input`. If :attr:`right` is False (default), then the left boundary is closed. More
formally, the returned index satisfies the following rules:
.. list-table::
:widths: 15 85
:header-rows: 1
* - :attr:`right`
- *returned index satisfies*
* - False
- ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
* - True
- ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
Args:
input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
boundaries (Tensor): 1-D tensor, must contain a monotonically increasing sequence.
out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
Default value is False, i.e. default output data type is torch.int64.
right (bool, optional): if False, return the first suitable location that is found. If True, return the
last such index. If no suitable index found, return 0 for non-numerical value
(e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
In other words, if False, gets the lower bound index for each value in :attr:`input`
from :attr:`boundaries`. If True, gets the upper bound index instead.
Default value is False.
out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
Example::
>>> boundaries = torch.tensor([1, 3, 5, 7, 9])
>>> boundaries
tensor([1, 3, 5, 7, 9])
>>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
>>> v
tensor([[3, 6, 9],
[3, 6, 9]])
>>> torch.bucketize(v, boundaries)
tensor([[1, 3, 4],
[1, 3, 4]])
>>> torch.bucketize(v, boundaries, right=True)
tensor([[2, 3, 5],
[2, 3, 5]])
""")
| [
"auau@oregonstate.edu"
] | auau@oregonstate.edu |
bf2364bce092239e91cfc27cd4a483dba41eb9d2 | 5164e4ccb0cfb2b16ba3805b7e5a05ed4153a245 | /Lab03/GraphQL.py | 96210aafd9af8a39d0e1b5ee7c6d3b8c0e2639ab | [
"Unlicense"
] | permissive | Zuquim/smelly-octopus | 538c40f36304306f01a3a8cfa0e462f1ee2ad58b | c9e2959a95b5d5c4b0a25c6c6987b63a9d1984f2 | refs/heads/master | 2023-08-21T18:10:25.629871 | 2020-05-15T18:25:44 | 2020-05-15T18:25:44 | 240,612,179 | 0 | 0 | Unlicense | 2023-08-11T19:53:17 | 2020-02-14T22:51:27 | Python | UTF-8 | Python | false | false | 3,844 | py | from csv import writer
from json import dumps
from requests import post
import time
# GitHub API token belongs after "token " -- left blank in this checkout.
# NOTE(review): requests will send an incomplete Authorization header (and the
# API will reject it) until a real token is filled in.
headers = {"Authorization": "token "}
# Repository search: 100 Python repos per page with >100 stars, created since
# 2016-01-01.  The literal {AFTER} marker is patched by str.replace (these are
# NOT GraphQL variables) with an `after:` cursor clause for pagination.
repositoriesQuery = """
query repositoriesQuery {
search(type: REPOSITORY, first: 100, query: "stars:>100 created:>=2016-01-01 language:python"{AFTER}) {
pageInfo {
hasNextPage
endCursor
}
nodes {
... on Repository {
id
nameWithOwner
url
stargazers {
totalCount
}
issues {
totalCount
}
}
}
}
}
"""
# Per-repository issue listing (10 oldest-first per page); {OWNER}, {NAME} and
# {AFTER} are likewise textual placeholders patched in by the crawler below.
issuesQuery = """
query example {
repository(owner: "{OWNER}", name: "{NAME}"){
issues(first: 10, orderBy:{field: CREATED_AT, direction: ASC}{AFTER}){
pageInfo{
hasNextPage
endCursor
}
nodes {
id
title
createdAt
closedAt
closed
}
}
}
}
"""
def runQuery(query):
    """POST a GraphQL document to the GitHub v4 API and return the decoded
    JSON body.

    Retries indefinitely (2 s apart) while the server answers HTTP 502;
    raises for any other non-200 status.
    """
    endpoint = 'https://api.github.com/graphql'
    payload = {'query': query}
    response = post(endpoint, json=payload, headers=headers)
    # GitHub's GraphQL endpoint intermittently returns 502; back off briefly
    # and retry until a definitive status arrives.
    while response.status_code == 502:
        time.sleep(2)
        response = post(endpoint, json=payload, headers=headers)
    if response.status_code != 200:
        raise Exception("Query falhou! Codigo de retorno: {}. {}".format(response.status_code, query))
    return response.json()
def getAllRepositories(query):
    """Page through the GraphQL repository search (up to 11 requests of 100
    repositories each) and append every result node to repositories.csv."""
    def fill_query(after_clause):
        # {AFTER} is spliced verbatim into the search() arguments.
        return query.replace("{AFTER}", after_clause)

    collected = []
    after_clause = ""
    pages_fetched = 0
    while True:
        search = runQuery(fill_query(after_clause))["data"]["search"]
        pages_fetched += 1
        collected += search["nodes"]
        # Stop when the API says there is nothing more, or after the page cap.
        if not search["pageInfo"]["hasNextPage"] or pages_fetched > 10:
            break
        cursor = search["pageInfo"]["endCursor"]
        after_clause = f', after: "{cursor}"'
    writeCSV("repositories.csv", collected)
def getAllIssues(query):
    """For every repository recorded in repositories.csv, fetch its issues and
    append them to issues.csv, each row tagged with the repository id, owner
    and name.

    Fix: parse repositories.csv with the csv module instead of a bare
    ``line.split(",")`` -- csv.writer may quote fields containing commas,
    which naive splitting would mis-parse.  Blank rows are skipped.
    """
    from csv import reader  # local import: module level only imports csv.writer
    with open("repositories.csv", "r", encoding="utf-8", newline="") as f:
        rows = [row for row in reader(f) if row]
    for row in rows:
        idRepository = row[0]
        # Column 1 is GitHub's "owner/name" pair.
        nameWithOwner = row[1].split("/")
        owner = nameWithOwner[0]
        name = nameWithOwner[1]
        allResults = getRepositoryIssues(owner, name, query)
        for result in allResults:
            # Tag each issue with its repository so the flat CSV stays joinable.
            result["idRepository"] = idRepository
            result["owner"] = owner
            result["name"] = name
        writeCSV("issues.csv", allResults)
def getRepositoryIssues(owner, name, query):
    """Return the issue nodes of one repository, following pagination for at
    most 11 requests (10 issues per page)."""
    def fill_query(after_clause):
        # {OWNER}/{NAME}/{AFTER} are plain textual placeholders in the query.
        return (query.replace("{OWNER}", owner)
                     .replace("{NAME}", name)
                     .replace("{AFTER}", after_clause))

    collected = []
    after_clause = ""
    pages_fetched = 0
    while True:
        issues = runQuery(fill_query(after_clause))["data"]["repository"]["issues"]
        pages_fetched += 1
        collected += issues["nodes"]
        # Stop when the API reports no further page, or after the page cap.
        if not issues["pageInfo"]["hasNextPage"] or pages_fetched > 10:
            break
        cursor = issues["pageInfo"]["endCursor"]
        after_clause = f', after: "{cursor}"'
    return collected
def writeCSV(file, allResults):
    """Append one CSV row per dict in *allResults*, writing only the dict
    values (insertion order), with no header row."""
    with open(file, "a", newline='', encoding="utf-8") as handle:
        writer(handle).writerows(entry.values() for entry in allResults)
def main():
    """Crawl repositories first (writes repositories.csv), then their issues
    (reads that file and writes issues.csv)."""
    getAllRepositories(repositoriesQuery)
    getAllIssues(issuesQuery)
# Fix: guard the entry point so importing this module no longer starts a crawl.
if __name__ == "__main__":
    main()
| [
"isabelaedilene@gmail.com"
] | isabelaedilene@gmail.com |
from tkinter import *

# Minimal tkinter demo: a two-row login form laid out with the grid manager.
root = Tk()

# Build each row as (caption label, input box).
username_label = Label(root, text="username")
password_label = Label(root, text="password")
username_entry = Entry(root)
password_entry = Entry(root)

# Column 0 holds the captions, column 1 the entry boxes.
for row_index, (caption, box) in enumerate([(username_label, username_entry),
                                            (password_label, password_entry)]):
    caption.grid(row=row_index)
    box.grid(row=row_index, column=1)

root.mainloop()
"dhruv.r.patel14@gmail.com"
] | dhruv.r.patel14@gmail.com |
efaf5827b686a2a2c8b12a2e327f2178fa269f5c | 7954d761dde104a9d977006c514ff976a9c88444 | /backend/menu/migrations/0001_initial.py | a6a707da319ae2e8ae9d0ffbe9ae598eb1ac1002 | [] | no_license | crowdbotics-apps/firebase-25585 | 3c693fee6f6e75805fe5b8d40f24ee6b137e29e3 | 5473848fbdad0683030c8f3bd64d03fdc4a1382c | refs/heads/master | 2023-04-05T13:07:26.443879 | 2021-04-09T10:28:31 | 2021-04-09T10:28:31 | 356,229,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,144 | py | # Generated by Django 2.2.19 on 2021-04-09 10:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the menu app (Django 2.2).

    Generated migrations may already be applied to databases; schema changes
    belong in a new migration rather than edits here.
    """
    # First migration of this app.
    initial = True
    # Review below references delivery_user_profile.Profile, so that app's
    # initial migration must run first.
    dependencies = [
        ('delivery_user_profile', '0001_initial'),
    ]
    operations = [
        # Menu category with display assets (image + icon URLs).
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('image', models.URLField()),
                ('icon', models.URLField()),
            ],
        ),
        # Country lookup table.  NOTE(review): 'prefix' (max 8 chars) looks
        # like a dialing prefix -- confirm against application code.
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('prefix', models.CharField(max_length=8)),
                ('flag', models.URLField()),
            ],
        ),
        # Menu item; category uses SET_NULL so deleting a category keeps items.
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('image', models.URLField()),
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='item_category', to='menu.Category')),
            ],
        ),
        # User review of an item: survives profile deletion (SET_NULL) but is
        # removed together with its item (CASCADE).
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.FloatField()),
                ('review_text', models.TextField()),
                ('timestamp_created', models.DateTimeField(auto_now_add=True)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='menu.Item')),
                ('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='review_profile', to='delivery_user_profile.Profile')),
            ],
        ),
        # Country-specific, priced variant of an Item (both FKs CASCADE).
        migrations.CreateModel(
            name='ItemVariant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('price', models.FloatField()),
                ('image', models.URLField()),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_country', to='menu.Country')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='itemvariant_item', to='menu.Item')),
            ],
        ),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
def maxChunksToSorted(arr):
    """
    :type arr: List[int]
    :rtype: int

    LeetCode 769: *arr* is a permutation of [0, len(arr) - 1].  A chunk may
    end at index i exactly when max(arr[0..i]) == i, because then the prefix
    holds precisely the values 0..i.

    Fixes/improvements over the original:
    - returns 0 for an empty list (the original indexed arr[-1] and crashed);
    - drops the O(n) ``rightmin`` helper array: for a permutation,
      prefix-max == i already implies min(arr[i+1:]) == i + 1, so the extra
      check was redundant.  Now O(n) time, O(1) space.
    """
    if not arr:
        return 0
    chunks = 0
    running_max = arr[0]
    for i, value in enumerate(arr):
        running_max = max(running_max, value)
        # The prefix 0..i is a complete chunk when it contains exactly 0..i.
        if running_max == i:
            chunks += 1
    return chunks
# Smoke test: the expected answer for this permutation is 4.
print(maxChunksToSorted([1,0,2,3,4]))
"1064222854@qq.com"
] | 1064222854@qq.com |
ee0608d85e3d44cb755901e6aa79d6964ff34242 | 72ddc142197b20ea9dec88c40d46448b56aa4f1c | /350. Intersection of Two Arrays II.py | 7fff6aae29dbd5d4934154cc27fefa23ff2f4bb4 | [] | no_license | dattnguyen/Leetcode_exercises | ee6a4325b47f8a07844bbb84511b1e158edfc7ac | d74d8987f07dcfd4e02348385f88381802adb1aa | refs/heads/master | 2022-12-31T10:15:31.443277 | 2020-10-09T20:28:56 | 2020-10-09T20:28:56 | 267,236,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # # Given two arrays, write a function to compute their intersection.
# Note:
#
# Each element in the result should appear as many times as it shows in both arrays.
# The result can be in any order.
def intersection(nums1, nums2):
    """Return the multiset intersection of two integer lists (LeetCode 350).

    Each common element appears min(count in nums1, count in nums2) times;
    the order of the result is unspecified.

    Fixes over the original:
    - the second counting loop read ``hmap2.get(i, 0)`` using the stale loop
      variable from the first loop instead of ``j``;
    - ``for key1, value1 in hmap1`` tried to unpack plain dict keys and raised
      TypeError;
    - ``res.append([key1] * ...)`` nested a list instead of extending;
    - ``return print(res)`` returned None instead of the result.
    """
    counts1 = {}
    for value in nums1:
        counts1[value] = counts1.get(value, 0) + 1
    counts2 = {}
    for value in nums2:
        counts2[value] = counts2.get(value, 0) + 1
    res = []
    for value, count in counts1.items():
        if value in counts2:
            # Emit the element as many times as both sides can supply it.
            res.extend([value] * min(count, counts2[value]))
    return res
# Smoke invocation; the multiset intersection here is {4, 9}.
intersection([4,9,5], [9,4,9,8,4])
"tiendat.ams@gmail.com"
] | tiendat.ams@gmail.com |
2973c8a04aa45789fe2dd63d8482dcf76c80e95b | 53440fe1e7370b564d3e1161a2a39bd99425f2f7 | /fairing/constants/constants.py | 703f260d7848b679e869cb80c980ec0ea0265a54 | [
"Apache-2.0"
# Scratch tarball used while assembling a single image layer.
TEMP_TAR_GZ_FILENAME = '/tmp/fairing.layer.tar.gz'
DEFAULT_IMAGE_NAME = 'fairing-job'
DEFAULT_BASE_IMAGE = 'gcr.io/kubeflow-images-public/fairing:dev'
DEFAULT_REGISTRY = 'index.docker.io'
# Destination prefix inside the built image for user code.
DEFAULT_DEST_PREFIX = '/app/'
DEFAULT_CONTEXT_FILENAME = '/tmp/fairing.context.tar.gz'
DEFAULT_GENERATED_DOCKERFILE_FILENAME = '/tmp/Dockerfile'
# Credentials: env var holding the GCP service-account JSON path, plus secret
# names for GCP / AWS -- presumably Kubernetes secrets mounted into pods;
# confirm against the deployer code.
GOOGLE_CREDS_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
GCP_CREDS_SECRET_NAME = 'user-gcp-sa'
AWS_CREDS_SECRET_NAME = 'aws-secret'
# '{VERSION}' is a placeholder substituted at runtime -- presumably via
# str.format; confirm at the point of use.
DEFAULT_USER_AGENT = 'kubeflow-fairing/{VERSION}'
# Job Constants
JOB_DEFAULT_NAME = 'fairing-job-'
JOB_DEPLOPYER_TYPE = 'job'  # NOTE(review): "DEPLOPYER" typo is part of the public name
# Serving Constants
SERVING_DEPLOPYER_TYPE = 'serving'  # NOTE(review): same "DEPLOPYER" typo
#TFJob Constants
# Coordinates (group/kind/plural/version) of the Kubeflow TFJob custom resource.
TF_JOB_GROUP = "kubeflow.org"
TF_JOB_KIND = "TFJob"
TF_JOB_PLURAL = "tfjobs"
TF_JOB_VERSION = "v1beta2"
TF_JOB_DEFAULT_NAME = 'fairing-tfjob-'
TF_JOB_DEPLOYER_TYPE = 'tfjob'
# KFServing constants
# Coordinates of the KFServing custom resource.
KFSERVING_GROUP = "serving.kubeflow.org"
KFSERVING_KIND = "KFService"
KFSERVING_PLURAL = "kfservices"
KFSERVING_VERSION = "v1alpha1"
KFSERVING_DEFAULT_NAME = 'fairing-kfserving-'
KFSERVING_DEPLOYER_TYPE = 'kfservice'
KFSERVING_CONTAINER_NAME = 'user-container'
| [
"k8s-ci-robot@users.noreply.github.com"
] | k8s-ci-robot@users.noreply.github.com |
6be9e8beae39fefab37a0fbd3fd23aa4d225175f | 255f4237b7ce5bce17699f408d5cfcc0cf7cd738 | /sightings/migrations/0002_auto_20191202_0600.py | afec6f0d3fe995fe3b2c6c74a3323a9258da0e9c | [] | no_license | evo0522/squirrelTracker | d2325b3890794ca157354965fd70228eccd3e67f | 8fcb99171836d61db54ab499f076f7c773251fb1 | refs/heads/master | 2020-09-16T17:09:17.433298 | 2019-12-08T20:09:43 | 2019-12-08T20:09:43 | 223,833,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | # Generated by Django 2.2.7 on 2019-12-02 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sightings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='squirrel',
name='Age',
field=models.CharField(choices=[('Adult', 'Adult'), ('Juvenile', 'Juvenile'), ('Unknown', 'Unknown')], default='Unknown', help_text='Age', max_length=50),
),
migrations.AlterField(
model_name='squirrel',
name='Primary_Fur_Color',
field=models.CharField(default='Unknown', help_text='Primary_Fur_Color', max_length=50),
),
]
| [
"evonnewyh@gmail.com"
] | evonnewyh@gmail.com |
c2f320d96653b374d960eeb2812f0403e6736f6f | ae74ba8c272fe5d534e73a6069dc650dc3ff49fd | /train_rnnt.py | d01e774ff3ac421601e1e4f406331e29a6805a55 | [
"MIT"
] | permissive | jackson1895/beautifulday | cabdf971102eac50d287899d1a72179bfe651b33 | 8efd27e916c8c579c4a9893c69d8847b7c23be18 | refs/heads/master | 2020-07-12T03:37:11.943220 | 2019-08-27T14:27:59 | 2019-08-27T14:27:59 | 204,707,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,074 | py | import json
import os
import argparse
import numpy as np
import random
import misc.utils as utils
from utils import loadData,averager
import opts_only
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_value_
from dataloader_only import VideoDataset
from misc.rewards import get_self_critical_reward, init_cider_scorer
from models import DecoderRNN, EncoderRNN, S2VTAttModel, S2VTModel,CTCmodel,Two_Lstm,CTC_Hieratical_LSTM,two_lstm
from torch import nn
from torch.utils.data import DataLoader
# from warpctc_pytorch import CTCLoss
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from utils import collate_fn
from torch.nn.utils import rnn
import shutil
from torch.nn.functional import log_softmax
from strLabelConverter import strLabelConverter
from jiwer import wer
import time
from utils import AttrDict, init_logger, count_parameters, save_model,computer_cer,save_ctc_model,computer_wer
import yaml
from optim import Optimizer
from dataset import SignVideoDataset
from models.model import Transducer
from models.VideoModel import VideoModel
def train(loader, model, crit, optimizer, lr_scheduler, opt, rl_crit=None,converter=None):
    """Run CTC training for the sign-language video recogniser.

    Args:
        loader: DataLoader yielding dicts with 'fc_feats' (video features;
            the inline note below says (batch_size, 80, 512) -- confirm) and
            'labels' padded with -1.
        model: network mapping feature sequences to per-frame class scores.
        crit: CTC-style loss, called as crit(log_probs, targets,
            input_lengths, target_lengths) -- i.e. the nn.CTCLoss convention,
            inferred from the call below.
        optimizer: torch optimizer stepped once per batch.
        lr_scheduler: stepped per epoch; stepping convention chosen via
            opt['lr_schluder'] (sic).
        opt (dict): options -- uses 'batch_size', 'max_len', 'epochs',
            'lr_schluder', 'eval_every', 'save_checkpoint_every',
            'root_model_path'.
        rl_crit: unused here (kept for interface compatibility).
        converter: ignored -- NOTE(review): unconditionally replaced by a
            fresh strLabelConverter(loader.dataset) below.
    """
    model.cuda()
    # crit.cuda()
    # optimizer.cuda()
    # lr_scheduler.cuda()
    # video = torch.FloatTensor(params.batchSize, 3, params.imgH, params.imgH)
    # TODO: the length constant was 30 for the original Chinese Sign Language setup.
    # Flat reusable buffers: CTC packs all target labels of a batch into one
    # 1-D tensor ('text') plus a per-sample length tensor ('length').
    text = torch.LongTensor(opt['batch_size'] * opt['max_len'])
    # text = torch.IntTensor(opt['batch_size'] * 30)
    length = torch.LongTensor(opt['batch_size'])
    converter = strLabelConverter(loader.dataset)
    # model = nn.DataParallel(model)
    writer = SummaryWriter("two_lstm_exp_German")
    loss_avg = averager()
    # Last validation WER; seeds ReduceLROnPlateau before the first eval.
    wer_val = 1.0
    for epoch in range(opt["epochs"]):
        n_correct = 0
        model.train()
        # 'lr_schluder' (sic) selects the scheduler stepping convention.
        if opt['lr_schluder'] == 'StepLR':
            lr_scheduler.step()
        elif opt['lr_schluder'] == 'ReduceLROnPlateau':
            lr_scheduler.step(wer_val)
        iteration = 0  # NOTE(review): incremented but never read
        f_wer=0.0
        for data in loader:
            torch.cuda.synchronize()
            # Re-enable gradients (val() freezes them after evaluation).
            for p in model.parameters():
                p.requires_grad = True
            fc_feats = data['fc_feats'].cuda() # (batch_size, 80, 512)
            # 1. slice 10 * (batch_size, 8, 512)
            # 2. send each slice to LSTM 10 * (batch_size, 1024)
            # 3. set another mask M2(batch_size, 10)
            # 4. if a slice is full of Zero, set the corresponding index of M2 zero
            # 5. LSTM2
            # 6. obtain final result bt *
            labels = data['labels'].cuda()
            # masks = data['masks'].cuda()
            # clip_nums = data['clip_num']
            # sorted_clip_nums,indices = torch.sort(clip_nums,descending=True)
            # _, desorted_indices = torch.sort(indices, descending=False)
            # fc_feats=fc_feats[indices]
            # pack = rnn.pack_padded_sequence(fc_feats,sorted_clip_nums,batch_first=True)
            # TODO
            optimizer.zero_grad()
            output = model(fc_feats)
            # desorted_res = output[desorted_indices]
            output=output.log_softmax(2).requires_grad_()
            # Greedy per-frame prediction (argmax over the class dimension).
            _, preds = output.max(2)
            # CTC expects time-major (T, N, C); the model output is batch-first.
            output = output.transpose(0, 1).contiguous()
            # Flatten the padded label matrix: -1 marks padding.
            labels_ctc = []
            ys=[]
            for i in labels:
                for j in i:
                    if not j==-1:
                        labels_ctc.append(j)
            for i in labels:
                non_zero = (i == -1).nonzero()
                if not non_zero.numel():
                    # No padding found -> full-length target.
                    ys.append(opt['max_len'])
                else:
                    # Index of the first pad token == true target length.
                    ys.append(non_zero[0][0])
            loadData(text,torch.LongTensor(labels_ctc))
            loadData(length,torch.LongTensor(ys))
            # Every sample uses the full time dimension as its input length.
            preds_size = Variable(torch.LongTensor([output.size(0)] * output.size(1)))
            loss = crit(output, text.cuda(), preds_size.cuda(), length.cuda())
            # loss= crit(output,text,preds_size,length)/opt['batch_size']
            preds = preds.contiguous().view(-1)
            sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
            list_1 = []  # NOTE(review): unused
            # Decode references from the padded label ids and accumulate WER /
            # exact-sentence matches.
            for pred, target in zip(sim_preds, labels):
                ts = target.squeeze().cpu().numpy().tolist()
                res = []
                for i in ts :
                    if i == -1:
                        continue
                    res.append(loader.dataset.ix_to_word[str(i)])
                target = ' '.join(res)
                tmp_wer = wer(target,pred)
                f_wer += tmp_wer
                if pred == target:
                    n_correct += 1
            loss_avg.add(loss)
            loss.backward()
            optimizer.step()
            torch.cuda.synchronize()
            iteration += 1
        # NOTE(review): 'acc' is unused, and dividing by len(loader) (number of
        # batches) rather than the number of samples looks suspicious.
        acc=n_correct/float(len(loader))
        # print(len(loader)*opt['batch_size'])
        f_wer = f_wer/float(len(loader)*opt['batch_size'])
        print("[epoch %d]->train_loss = %.6f , wer = %.6f" % (epoch, loss_avg.val(),f_wer))
        if epoch % opt["eval_every"] == 0:
            # Freeze parameters for evaluation; re-enabled at the next batch.
            for p in model.parameters():
                p.requires_grad = False
            loss_eval,wer_val=val(model,crit,opt,writer,epoch)
            writer.add_scalars('loss_epcho', {'train_loss':loss_avg.val(),'val_loss':loss_eval},epoch)
            writer.add_scalars('wer_epcho',{'train_wer':f_wer,'eval_wer':wer_val},epoch)
        if epoch % opt["save_checkpoint_every"] == 0:
            path = opt['root_model_path']
            # if not os.path.exists(path):
            #     os.mkdir(path)
            # else:
            #     shutil.rmtree(path)
            #     os.mkdir(path)
            model_path = os.path.join(path,
                                      'model_%d.pth' % (epoch))
            model_info_path = os.path.join(path,
                                           'model_score.txt')
            torch.save(model.state_dict(), model_path)
            print("model saved to %s" % (model_path))
            # Append (not overwrite) a one-line score record per checkpoint.
            with open(model_info_path, 'a') as f:
                f.write("model_%d, loss: %.6f train wer: %.6f val wer: %.6f\n" % (epoch, loss_avg.val(),f_wer,wer_val))
        loss_avg.reset()
def val(model, crit, opt,writer=None,epoch=0):
    """Run one CTC evaluation pass over the test split.

    Decodes greedy predictions, compares them against the padded label
    tensors (padding value -1) and accumulates CTC loss and word error rate.

    Args:
        model: network mapping video features to per-frame log-probabilities.
        crit: CTC criterion (called with log-probs, targets, and lengths).
        opt: dict-like options; reads 'batch_size', 'max_len', writes 'vocab_size'.
        writer: optional summary writer (currently unused; see commented line).
        epoch: epoch index used only in the printed report.

    Returns:
        (average CTC loss, word error rate) for the whole test set.
    """
    dataset = VideoDataset(opt,'test')
    dataloader = DataLoader(dataset,batch_size=opt['batch_size'],shuffle=True)
    opt["vocab_size"] = dataset.get_vocab_size()
    model.eval()
    # TODO: this was 30 originally for the Chinese Sign Language dataset
    # Reusable flat target buffer (batch_size * max_len entries) and
    # per-sample length buffer, filled via loadData() every batch.
    text = torch.LongTensor(opt['batch_size'] * opt['max_len'])
    # text = torch.IntTensor(opt['batch_size'] * 30)
    length = torch.LongTensor(opt['batch_size'])
    loss_avg=averager()
    n_correct=0
    f_wer= 0.0
    # converter = strLabelConverter(dataset)
    converter = strLabelConverter(dataloader.dataset)
    for data in dataloader:
        fc_feats = data['fc_feats'].cuda()
        labels = data['labels'].cuda()
        with torch.no_grad():
            output = model(fc_feats)
        # NOTE(review): requires_grad_() during evaluation looks unnecessary
        # (gradients are never used here) -- kept as-is for parity with train().
        output = output.log_softmax(2).requires_grad_()
        _, preds = output.max(2)
        # CTC expects time-major input: (T, B, vocab).
        output = output.transpose(0, 1).contiguous()
        # Flatten all non-padding label ids into one 1-D target sequence.
        labels_ctc = []
        ys = []
        for i in labels:
            for j in i:
                if not j == -1:
                    labels_ctc.append(j)
        # Per-sample target length = index of the first -1 pad (or max_len).
        for i in labels:
            non_zero = (i == -1).nonzero()
            if not non_zero.numel():
                ys.append(opt['max_len'])
            else:
                ys.append(non_zero[0][0])
        loadData(text, torch.LongTensor(labels_ctc))
        loadData(length, torch.LongTensor(ys))
        # Every sample in the batch has the full time dimension as input length.
        preds_size = Variable(torch.LongTensor([output.size(0)] * output.size(1)))
        loss = crit(output.cuda(), text.cuda(), preds_size.cuda(), length.cuda())
        preds = preds.contiguous().view(-1)
        sim_preds =converter.decode(preds.data,preds_size.data,raw=False)
        # Score each decoded hypothesis against its de-padded reference.
        for pred, target in zip(sim_preds, labels):
            ts = target.squeeze().cpu().numpy().tolist()
            res = []
            for i in ts:
                if i == -1:
                    continue
                res.append(dataloader.dataset.ix_to_word[str(i)])
            target = ' '.join(res)
            tmp_wer = wer(target, pred)
            f_wer += tmp_wer
            if pred == target:
                n_correct += 1
        loss_avg.add(loss)
    # NOTE(review): `acc` divides by the number of batches, not samples, and is
    # never reported -- confirm whether sentence accuracy should be logged.
    acc = n_correct/float(len(dataloader))
    f_wer = f_wer/float(len(dataloader)*opt['batch_size'])
    print("[epoch %d]->val_loss = %.6f , wer = %.6f" % (epoch, loss_avg.val(),f_wer))
    # writer.add_scalar('scalar/val_loss_epcho', loss_avg.val())
    return loss_avg.val(),f_wer
def main(opt):
    """Build dataset, model, loss, optimizer and scheduler from `opt`, then train.

    The architecture is selected by opt["model"]; a timestamped checkpoint
    directory is created and the options are dumped to opt_info.json before
    training starts.

    Args:
        opt: dict-like options object (hyper-parameters, paths, model choice).
    """
    dataset = VideoDataset(opt, 'train')
    dataloader = DataLoader(dataset, batch_size=opt["batch_size"], shuffle=True)
    opt["vocab_size"] = dataset.get_vocab_size()
    # ---- architecture selection -------------------------------------------
    # NOTE(review): if opt["model"] matches none of these branches, `model`
    # is undefined below -- confirm the option is validated upstream.
    if opt["model"] == 'S2VTModel':
        model = S2VTModel(
            opt["vocab_size"],
            opt["max_len"],
            opt["dim_hidden"],
            opt["dim_word"],
            opt['dim_vid'],
            rnn_cell=opt['rnn_type'],
            n_layers=opt['num_layers'],
            rnn_dropout_p=opt["rnn_dropout_p"])
    elif opt["model"] == "S2VTAttModel":
        encoder = EncoderRNN(
            opt["dim_vid"],
            opt["dim_hidden"],
            bidirectional=opt["bidirectional"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"])
        decoder = DecoderRNN(
            opt["vocab_size"],
            opt["max_len"],
            opt["dim_hidden"],
            opt["dim_word"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"],
            bidirectional=opt["bidirectional"])
        model = S2VTAttModel(encoder, decoder)
    elif opt["model"] == "CTCmodel":
        # input_dim, hidden_dim, output_dim, num_layers, biFlag, dropout = 0.5
        # model = CTCmodel(opt["dim_vid"],opt["dim_hidden"],opt["vocab_size"]+1)
        model=CTCmodel(opt['vocab_size'],opt['dim_hidden'])
    elif opt["model"] == "CTC_Hieratical_LSTM":
        # Hierarchical model: frame-level encoder followed by a second LSTM.
        encoder = EncoderRNN(
            opt["dim_vid"],
            opt["dim_hidden"],
            # bidirectional=opt["bidirectional"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"])
        second_lstm = two_lstm(
            opt["dim_hidden"]*2,
            opt['vocab_size'],
            # bidirectional=opt["bidirectional"],
            input_dropout_p=opt["input_dropout_p"],
            rnn_cell=opt['rnn_type'],
            rnn_dropout_p=opt["rnn_dropout_p"]
            )
        model =CTC_Hieratical_LSTM(encoder,second_lstm,opt['vocab_size'],opt['dim_word'],opt['dim_hidden'],opt['duration'],opt['video_duration'])
    # model = model.cuda()
    # crit = utils.LanguageModelCriterion()
    # rl_crit = utils.RewardCriterion()
    ctc_loss = nn.CTCLoss(reduction='mean')
    optimizer = optim.Adam(
        model.parameters(),
        lr=opt["learning_rate"],
        weight_decay=opt["weight_decay"])
    # ---- learning-rate schedule -------------------------------------------
    # (the option key keeps the historical misspelling 'lr_schluder')
    if opt['lr_schluder'] == 'StepLR':
        lr_scheduler = optim.lr_scheduler.StepLR(
            optimizer,
            step_size=opt["learning_rate_decay_every"],
            gamma=opt["learning_rate_decay_rate"])
    elif opt['lr_schluder'] == 'ReduceLROnPlateau':
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode='min',
            factor=0.1,
            patience=opt['patience'],
            verbose=True,
            threshold_mode='rel',
            threshold=opt['threshold'],
            cooldown=0,
            min_lr=opt['min_lr'],
            eps=1e-8)
    else:
        raise NotImplementedError('Only implement ReduceLROnPlateau | StepLR')
    # NOTE(review): checkpoint resuming is hard-disabled here -- any value of
    # opt['check_bool'] passed in is overwritten with False.
    opt['check_bool']=False
    if opt['check_bool']:
        # Resume path: load a fixed checkpoint and only run evaluation.
        check_path = os.path.join(opt['check_path'],'model_10.pth')
        model.load_state_dict(torch.load(check_path))
        opt['root_model_path']=opt['check_path']
        print('have loaded model info from:',check_path)
        # TODO: resume training from the checkpoint instead of only evaluating
        val(model, ctc_loss,opt)
    else:
        # Fresh run: create a timestamped checkpoint directory and dump options.
        # NOTE(review): strftime is called twice; if the two calls straddle a
        # second boundary, opt_json's parent differs from root_model_path and
        # the open() below fails -- consider computing the timestamp once.
        opt_json = os.path.join(opt["checkpoint_path"], time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time())),
                                'opt_info.json')
        root_model_path = os.path.join(opt['checkpoint_path'],
                                       time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(time.time())))
        opt['root_model_path'] = root_model_path
        if not os.path.isdir(opt["checkpoint_path"]):
            os.mkdir(opt["checkpoint_path"])
        if not os.path.isdir(root_model_path):
            os.mkdir(root_model_path)
        with open(opt_json, 'w') as f:
            json.dump(opt, f)
        print('save opt details to %s' % (opt_json))
        train(dataloader, model, ctc_loss, optimizer, lr_scheduler, opt)
def train_rnnt(epoch, config, model, training_data, optimizer, logger, visualizer=None):
    """Train the RNN-Transducer for one epoch and return the last batch loss.

    Args:
        epoch: current epoch index (used for logging only).
        config: AttrDict-style configuration; reads config.training.*.
        model: transducer returning the training loss for a batch.
        training_data: iterable of (inputs, inputs_length, targets, targets_length).
        optimizer: wrapper exposing epoch(), zero_grad(), step(), global_step, lr.
        logger: progress logger.
        visualizer: optional TensorBoard-style writer.

    Returns:
        The loss value of the final batch (a float).
    """
    model.train()
    start_epoch = time.process_time()
    total_loss = 0
    optimizer.epoch()  # tell the optimizer wrapper a new epoch begins
    batch_steps = len(training_data)
    for step, (inputs, inputs_length, targets, targets_length) in enumerate(training_data):
        if config.training.num_gpu > 0:
            inputs, inputs_length = inputs.cuda(), inputs_length.cuda()
            targets, targets_length = targets.cuda(), targets_length.cuda()
        # Trim padding down to the longest sequence actually in the batch.
        max_inputs_length = inputs_length.max().item()
        max_targets_length = targets_length.max().item()
        inputs = inputs[:, :max_inputs_length, :]
        targets = targets[:, :max_targets_length]
        optimizer.zero_grad()
        start = time.process_time()
        loss = model(inputs, inputs_length, targets, targets_length)
        # loss = model.recognize(inputs, inputs_length)
        if config.training.num_gpu > 1:
            # DataParallel returns one loss per GPU; average them.
            loss = torch.mean(loss)
        loss.backward()
        total_loss += loss.item()
        grad_norm = nn.utils.clip_grad_norm_(
            model.parameters(), config.training.max_grad_norm)
        optimizer.step()
        if visualizer is not None:
            visualizer.add_scalar(
                'train_loss', loss.item(), optimizer.global_step)
            visualizer.add_scalar(
                'learn_rate', optimizer.lr, optimizer.global_step)
        avg_loss = total_loss / (step + 1)
        if optimizer.global_step % config.training.show_interval == 0:
            end = time.process_time()
            process = step / batch_steps * 100
            # NOTE(review): process_time() excludes GPU/sleep wall time, so the
            # reported Run Time is CPU time, not elapsed time -- confirm intent.
            logger.info('-Training-Epoch:%d(%.5f%%), Global Step:%d, Learning Rate:%.6f, Grad Norm:%.5f, Loss:%.5f, '
                        'AverageLoss: %.5f, Run Time:%.3f' % (epoch, process, optimizer.global_step, optimizer.lr,
                                                              grad_norm, loss.item(), avg_loss, end-start))
        # break
    end_epoch = time.process_time()
    logger.info('-Training-Epoch:%d, Average Loss: %.5f, Epoch Time: %.3f' %
                (epoch, total_loss / (step+1), end_epoch-start_epoch))
    return loss.item()
def eval_rnnt(epoch, config, model, validating_data, logger, visualizer=None):
    """Evaluate the transducer on `validating_data` and return the CER in percent.

    NOTE(review): `total_loss` is never accumulated here (model.recognize
    returns predictions, not a loss), so the logged AverageLoss is always 0.
    Also, `step`/`cer` would be undefined if the validation loader is empty.
    """
    model.eval()
    total_loss = 0
    total_dist = 0
    total_word = 0
    batch_steps = len(validating_data)
    for step, (inputs, inputs_length, targets, targets_length) in enumerate(validating_data):
        if config.training.num_gpu > 0:
            inputs, inputs_length = inputs.cuda(), inputs_length.cuda()
            targets, targets_length = targets.cuda(), targets_length.cuda()
        # Trim padding down to the longest sequence in the batch.
        max_inputs_length = inputs_length.max().item()
        max_targets_length = targets_length.max().item()
        inputs = inputs[:, :max_inputs_length, :]
        targets = targets[:, :max_targets_length]
        preds = model.recognize(inputs, inputs_length)
        # Cut each reference transcript down to its true length before scoring.
        transcripts = [targets.cpu().numpy()[i][:targets_length[i].item()]
                       for i in range(targets.size(0))]
        dist, num_words = computer_cer(preds, transcripts)
        total_dist += dist
        total_word += num_words
        cer = total_dist / total_word * 100  # running character error rate (%)
        # tmp_wer = computer_wer(preds,transcripts)*100
        # total_wer +=tmp_wer
        if step % config.training.show_interval == 0:
            process = step / batch_steps * 100
            logger.info('-Validation-Epoch:%d(%.5f%%), CER: %.5f %%' % (epoch, process, cer))
    val_loss = total_loss/(step+1)
    logger.info('-Validation-Epoch:%4d, AverageLoss:%.5f, AverageCER: %.5f %%' %
                (epoch, val_loss, cer))
    if visualizer is not None:
        visualizer.add_scalar('cer', cer, epoch)
    return cer
def train_ctc_model(epcho, config, model, training_data, optimizer, logger, visualizer=None):
    """Train the CTC model for one epoch.

    Mirrors train_rnnt(). The first parameter keeps its historical (misspelled)
    name ``epcho`` for caller compatibility; the original body referenced an
    undefined name ``epoch`` in its log calls, which raised NameError the first
    time a progress line was emitted -- fixed here by using the parameter.

    Args:
        epcho: current epoch index (used for logging only).
        config: AttrDict-style configuration; reads config.training.*.
        model: network returning the CTC loss for a batch.
        training_data: iterable of (inputs, inputs_length, targets, targets_length).
        optimizer: wrapper exposing epoch(), zero_grad(), step(), global_step, lr.
        logger: progress logger.
        visualizer: optional TensorBoard-style writer.
    """
    model.train()
    start_epoch = time.process_time()
    total_loss = 0
    optimizer.epoch()  # tell the optimizer wrapper a new epoch begins
    batch_steps = len(training_data)
    for step, (inputs, inputs_length, targets, targets_length) in enumerate(training_data):
        if config.training.num_gpu > 0:
            inputs, inputs_length = inputs.cuda(), inputs_length.cuda()
            targets, targets_length = targets.cuda(), targets_length.cuda()
        # Trim padding down to the longest sequence actually in the batch.
        max_inputs_length = inputs_length.max().item()
        max_targets_length = targets_length.max().item()
        inputs = inputs[:, :max_inputs_length, :]
        targets = targets[:, :max_targets_length]
        optimizer.zero_grad()
        start = time.process_time()
        loss = model(inputs, inputs_length,targets,targets_length)
        # loss = model.recognize(inputs,inputs_length)
        if config.training.num_gpu > 1:
            # DataParallel returns one loss per GPU; average them.
            loss = torch.mean(loss)
        loss.backward()
        total_loss += loss.item()
        grad_norm = nn.utils.clip_grad_norm_(
            model.parameters(), config.training.max_grad_norm)
        optimizer.step()
        if visualizer is not None:
            visualizer.add_scalar(
                'train_loss', loss.item(), optimizer.global_step)
            visualizer.add_scalar(
                'learn_rate', optimizer.lr, optimizer.global_step)
        avg_loss = total_loss / (step + 1)
        if optimizer.global_step % config.training.show_interval == 0:
            end = time.process_time()
            process = step / batch_steps * 100
            logger.info('-Training-Epoch:%d(%.5f%%), Global Step:%d, Learning Rate:%.6f, Grad Norm:%.5f, Loss:%.5f, '
                        'AverageLoss: %.5f, Run Time:%.3f' % (epcho, process, optimizer.global_step, optimizer.lr,
                                                              grad_norm, loss.item(), avg_loss, end - start))
        # break
    end_epoch = time.process_time()
    logger.info('-Training-Epoch:%d, Average Loss: %.5f, Epoch Time: %.3f' %
                (epcho, total_loss / (step + 1), end_epoch - start_epoch))
def eval_ctc_model(epcho, config, model, validating_data, logger, visualizer=None):
    """Evaluate the CTC model and return the CER in percent.

    The first parameter keeps its historical (misspelled) name ``epcho`` for
    caller compatibility; the original body referenced an undefined name
    ``epoch`` in its logging and visualizer calls (NameError) -- fixed here.

    NOTE(review): ``total_loss`` is never accumulated (model.recognize returns
    predictions, not a loss), so the reported AverageLoss is always 0.
    """
    model.eval()
    total_loss = 0
    total_dist = 0
    total_word = 0
    batch_steps = len(validating_data)
    for step, (inputs, inputs_length, targets, targets_length) in enumerate(validating_data):
        if config.training.num_gpu > 0:
            inputs, inputs_length = inputs.cuda(), inputs_length.cuda()
            targets, targets_length = targets.cuda(), targets_length.cuda()
        # Trim padding down to the longest sequence in the batch.
        max_inputs_length = inputs_length.max().item()
        max_targets_length = targets_length.max().item()
        inputs = inputs[:, :max_inputs_length, :]
        targets = targets[:, :max_targets_length]
        preds = model.recognize(inputs, inputs_length)
        # Cut each reference transcript down to its true length before scoring.
        transcripts = [targets.cpu().numpy()[i][:targets_length[i].item()]
                       for i in range(targets.size(0))]
        dist, num_words = computer_cer(preds, transcripts)
        total_dist += dist
        total_word += num_words
        cer = total_dist / total_word * 100  # running character error rate (%)
        if step % config.training.show_interval == 0:
            process = step / batch_steps * 100
            logger.info('-Validation-Epoch:%d(%.5f%%), CER: %.5f %%' % (epcho, process, cer))
    val_loss = total_loss / (step + 1)
    logger.info('-Validation-Epoch:%4d, AverageLoss:%.5f, AverageCER: %.5f %%' %
                (epcho, val_loss, cer))
    if visualizer is not None:
        visualizer.add_scalar('cer', cer, epcho)
    return cer
if __name__ == '__main__':
    # ---- command line & configuration -------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('-config', type=str, default='config/rnnt.yaml')
    parser.add_argument('-log', type=str, default='train.log')
    parser.add_argument('-mode', type=str, default='retrain')
    opt = parser.parse_args()
    # NOTE(review): the config file handle is never closed.
    configfile =open(opt.config)
    config = AttrDict(yaml.load(configfile, Loader=yaml.FullLoader))
    # Experiment directory: <data name>/<experiment name>/<model save name>.
    exp_name = os.path.join(config.data.name, config.data.exp_name, config.training.save_model)
    if not os.path.isdir(exp_name):
        os.makedirs(exp_name)
    logger = init_logger(os.path.join(exp_name, opt.log))
    # Keep a copy of the config alongside the logs for reproducibility.
    shutil.copyfile(opt.config, os.path.join(exp_name, 'config.yaml'))
    logger.info(config)
    logger.info('Save config info.')
    os.environ['CUDA_VISIBLE_DEVICES'] = config.training.gpu
    if config.training.num_gpu > 0:
        torch.cuda.manual_seed(config.training.seed)
        torch.backends.cudnn.deterministic = True  # ensure reproducible results
    else:
        torch.manual_seed(config.training.seed)
    logger.info('Set random seed: %d' % config.training.seed)
    ##- loading train/val dataset -##
    # NOTE(review): with num_gpu == 0 this yields batch_size 0 and 0 workers --
    # confirm CPU-only runs are expected to set num_gpu >= 1.
    num_workers = config.training.num_gpu * 2
    train_dataset = SignVideoDataset(config.data,'train')
    train_data = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.data.batch_size * config.training.num_gpu,
        shuffle=config.data.shuffle, num_workers=num_workers)
    logger.info('Load Train Set!')
    val_dataset =SignVideoDataset(config.data,'val')
    val_data =torch.utils.data.DataLoader(
        val_dataset, batch_size=config.data.batch_size * config.training.num_gpu,
        shuffle=False, num_workers=num_workers)
    logger.info('Load Dev Set!')
    # ---- model construction & optional checkpoint loading ------------------
    model = Transducer(config.model)
    # model = VideoModel(config.model)
    if config.training.load_model:
        # Restore a full model checkpoint (per-component state dicts).
        checkpoint = torch.load(config.training.model_path)
        if config.model.fir_enc_or_not:
            model.fir_enc.load_state_dict(checkpoint['fir_enc'])
            model.encoder.load_state_dict(checkpoint['encoder'])
            model.decoder.load_state_dict(checkpoint['decoder'])
            model.joint.load_state_dict(checkpoint['joint'])
            logger.info('Loaded model from %s' % config.training.model_path)
        else:
            model.encoder.load_state_dict(checkpoint['encoder'])
            model.decoder.load_state_dict(checkpoint['decoder'])
            model.joint.load_state_dict(checkpoint['joint'])
            # NOTE(review): this branch logs config.training.load_model while
            # the branch above logs model_path -- confirm which is intended.
            logger.info('Loaded model from %s' % config.training.load_model)
    elif config.training.load_encoder or config.training.load_decoder:
        # Partial initialisation from separately trained components.
        if config.training.load_encoder:
            checkpoint = torch.load(config.training.encoder_path)
            model.encoder.load_state_dict(checkpoint['encoder'])
            logger.info('Loaded encoder from %s' %
                        config.training.load_encoder)
        if config.training.load_decoder:
            checkpoint = torch.load(config.training.decoder_path)
            # model.decoder.embedding.load_state_dict(checkpoint['embed'])
            # Only the decoder LSTM weights are restored here.
            model.decoder.lstm.load_state_dict(checkpoint['lstm'])
            # model.decoder.load_state_dict(checkpoint['decoder'])
            logger.info('Loaded decoder from %s' %
                        config.training.decoder_path)
    if config.training.num_gpu > 0:
        model = model.cuda()
        if config.training.num_gpu > 1:
            device_ids = list(range(config.training.num_gpu))
            model = torch.nn.DataParallel(model, device_ids=device_ids)
        logger.info('Loaded the model to %d GPUs' % config.training.num_gpu)
    # Parameter-count breakdown for the log.
    n_params, enc, dec,fir_enc = count_parameters(model)
    logger.info('# the number of parameters in the whole model: %d' % n_params)
    logger.info('# the number of parameters in the Encoder: %d' % enc)
    logger.info('# the number of parameters in the Decoder: %d' % dec)
    logger.info('# the number of parameters in the fir_enc: %d' % fir_enc)
    logger.info('# the number of parameters in the JointNet: %d' %
                (n_params - dec - enc-fir_enc))
    # ---- optimizer, resume state, visualizer -------------------------------
    optimizer = Optimizer(model.parameters(), config.optim)
    # optimizer = torch.optim.adam(model.parameters(),lr=config.optim.lr,betas=(0.9, 0.98),eps=1e-08,weight_decay=config.optim.weight_decay)
    logger.info('Created a %s optimizer.' % config.optim.type)
    if opt.mode == 'continue':
        # NOTE(review): `checkpoint` only exists if a model/encoder/decoder was
        # loaded above; running '-mode continue' without any load_* option set
        # raises NameError here -- confirm intended usage.
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        logger.info('From epcho:%d training! '%start_epoch)
        logger.info('Load Optimizer State!')
    else:
        start_epoch = 0
    # create a visualizer
    if config.training.visualization:
        visualizer = SummaryWriter(os.path.join(exp_name, 'log'))
        logger.info('Created a visualizer.')
    else:
        visualizer = None
    # (an earlier CTC-based training loop was kept commented out here; see
    # train_ctc_model / eval_ctc_model for the CTC variant)
    # Track cer -> {loss, epoch} so the best epoch so far can be reported.
    loss_cer = {}
    for epoch in range(start_epoch, config.training.epochs):
        loss = train_rnnt(epoch, config, model, train_data,
                          optimizer, logger, visualizer)
        if config.training.eval_or_not and epoch%config.training.eval_fre==0:
            cer = eval_rnnt(epoch, config, model, val_data, logger, visualizer)
            loss_cer[cer] = {}
            loss_cer[cer]['loss']=loss
            loss_cer[cer]['epoch']=epoch
            index = min(list(loss_cer.keys()))  # lowest CER observed so far
            logger.info('Util epoch %d ,minmize cer : %.6f ,corresponding loss: %.6f epoch: %d' % (epoch, index,loss_cer[index]['loss'],loss_cer[index]['epoch']))
        # Save a checkpoint every epoch.
        save_name = os.path.join(exp_name, '%s.epoch%d.chkpt' % (config.training.save_model, epoch))
        save_model(model, optimizer, config, save_name)
        logger.info('Epoch %d model has been saved.' % epoch)
        # Stop once the scheduler has decayed the learning rate to ~zero.
        if optimizer.lr < 1e-6:
            logger.info('The learning rate is too low to train.')
            break
        logger.info('Epoch %d update learning rate: %.6f' %(epoch, optimizer.lr))
    logger.info(exp_name)
    logger.info('The training process is OVER!')
    # main(opt)
| [
"jackson.li.cs@gmail.com"
] | jackson.li.cs@gmail.com |
f2f172726f7409004608358b7923d9c40cd45686 | 280aa61ccadbf6c0838533c52e3c6b2ba2effd46 | /P4HW3_TuitionIncrease_DilshodSadiev.py | 47865a657a39a76a363d6227590b51880c748c62 | [] | no_license | Sadiev/cti110 | d61341d4346f8c4a6c826b919b218944b14a330e | e2a59a7802e7720706599aaedcc52ebf5b3e5724 | refs/heads/master | 2020-03-27T19:02:52.311334 | 2018-10-08T00:01:10 | 2018-10-08T00:01:10 | 146,962,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # This program displays tuition amount witch increase by 3 percent each year for the next 5 years
# 09/21/2018
# CTI-110 P4HW3 - Tuition Increase
# Dilshod Sadiev
#
# Project a 3 percent annual tuition increase over the next five years.
tuition = 8000.00
year = 2018

print('The tuition for a full-time student is $8,000 per semester.\nThe tuition will increase by 3 percent each year for next 5 years.\n')

# Compound the tuition once per year and report each projected amount.
for _ in range(5):
    tuition += (tuition * 3) / 100  # apply the 3 percent increase
    year += 1                       # advance to the next academic year
    print ('The tuition for a full-time student will be $',format(tuition,',.2f'),'per semester in', year)
input("Press any key to exit") | [
"noreply@github.com"
] | Sadiev.noreply@github.com |
6a103aa08789bda13b1472feb7e02f086288959c | 00e89be3c036ec45ac385f87989e5517bed108c9 | /add.py | 547936b3f478b0cdf31191c62a1b2a8504d5f819 | [] | no_license | sehgalvalue/addrep | c1c946adfcb1add9a63e9358b55ac37b7a099566 | 11501263d109702c2bb7833283a22d96382265c3 | refs/heads/master | 2020-04-28T18:49:26.820449 | 2019-03-13T20:27:37 | 2019-03-13T20:27:37 | 175,491,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27 | py | a = 5
b=6  # second addend (the first addend `a` is defined above)
c = a+b  # sum of the two values
print(c)
| [
"noreply@github.com"
] | sehgalvalue.noreply@github.com |
a0321890fdf0babae23c4b46e7dca8a0e7afbf90 | 60dff076fae5d36af71af1066ac7eb4f833d2f2f | /tools/ci_build/github/apple/c/assemble_c_pod_package.py | 18dc8a19d23ceffa99f30900c4c998c464d550e2 | [
"MIT"
] | permissive | NervanaSystems/onnxruntime | 79e60f9c6feb8c147868d27de8077a276755cc90 | 96b3c09e2a5e0a5b4f98ed9059a719d9c7b73724 | refs/heads/master | 2023-06-22T02:55:35.250834 | 2023-01-03T22:54:46 | 2023-01-03T22:54:46 | 162,268,647 | 1 | 3 | MIT | 2021-01-14T12:56:23 | 2018-12-18T10:09:13 | C++ | UTF-8 | Python | false | false | 2,687 | py | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import pathlib
import shutil
import sys
_script_dir = pathlib.Path(__file__).parent.resolve(strict=True)
sys.path.append(str(_script_dir.parent))
from package_assembly_utils import ( # noqa: E402
copy_repo_relative_to_dir, gen_file_from_template, load_framework_info)
def parse_args():
    """Parse and return the command-line options for the pod assembly script."""
    arg_parser = argparse.ArgumentParser(description="""
    Assembles the files for the C/C++ pod package in a staging directory.
    This directory can be validated (e.g., with `pod lib lint`) and then zipped to create a package for release.
    """)

    # Where the pod files are assembled (defaults to a local staging directory).
    arg_parser.add_argument("--staging-dir", type=pathlib.Path,
                            default=pathlib.Path("./onnxruntime-mobile-c-staging"),
                            help="Path to the staging directory for the C/C++ pod files.")

    # Required release metadata and build artifacts.
    arg_parser.add_argument("--pod-version", required=True,
                            help="C/C++ pod version.")
    arg_parser.add_argument("--framework-info-file", type=pathlib.Path, required=True,
                            help="Path to the framework_info.json file containing additional values for the podspec. "
                                 "This file should be generated by CMake in the build directory.")
    arg_parser.add_argument("--framework-dir", type=pathlib.Path, required=True,
                            help="Path to the onnxruntime.framework directory to include in the pod.")

    return arg_parser.parse_args()
def main():
    """Assemble the C/C++ pod contents into the staging directory."""
    args = parse_args()

    framework_info = load_framework_info(args.framework_info_file.resolve())

    staging_dir = args.staging_dir.resolve()
    print(f"Assembling files in staging directory: {staging_dir}")
    if staging_dir.exists():
        print("Warning: staging directory already exists", file=sys.stderr)

    # Stage the framework itself plus the license file.
    framework_dir = args.framework_dir.resolve()
    shutil.copytree(framework_dir, staging_dir / framework_dir.name, dirs_exist_ok=True)
    copy_repo_relative_to_dir(["LICENSE"], staging_dir)

    # Render the podspec from its template with the release-specific values.
    variable_substitutions = {
        "VERSION": args.pod_version,
        "IOS_DEPLOYMENT_TARGET": framework_info["IOS_DEPLOYMENT_TARGET"],
        "WEAK_FRAMEWORK": framework_info["WEAK_FRAMEWORK"],
        "LICENSE_FILE": '"LICENSE"',
    }
    gen_file_from_template(_script_dir / "onnxruntime-mobile-c.podspec.template",
                           staging_dir / "onnxruntime-mobile-c.podspec",
                           variable_substitutions)
    return 0
# Run as a script: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main())
| [
"noreply@github.com"
] | NervanaSystems.noreply@github.com |
3a71ff09d8ee1fd3cb59d725a4825d66ebfb00cf | c9a3f0e2d9f79860ba5124db288cb4dc70fa4124 | /plots/plot_pocket_controller_autoscaling.py | 6500b7363881c75f5378dafa1f6ee54c494431f6 | [] | no_license | anakli/pocket-controller | 2b7605fa4b82b10252ed7a91d22d7d2cef2b2c81 | f163c7175c93eaa617210d35ccd267ad912d8aae | refs/heads/master | 2020-03-09T04:08:39.926455 | 2018-12-25T03:44:25 | 2018-12-25T03:44:25 | 128,580,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | import pandas as pd
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib.text import OffsetFrom
#python plot_pocket_controller_autoscaling.py sort1-video-sort2-WORKS-util-v2.log
#time net_usedMbps avg_cpu dram_usedGB net_allocMbps dram_allocGB
# Global matplotlib style: large fonts, and TrueType (Type 42) font embedding
# so text in the exported PDF/PS remains selectable/editable.
plt.rcParams.update({'font.size': 24})
plt.rcParams.update({'pdf.fonttype': 42})
plt.rcParams.update({'ps.fonttype': 42})
def plot_usage(logfile):
    """Plot allocated vs. used network throughput over time from a usage log.

    `logfile` is a space-separated file with columns:
    time net_usedMbps avg_cpu dram_usedGB net_allocMbps dram_allocGB
    The figure is saved as a PDF in the current directory.
    """
    data = pd.read_csv(logfile, sep=' ') # header=True) #, skipinitialspace=True)
    print(list(data))
    # Shift timestamps so the trace starts at t = 0.
    start_time = data.at[0, 'time']
    data['time'] = data['time'] - start_time
    # Hard-coded epoch timestamps of job (de)registration events for this
    # specific experiment run, shifted into plot time.
    REGISTER_JOB1 = 1525312251.8904905 - start_time
    DEREGISTER_JOB1 = 1525312310.2906423 - start_time
    REGISTER_JOB2 = 1525312332.0015373 - start_time
    REGISTER_JOB3 = 1525312376.7232 - start_time
    DEREGISTER_JOB3 = 1525312443.3627393 - start_time
    DEREGISTER_JOB2 = 1525312542.2918663 - start_time
    x = data.loc[:,'time']
    # Convert Mbit/s to GByte/s (divide by 8 bits/byte and 1e3 Mb per Gb).
    net_usage = data.loc[:,'net_usedMbps'] / (8*1e3)
    net_alloc = data.loc[:,'net_allocMbps'] / (8*1e3) #/ 8 * 10
    # CPU and DRAM series are read but not plotted in this figure.
    cpu = data.loc[:, 'avg_cpu']
    dram_usedGB = data.loc[:,'dram_usedGB']
    dram_allocGB = data.loc[:, 'dram_allocGB']
    fig = plt.figure(figsize=(15,8))
    ax = plt.axes([0.06, 0.2, 0.9, 0.75]) # left bottom width height (fraction of total figsize)
    ax.plot(x, net_alloc, label='Total GB/s allocated', linestyle='--', color="#1f77b4", linewidth=4)
    ax.plot(x, net_usage, label='Total GB/s used', color="#ff7f0e", linewidth=4)
    ax.set_xlabel("Time (s)")
    ax.set_ylabel("Throughput (GB/s)")
    ax.legend(loc='upper left')
    # Mark each job's registration ('->') / deregistration ('<-') below the
    # x-axis with arrows, color-coded per job (Job1 blue, Job2 green, Job3 grey).
    ax.annotate('Job1', xy=(REGISTER_JOB1, -0.07), xytext=(REGISTER_JOB1, -0.2),
                xycoords=('data', 'axes fraction'), textcoords=('data', 'axes fraction'),
                va='center',ha='center',color='blue',
                arrowprops=dict(arrowstyle='->', color='blue'), size=18
                )
    ax.annotate('Job1',
                xy=(DEREGISTER_JOB1, -0.07), xytext=(DEREGISTER_JOB1, -0.2),
                xycoords=('data', 'axes fraction'), textcoords=('data', 'axes fraction'),
                va='center',ha='center', color='blue',
                arrowprops=dict(arrowstyle='<-', color='blue'), size=18
                )
    ax.annotate('Job2', xy=(REGISTER_JOB2, -0.07), xytext=(REGISTER_JOB2, -0.2),
                xycoords=('data', 'axes fraction'), textcoords=('data', 'axes fraction'),
                va='center',ha='center',color='green',
                arrowprops=dict(arrowstyle='->', color='green'), size=18
                )
    ax.annotate('Job3', xy=(REGISTER_JOB3, -0.07), xytext=(REGISTER_JOB3, -0.2),
                xycoords=('data', 'axes fraction'), textcoords=('data', 'axes fraction'),
                va='center',ha='center',color='grey',
                arrowprops=dict(arrowstyle='->',color='grey',), size=18
                )
    ax.annotate('Job3', xy=(DEREGISTER_JOB3, -0.07), xytext=(DEREGISTER_JOB3, -0.2),
                xycoords=('data', 'axes fraction'), textcoords=('data', 'axes fraction'),
                va='center',ha='center',color='grey',
                arrowprops=dict(arrowstyle='<-',color='grey',), size=18
                )
    ax.annotate('Job2', xy=(DEREGISTER_JOB2, -0.07), xytext=(DEREGISTER_JOB2, -0.2),
                xycoords=('data', 'axes fraction'), textcoords=('data', 'axes fraction'),
                va='center',ha='center',color='green',
                arrowprops=dict(arrowstyle='<-',color='green',), size=18
                )
    #plt.show()
    plt.savefig("pocket_controller_autoscale-8Gbs-.pdf")
# Usage: python plot_pocket_controller_autoscaling.py <logfile>
if __name__ == '__main__':
    logfile = sys.argv[1]
    plot_usage(logfile)
| [
"ana.klimov@gmail.com"
] | ana.klimov@gmail.com |
dd97094e0e53418b16229ca0ca1a5efacd5e520f | 1b53325f6976bd2697f1d9678054b8a1e5dd059c | /update/without_expansion/2.run_calculate_concept_map.py | d0f902e4761716435b798ad4bda40a5255298bc5 | [
"MIT"
] | permissive | vsoch/semantic-image-comparison | d34150b4fed36d55f934e727297ee188951e3ed9 | ab029ad124fc6d6e7ae840c24a8e9471d8737525 | refs/heads/master | 2020-04-06T07:04:21.726094 | 2016-08-13T23:13:10 | 2016-08-13T23:13:10 | 48,921,431 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | #!/usr/bin/python
from glob import glob
import sys
import pandas
import os
# Classification framework
# for image1 in all images:
# for image2 in allimages:
# if image1 != image2:
# hold out image 1 and image 2, generate regression parameter matrix using other images
# generate predicted image for image 1 [PR1]
# generate predicted image for image 2 [PR2]
# classify image 1 as fitting best to PR1 or PR2
# classify image 2 as fitting best to PR1 or PR2
# Experiment root directory is passed on the command line (Python 2 script).
base = sys.argv[1]
update = "%s/update" %base
output_folder = "%s/classification" %update # any kind of tsv/result file
results = "%s/results" %update # any kind of tsv/result file
# Make sure both output directories exist.
for x in [output_folder,results]:
    if not os.path.exists(x):
        os.mkdir(x)
# Images by Concepts data frame (NOT including all levels of ontology)
labels_tsv = "%s/concepts_binary_df.tsv" %update
image_lookup = "%s/image_nii_lookup.pkl" %update
df = pandas.read_csv(labels_tsv,sep="\t",index_col=0)
# Submit one SLURM job per unordered image pair; the image1 < image2 check
# avoids scheduling the same pair twice.
for image1_holdout in df.index.tolist():
    print "Parsing %s" %(image1_holdout)
    for image2_holdout in df.index.tolist():
        if (image1_holdout != image2_holdout) and (image1_holdout < image2_holdout):
            output_file = "%s/%s_%s_predict.pkl" %(output_folder,image1_holdout,image2_holdout)
            # Skip pairs whose result already exists (supports re-running).
            if not os.path.exists(output_file):
                job_id = "%s_%s" %(image1_holdout,image2_holdout)
                filey = ".job/class_%s.job" %(job_id)
                filey = open(filey,"w")
                # Write the SLURM batch script for this image pair.
                filey.writelines("#!/bin/bash\n")
                filey.writelines("#SBATCH --job-name=%s\n" %(job_id))
                filey.writelines("#SBATCH --output=.out/%s.out\n" %(job_id))
                filey.writelines("#SBATCH --error=.out/%s.err\n" %(job_id))
                filey.writelines("#SBATCH --time=2-00:00\n")
                filey.writelines("#SBATCH --mem=32000\n")
                filey.writelines("python 2.calculate_concept_map.py %s %s %s %s %s" %(image1_holdout, image2_holdout, output_file, labels_tsv, image_lookup))
                filey.close()
                # Submit the job to the cluster queue.
                os.system("sbatch -p russpold --qos russpold " + ".job/class_%s.job" %(job_id))
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
65865ddc117140f9817ea18ddb903ec6a8ae3cfc | 3c8a7554fe61c5794f0bb6523fa9942680fda876 | /chp7/lib/python3.6/hashlib.py | 741c4f0c473f1dff3b49733b5404b00e4d2383d7 | [] | no_license | porte404/Assignment4_ESS520 | 9d6cb3604c67a35771c66fe43405cb7ed6bef848 | f0ff28cf1d94524da55fbb545b2a0fb0372e2b30 | refs/heads/master | 2021-01-25T09:14:16.822355 | 2017-06-08T23:46:52 | 2017-06-08T23:46:52 | 93,798,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | /Users/matthew/anaconda3/lib/python3.6/hashlib.py | [
"porte404@uw.edu"
] | porte404@uw.edu |
83f8eb10ba1e92f6e90b7448980c213b30724c5a | 560d01d2c1f15410d0fd0ab8e402a5bbf8956eff | /sports_predict.py | 311da9b33a189304b58a067fd7c341766d081c69 | [] | no_license | Roland-coder/streamlit-app | 63b4a1e5289cc267121b42f43372bb6eb6197dc6 | 21f9de4598b9d0697610efd803a87eab711d590b | refs/heads/main | 2023-08-24T21:18:52.547215 | 2021-10-27T08:13:56 | 2021-10-27T08:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,824 | py |
import streamlit as st
import pandas as pd
import pickle
import numpy as np
from PIL import Image
from smart_open import smart_open
# --- Page header -----------------------------------------------------------
st.title("Sports Predict App")
st.header("Sports Performace Prediction")
st.write("This web app predicts the overall performance of a player based on particular features")
image = Image.open("sports.jpg")
st.image(image, use_column_width=True)

# --- Feature inputs --------------------------------------------------------
st.write("Please insert values to get overall prediction of player")
potential = st.slider("Player Potential: ", 0, 100)
club = st.number_input("Pick Club number based on club description above")
wage = st.number_input("Please enter wage of player")
international_reputation = st.slider("Player International Reputation: ", 0, 5)
short_passing = st.slider("Player Short Passing: ", 0, 100)
reactions = st.slider("Player Reactions: ", 0, 100)
vision = st.slider("Player Vision: ", 0, 100)
composure = st.slider("Player Composure: ", 0, 100)

# Single-row feature record; keys must match the columns the model was trained on.
data = {'Potential' : potential,
        'Club' : club,
        'Wage' : wage,
        'International Reputation' : international_reputation,
        'ShortPassing' : short_passing,
        'Reactions' : reactions,
        'Vision' : vision,
        'Composure' : composure
        }

if st.button('Predict Overall Performance'):
    # Load the trained model on demand; `with` guarantees the file handle is
    # closed (the original left it open).  NOTE: pickle.load is only safe on
    # trusted, locally produced model files.
    with open('final_model.save', 'rb') as model_file:
        model = pickle.load(model_file)
    features = pd.DataFrame(data, index=[0])
    prediction = model.predict(features)
    st.header("Please find predicted value below")
    st.write("The overall predicted score for the above player is", np.round(prediction[0]))
else:
    st.write('Thank You For Trusting Us')
| [
"noreply@github.com"
] | Roland-coder.noreply@github.com |
587618aea91148466d133ac9a83beda798de79ca | f4099d60ad515c444e6814c9879939af70d567f1 | /calc_ca.py | 8bd206f53881b0c01f3e36f26844bd2b2c414fe5 | [] | no_license | atelierkarin/fm-j-league-data-reader | 6af8cef349e4cb971163462c5a2605ee29256494 | ccf461cabc45f215c92c4d1088b2597316fab73a | refs/heads/master | 2022-09-13T12:36:28.551693 | 2020-05-23T08:52:13 | 2020-05-23T08:52:13 | 260,946,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,013 | py | import pandas as pd
import numpy as np
import pickle
df = pd.read_csv('raw_data.csv')
df['平均クラブ勝点'] = df['クラブ勝点'] / df['試合数']
df['平均試合出場数'] = df['試合出場数'] / df['試合数']
df['平均得点'] = df['得点'] / df['試合数']
df = pd.get_dummies(df, columns=['POS'])
# Data with CA
df_with_ca = df[df['CA'].notnull()]
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
X = df_with_ca[['POS_GK', 'POS_DF', 'POS_MF', 'POS_FW', '平均クラブ勝点', 'リーグ知名度', '平均試合出場数', '平均得点']].values
y = df_with_ca['CA']
pipe = make_pipeline(MLPRegressor())
param_grid = {
'mlpregressor__activation': ['tanh'],
'mlpregressor__solver': ['adam'],
'mlpregressor__hidden_layer_sizes':[(50, 50), (50, 100), (100, 50), (100, 100)],
'mlpregressor__alpha': [0.1, 1, 10],
'mlpregressor__max_iter': [10000]
}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
grid = GridSearchCV(pipe, param_grid=param_grid, cv=5, verbose=2)
grid.fit(X_train, y_train)
print("-" * 50)
print("\nGrid-Search")
print("Best parameters:", grid.best_params_)
print("Best cross-validation score: {:.3f}".format(grid.best_score_))
print("-" * 50)
scores = grid.score(X_test, y_test)
print("Test set score: {:.2f}".format(scores))
print("-" * 50)
print("Test set accuracy: {:.3f}".format(grid.score(X_test, y_test)))
# Predict and export
predict_X = df[['POS_GK', 'POS_DF', 'POS_MF', 'POS_FW', '平均クラブ勝点', 'リーグ知名度', '平均試合出場数', '平均得点']].values
predict_ca = grid.predict(predict_X)
df['CA_CALC'] = predict_ca
df.to_csv('results.csv',encoding='utf-8-sig')
with open('regional_league_model.pickle', mode='wb') as fp:
pickle.dump(grid, fp) | [
"atelierkarin@gmail.com"
] | atelierkarin@gmail.com |
0f740d27d674e2a6797cc7ad1e2ca972ce4a70f1 | 37307ea56d688297f5655ff844a44fcd4d713f52 | /偶数.py | 16ce4328f38534c1a039bf09decac1058662a3d7 | [] | no_license | know-c0de/python | e8b07784ce914745dbeff143776089408105885e | e031d5a3ddec804205a0600aea489f9ed1e0ec9c | refs/heads/master | 2021-01-10T18:40:45.932567 | 2013-12-12T06:15:30 | 2013-12-12T06:15:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | #!/usr/bin/env python2.7.6
#coding=utf-8
#
#Copyright 2013 the Melange authors.
#
#You may obtain a copy at
#
# https://github.com/know_c0de/python/
#
#Author:know_c0de
#Time:2013.11.13
#E-mail:xxxxxxxx@qq.com
#Unless required by applicable law or agreed to in writing, software
#distributed under the license is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
#See the license for the specific language governing permissions and
#limitations under the license.
def get_even(start=0, end=0):
for x in range(start, end):
if x % 2 is 0:
print x
print get_even(100, 200)
| [
"code.sec01@gmail.com"
] | code.sec01@gmail.com |
e7468ca344e3696d679e74b70eaea262c3f43a78 | fa60288b43e02dd1ef392ab16d8efc91468e438e | /Python Programs/csv_reader.py | 2ad9c25f1e48861ef5f712592931e990813824e3 | [] | no_license | manjot-baj/My_Python_Django | 42fa5adc223888a16c3f4f017a9200cb4567d02d | 43dbe17f7588bbc0ae60e403eec3a3788ebcfb36 | refs/heads/master | 2023-01-19T03:21:18.584091 | 2020-02-14T07:44:10 | 2020-02-14T07:44:10 | 232,131,850 | 0 | 0 | null | 2022-12-26T20:33:03 | 2020-01-06T15:41:27 | CSS | UTF-8 | Python | false | false | 256 | py | from csv import reader
with open("file.csv","r") as file_csv:
with open("file1.csv","w") as w_csv:
csv_reader = reader(file_csv)
next(csv_reader)
for k,v in dict(csv_reader).items():
w_csv.write(f"{k} : {list(v)}\n") | [
"bajwa.mj78@gmail.com"
] | bajwa.mj78@gmail.com |
7615302a3a6fa18b13dcc63cd0808b3928d4bd4c | 5f801f2a34e97338591e0830fb9ba8e242a6a2b4 | /DSProject1/WebScraper.py | 2e1c132471199ddb3758e9ba7e2ce126fae99efb | [] | no_license | wesleymerrick/Data-Sci-Class | 15b7ee1f60c1ac75b6d9d8df377f530a2070221d | ff2a66c52879f7c016df7d3bc4aa94ce3bce698f | refs/heads/master | 2021-01-23T17:05:04.416498 | 2017-09-07T16:07:57 | 2017-09-07T16:07:57 | 102,756,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,106 | py | from __future__ import print_function
from bs4 import BeautifulSoup
import urllib
import re
# WebScraper.py
# Toby Duncan and Wesley Merrick
# COSC 480 - 01
# Mini Project 1
# 2/10/17
# List that holds the url's for each of the articles to scrape
my_url_list = ["http://www.theonion.com/article/sixth-super-bowl-win-continues-elude-patriots-55231",
"http://www.theonion.com/article/lady-gaga-panics-after-hearing-name-called-halftim-55234",
"http://www.theonion.com/article/father-teaches-son-how-shave-him-55223",
"http://www.theonion.com/article/burmese-python-shocked-amount-stress-man-holding-h-55196",
"http://www.theonion.com/article/trump-supporter-has-few-backup-scapegoats-ready-go-55186",
"http://www.theonion.com/article/mom-just-wants-watch-something-nice-55183",
"http://www.theonion.com/article/nothing-would-surprise-me-point-says-man-who-will--55179",
"http://www.theonion.com/article/it-too-late-audition-asks-perfect-actor-role-pokin-55176",
"http://www.theonion.com/article/2-year-old-unaware-hes-basis-6-couples-decisions-n-55166",
"http://www.theonion.com/article/man-spends-whole-day-dreading-fun-activity-he-sign-55165", # 10
"http://www.theonion.com/article/explanation-board-game-rules-peppered-reassurances-55162",
"http://www.theonion.com/article/man-chippewa-falls-wisconsin-hates-when-people-eag-55157",
"http://www.theonion.com/article/spider-sitting-shower-wall-cant-wait-see-look-mans-55136",
"http://www.theonion.com/article/asshole-moves-part-city-where-all-assholes-live-55074",
"http://www.theonion.com/article/32-year-old-still-not-entirely-sure-where-body-sta-55057",
"http://www.theonion.com/article/7-year-old-apparently-under-impression-everyone-kn-55027",
"http://www.theonion.com/article/man-excited-spend-weekend-back-home-catching-old-v-55019",
"http://www.theonion.com/article/mom-nightgown-mode-55001",
"http://www.theonion.com/article/controversial-puppy-bowl-star-shits-during-nationa-55240",
"http://www.theonion.com/article/area-man-totally-screwing-order-snack-consumption--55237",
]
# Scrape all url's for the article's body text, then calculate and return the number a list # of words per article
# and a list of all words found across all articles, counting repeats
def scrape_word_counts(url_list):
all_words = [] # List of all words found across all articles
words_per_article = [] # List to hold the word count of each separate article
for l in url_list:
tmp_wpa = 0 # Counter for number of words in each article, set counter to zero at the start of each article
r = urllib.urlopen(l) # sets the contents of the open url to r
soup = BeautifulSoup(r, "html.parser") # bs4 object, default html parser specified
stuff = soup.find('p').get_text() # Getting text inside paragraph tags for each article
for word in stuff.split(): # Separate scraped text into individual words
word = re.sub(r'[^\w]', '', word) # Strip all punctuation FIXME: don't strip apostrophes
all_words.append(word) # Add formatted word to the list of all scraped words
tmp_wpa += 1 # Increment the number of words found in the current article
# Add the number of words found to the appropriate list after scraping each article
words_per_article.append(tmp_wpa)
return words_per_article, all_words
# Calculate and return the mean and median # of words per article
def mid_count(words_in_article):
running_sum = 0 # Counts up the number of words
for ct in words_in_article:
running_sum += ct # Sum the word counts of all articles
mean = running_sum / len(my_url_list) # Calculate the mean word count per article
words_in_article.sort() # Sort the list to make finding the median easy
# Calculate the median word count per article
if len(words_in_article) % 2 == 0:
median = (words_in_article[(len(words_in_article) / 2)]
+ words_in_article[(len(words_in_article) / 2) + 1] / 2)
else:
median = words_in_article[(len(words_in_article) / 2)]
return mean, median
# Calculate and print the most frequently used word (or words if it's a tie)
def freq_list(word_list):
# imports defaultdict to use to make frequency list
from collections import defaultdict as dd
word_counts = dd(int) # instance of defaultdict
# loops through word_list making frequency list
for word in word_list:
word_counts[word] += 1
individual_words = [] # list to hold sorted words
individual_count = [] # list to hold number of times each word was found
# http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
# sorts list and appends the two lists to hold the most frequent word and number of times it shows up
# in descending order
for w in sorted(word_counts, key=word_counts.get, reverse=True):
individual_words.append(w)
individual_count.append(word_counts[w])
# Prints the most frequent word (or words if it's a tie) and how many times it shows up
if individual_count[0] == individual_count[1]: # Case where two or more words are tied for most frequent
freq_words = individual_words[0] # String to hold all words tied for most frequent
k = 1
while individual_count[k] == individual_count[0]: # Find most frequent words (works because of earlier sort)
freq_words = freq_words + ", " + individual_words[k]
k += 1
print("The most frequently used words were: \"" + freq_words + "\" with " + str(individual_count[0])
+ " usages each")
else: # Case where there is only one most frequently used word
print("The most frequently used word was: \"" + (individual_words[0]) + "\" with "
+ str(individual_count[0]) + " usages")
# prints the remaining words
print("The following is a frequency list of all other words in descending order:\n")
# http://stackoverflow.com/questions/1663807/how-can-i-iterate-through-two-lists-in-parallel-in-python
for a, b in zip(individual_words[0:], individual_count[0:]):
if b == 1: # Handle pluralization
print(a + " - " + str(b) + " usage")
else:
print(a + " - " + str(b) + " usages")
# Main method
if __name__ == '__main__':
words_per, words_master = scrape_word_counts(my_url_list) # Assign local variables so we can call other methods
avg, med = mid_count(words_per) # Calculate the mean and median number of words per article scraped
# Print those results
print("The average (mean) number of words in each article was: ", avg)
print("The median number of words in each article was: ", med)
freq_list(words_master) # Generate and print frequency list of words found across all articles
| [
"wesleymerrick@gmail.com"
] | wesleymerrick@gmail.com |
8b119fb23c23050586e6ff415a734bd89f1705a3 | e14775b035a71ad814d724abea0e41b3a9238ea4 | /toSetDefaults.py | 252225fb521f81db280a1cfbe2c04fefbc6a6042 | [] | no_license | haris314/book-review | 6aec307cc5727e4299be9eb0d90f799b2ffda04a | dfc6efd62629c312bbd20fca04c0f7b09e4eb1a2 | refs/heads/master | 2023-02-17T22:07:40.322878 | 2022-11-04T16:46:14 | 2022-11-04T16:46:14 | 213,733,741 | 1 | 0 | null | 2023-02-15T23:07:47 | 2019-10-08T19:25:08 | HTML | UTF-8 | Python | false | false | 1,372 | py | from extraLogic import shouldBePreFormatted
import psycopg2
try:
connection = psycopg2.connect(user = "acpntihfuaixij",
password = "0d4d9e9d7d7c4f424ddd2c3056b12ff2f187e2d6d1316fa6717e82cc8e9d473b",
host = "ec2-174-129-227-146.compute-1.amazonaws.com",
port = "5432",
database = "dc40r85arbm903")
cursor = connection.cursor()
# Print PostgreSQL Connection properties
print ( connection.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
print("--------------------------------------------")
cursor.execute("SELECT * FROM review;")
for row in cursor:
review = row[2]
isPreFormatted = shouldBePreFormatted(review)
cursor2 = connection.cursor()
cursor2.execute(f"UPDATE review SET ispreformatted = {isPreFormatted} WHERE isbn = '{row[1]}';")
connection.commit()
except (Exception, psycopg2.Error) as error :
print ("Error while connecting to PostgreSQL", error)
finally:
#closing database connection.
if(connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed") | [
"hrs.haris.hrs@gmail.com"
] | hrs.haris.hrs@gmail.com |
8a377e8d6ee0c16df50b175edee94deac516db51 | 1d85ee8d3a5ce672cb1fe77ce48f59e4b8d38402 | /frontend/frontend/__init__.py | c66a7e548ded592eb9a8b9e1c40e4fb759ae54fe | [] | no_license | thomastodon/hello-gsa | cad382bb083e1d91a072d461fa4fac5d1f68c0b6 | 1300818466cc97f56b470e481e4ebde2351a44af | refs/heads/master | 2021-05-01T03:33:32.013857 | 2016-09-19T01:55:13 | 2016-09-19T01:55:13 | 64,556,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | import os
from flask import Flask
app = Flask(__name__)
import frontend.views | [
"thomas.shouler@gmail.com"
] | thomas.shouler@gmail.com |
f44574379435b1f2cd4ce38956cd022587c8a169 | f64fde1c4ae338987b76c10c1029468143f1d83a | /Test_programs/stacking_arm/main.py | 86a75d8333a3fe74d564dc64820892d75fccba01 | [] | no_license | abhijithneilabraham/Project-ANTON | 56a21941042034c9c2b407e25d4e75925a158e71 | 03478d9c9a537c2507a06e3c022a1092587cdc06 | refs/heads/master | 2023-04-01T21:01:14.568164 | 2020-05-01T14:19:24 | 2020-05-01T14:19:24 | 203,203,760 | 2 | 0 | null | 2023-03-24T22:42:40 | 2019-08-19T15:52:11 | Python | UTF-8 | Python | false | false | 1,285 | py | """
Make it more robust.
Stop episode once the finger stop at the final position for 50 steps.
Feature & reward engineering.
"""
from env import ArmEnv
from rl import DDPG
MAX_EPISODES = 900
MAX_EP_STEPS = 200
ON_TRAIN = False
# set env
env = ArmEnv()
s_dim = env.state_dim
a_dim = env.action_dim
a_bound = env.action_bound
rl = DDPG(a_dim, s_dim, a_bound)
steps = []
print(s_dim)
def train():
# start training
for i in range(MAX_EPISODES):
s = env.reset()
ep_r = 0.
for j in range(MAX_EP_STEPS):
# env.render()
a = rl.choose_action(s)
s_, r, done = env.step(a)
rl.store_transition(s, a, r, s_)
ep_r += r
if rl.memory_full:
# start to learn once has fulfilled the memory
rl.learn()
s = s_
if done or j == MAX_EP_STEPS-1:
print('Ep: %i | %s | ep_r: %.1f | step: %i' % (i, '---' if not done else 'done', ep_r, j))
break
rl.save()
def eval():
rl.restore()
env.render()
env.viewer.set_vsync(True)
s = env.reset()
while True:
env.render()
a = rl.choose_action(s)
s, r, done = env.step(a)
#if ON_TRAIN:
# train()
#else:
# eval()
| [
"abhijithneilabrahampk@gmail.com"
] | abhijithneilabrahampk@gmail.com |
19c0c8ba1f422877c884266898a022af1615d25e | dc666a49ef6e75546ab60251ea025a9b183713e3 | /products/migrations/0041_autovoisin.py | 1f65ee2b756d1aa6bda0aeb376787685be67de52 | [] | no_license | cash2one/source | 6c4503f8d05eaa149a278d4f81c80e289d6e5e61 | 61e082814d25c81007a2ff0cfae7f3a06c8c291d | refs/heads/master | 2021-06-18T03:52:56.445654 | 2017-06-13T12:20:55 | 2017-06-13T12:20:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,368 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
depends_on = (
("accounts", "0044_autovoisin"),
)
def forwards(self, orm):
eloue_site_id = 1
autovoisin_site_id = 14
root = orm['products.Category'].objects.get(id=2700) # Automobile
# categories = tuple(root.get_descendants(include_self=True).filter(
# sites__id=eloue_site_id).values_list('id', flat=True))
categories = tuple(orm.Category.objects.filter(
tree_id=root.tree_id, sites__id=eloue_site_id
).values_list('id', flat=True))
db.start_transaction()
db.execute("""
INSERT INTO products_category_sites(category_id, site_id)
SELECT category_id, %s FROM products_category_sites WHERE
site_id = %s AND category_id IN %s""",
[autovoisin_site_id, eloue_site_id, categories])
db.execute("""
INSERT INTO products_product_sites(product_id, site_id)
SELECT DISTINCT PS.product_id, %s FROM
products_product_sites AS PS JOIN products_product2category AS PC ON
(PC.product_id=PS.product_id) WHERE PC.site_id = PS.site_id AND
PS.site_id = %s AND
PC.category_id IN %s""",
[autovoisin_site_id, eloue_site_id, categories])
db.execute("""
INSERT INTO products_product2category(product_id, category_id, site_id)
SELECT PC.product_id, PC.category_id, %s FROM
products_product2category AS PC WHERE
PC.site_id = %s AND
PC.category_id IN %s""",
[autovoisin_site_id, eloue_site_id, categories])
db.commit_transaction()
def backwards(self, orm):
autovoisin_site_id = 14
db.start_transaction()
db.execute("""DELETE FROM products_product2category WHERE site_id = %s""",
[autovoisin_site_id, ])
db.execute("""DELETE FROM products_product_sites WHERE site_id = %s""",
[autovoisin_site_id, ])
db.execute("""DELETE FROM products_category_sites WHERE site_id = %s""",
[autovoisin_site_id, ])
db.commit_transaction()
models = {
u'accounts.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['accounts.Patron']"}),
'position': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
u'accounts.language': {
'Meta': {'object_name': 'Language'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'accounts.patron': {
'Meta': {'object_name': 'Patron'},
'about': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'affiliate': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'civility': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'customers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounts.Patron']", 'symmetrical': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['accounts.Address']"}),
'default_number': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['accounts.PhoneNumber']"}),
'drivers_license_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'drivers_license_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'godfather_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hobby': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'iban': ('django_iban.fields.IBANField', [], {'max_length': '34', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_professional': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_subscribed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['accounts.Language']", 'null': 'True', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'login_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'new_messages_alerted': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'paypal_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rib': ('django.db.models.fields.CharField', [], {'max_length': '23', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'patrons'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subscriptions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounts.ProPackage']", 'through': u"orm['accounts.Subscription']", 'symmetrical': 'False'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'work': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'})
},
u'accounts.phonenumber': {
'Meta': {'object_name': 'PhoneNumber'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phones'", 'to': u"orm['accounts.Patron']"})
},
u'accounts.proagency': {
'Meta': {'object_name': 'ProAgency'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pro_agencies'", 'to': u"orm['accounts.Patron']"}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'position': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
u'accounts.propackage': {
'Meta': {'ordering': "('-maximum_items',)", 'unique_together': "(('maximum_items', 'valid_until'),)", 'object_name': 'ProPackage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maximum_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'valid_from': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'valid_until': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'accounts.subscription': {
'Meta': {'object_name': 'Subscription'},
'annual_payment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'free': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_free_month': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Patron']"}),
'payment_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'propackage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.ProPackage']"}),
'subscription_ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscription_started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_messages.message': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_msg': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'next_messages'", 'null': 'True', 'to': u"orm['django_messages.Message']"}),
'read_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'received_messages'", 'null': 'True', 'to': u"orm['accounts.Patron']"}),
'recipient_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'replied_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_messages'", 'to': u"orm['accounts.Patron']"}),
'sender_deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'products.alert': {
'Meta': {'object_name': 'Alert'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': u"orm['accounts.Address']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'designation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': u"orm['accounts.Patron']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'alerts'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"})
},
u'products.answer': {
'Meta': {'object_name': 'Answer'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': u"orm['products.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'products.carproduct': {
'Meta': {'object_name': 'CarProduct', '_ormbases': [u'products.Product']},
'air_conditioning': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'audio_input': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'baby_seat': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'bike_rack': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'brand': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'cd_player': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'consumption': ('django.db.models.fields.PositiveIntegerField', [], {'default': '4', 'null': 'True', 'blank': 'True'}),
'costs_per_km': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '3', 'blank': 'True'}),
'cruise_control': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'door_number': ('django.db.models.fields.IntegerField', [], {'default': '5', 'null': 'True', 'blank': 'True'}),
'first_registration_date': ('django.db.models.fields.DateField', [], {}),
'fuel': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'gps': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'km_included': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'licence_plate': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'mileage': ('django.db.models.fields.IntegerField', [], {'default': '2', 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'power_steering': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['products.Product']", 'unique': 'True', 'primary_key': 'True'}),
'roof_box': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'seat_number': ('django.db.models.fields.IntegerField', [], {'default': '4', 'null': 'True', 'blank': 'True'}),
'ski_rack': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'snow_chains': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'snow_tires': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'tax_horsepower': ('django.db.models.fields.PositiveIntegerField', [], {}),
'transmission': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'})
},
u'products.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'footer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'need_insurance': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'childrens'", 'null': 'True', 'to': u"orm['products.Category']"}),
'product': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'category_product'", 'unique': 'True', 'null': 'True', 'to': u"orm['products.Product']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'categories'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'products.categoryconformity': {
'Meta': {'object_name': 'CategoryConformity'},
'eloue_category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['products.Category']"}),
'gosport_category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['products.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'products.curiosity': {
'Meta': {'object_name': 'Curiosity'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'curiosities'", 'to': u"orm['products.Product']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'curiosities'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"})
},
u'products.messagethread': {
'Meta': {'object_name': 'MessageThread'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_message': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'last_message_in_thread'", 'unique': 'True', 'null': 'True', 'to': u"orm['products.ProductRelatedMessage']"}),
'last_offer': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'last_offer_in_thread'", 'unique': 'True', 'null': 'True', 'to': u"orm['products.ProductRelatedMessage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'messages'", 'null': 'True', 'to': u"orm['products.Product']"}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participating_threads'", 'to': u"orm['accounts.Patron']"}),
'recipient_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'initiated_threads'", 'to': u"orm['accounts.Patron']"}),
'sender_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'products.patronreview': {
'Meta': {'object_name': 'PatronReview'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'patron': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': u"orm['accounts.Patron']"}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'patronreview_reviews'", 'to': u"orm['accounts.Patron']"}),
'score': ('django.db.models.fields.FloatField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'products.picture': {
'Meta': {'object_name': 'Picture'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pictures'", 'null': 'True', 'to': u"orm['products.Product']"})
},
u'products.price': {
'Meta': {'ordering': "['unit']", 'object_name': 'Price'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'ended_at': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': u"orm['products.Product']"}),
'started_at': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True'})
},
u'products.product': {
'Meta': {'object_name': 'Product'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'on_delete': 'models.PROTECT', 'to': u"orm['accounts.Address']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_categories'", 'symmetrical': 'False', 'through': u"orm['products.Product2Category']", 'to': u"orm['products.Category']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'to': u"orm['products.Category']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'deposit_amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'to': u"orm['accounts.Patron']"}),
'payment_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'phone': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['accounts.PhoneNumber']"}),
'pro_agencies': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'products'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['accounts.ProAgency']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'products'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'products.product2category': {
'Meta': {'object_name': 'Product2Category'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['products.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['products.Product']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['sites.Site']"})
},
u'products.producthighlight': {
'Meta': {'object_name': 'ProductHighlight'},
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['products.Product']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'products.productrelatedmessage': {
'Meta': {'ordering': "['-sent_at']", 'object_name': 'ProductRelatedMessage', '_ormbases': [u'django_messages.Message']},
u'message_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['django_messages.Message']", 'unique': 'True', 'primary_key': 'True'}),
'offer': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'offer_in_message'", 'unique': 'True', 'null': 'True', 'to': u"orm['rent.Booking']"}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'messages'", 'null': 'True', 'to': u"orm['products.MessageThread']"})
},
u'products.productreview': {
'Meta': {'object_name': 'ProductReview'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': u"orm['products.Product']"}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productreview_reviews'", 'to': u"orm['accounts.Patron']"}),
'score': ('django.db.models.fields.FloatField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'products.producttopposition': {
'Meta': {'object_name': 'ProductTopPosition'},
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['products.Product']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'products.property': {
'Meta': {'object_name': 'Property'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'properties'", 'to': u"orm['products.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'products.propertyvalue': {
'Meta': {'unique_together': "(('property', 'product'),)", 'object_name': 'PropertyValue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'properties'", 'to': u"orm['products.Product']"}),
'property': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'values'", 'to': u"orm['products.Property']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'products.question': {
'Meta': {'ordering': "('modified_at', 'created_at')", 'object_name': 'Question'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': u"orm['accounts.Patron']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': u"orm['products.Product']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'products.realestateproduct': {
'Meta': {'object_name': 'RealEstateProduct', '_ormbases': [u'products.Product']},
'accessible': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'air_conditioning': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'breakfast': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'capacity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'chamber_number': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'chimney': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'computer_with_internet': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'family_friendly': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'gym': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ideal_for_events': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'internet_access': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'jacuzzi': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lift': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lockable_chamber': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'private_life': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
u'product_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['products.Product']", 'unique': 'True', 'primary_key': 'True'}),
'rules': ('django.db.models.fields.TextField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'smoking_accepted': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'towel': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'tumble_dryer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'tv': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'washing_machine': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'products.unavailabilityperiod': {
'Meta': {'object_name': 'UnavailabilityPeriod'},
'ended_at': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['products.Product']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'rent.booking': {
'Meta': {'object_name': 'Booking'},
'borrower': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rentals'", 'to': u"orm['accounts.Patron']"}),
'canceled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'contract_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'deposit_amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {}),
'insurance_amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookings'", 'to': u"orm['accounts.Patron']"}),
'pay_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'preapproval_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookings'", 'to': u"orm['products.Product']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'bookings'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'started_at': ('django.db.models.fields.DateTimeField', [], {}),
'state': ('django_fsm.FSMField', [], {'default': "'authorizing'", 'max_length': '50'}),
'total_amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'primary_key': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['products']
symmetrical = True
| [
"elena.lilac@gmail.com"
] | elena.lilac@gmail.com |
96a2c8ceb28ab064438abaa8b14ad96c713bff9c | b1d921644161105c3fa12d51702565a22b3e0d1e | /typeidea/blog/migrations/0001_initial.py | 84095c3a37f3779d83ece9dee0a3985fb3718f2e | [] | no_license | FATE-0/blog | 01e74a1f105ea2fc1b27e69be376ce4270e32f13 | fca878f68f8dc67a4e8b75d9c8f109d6e820375d | refs/heads/master | 2020-06-19T10:17:35.152719 | 2019-07-19T11:17:26 | 2019-07-19T11:17:26 | 196,675,430 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | # Generated by Django 2.2.3 on 2019-07-14 08:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the blog app.

    Creates the Category, Tag and Post models. Post references Category via
    a ForeignKey and Tag via a ManyToMany relation; all three models carry
    an owner ForeignKey to the swappable AUTH_USER_MODEL.
    NOTE: generated code - field order and operation order are significant,
    do not reorder by hand.
    """
    # Marks this as the first migration of the app (no prior migration state).
    initial = True
    dependencies = [
        # Depend on whatever model AUTH_USER_MODEL points at (swappable).
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Category: navigable blog category with soft-delete style status.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='名称')),
                ('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
                ('is_nav', models.BooleanField(default=False, verbose_name='是否为导航')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
            ],
            options={
                'verbose_name': '分类',
                'verbose_name_plural': '分类',
            },
        ),
        # Tag: short free-form label with the same status/owner pattern.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10, verbose_name='名称')),
                ('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除')], default=1, verbose_name='状态')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
            ],
            options={
                'verbose_name': '标签',
                'verbose_name_plural': '标签',
            },
        ),
        # Post: the article itself; created last because it references both
        # Category (FK) and Tag (M2M). Ordered newest-first via '-id'.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, verbose_name='标题')),
                ('desc', models.CharField(blank=True, max_length=1024, verbose_name='摘要')),
                ('content', models.TextField(help_text='正文必须为 MarkDown 格式', verbose_name='正文')),
                ('status', models.PositiveIntegerField(choices=[(1, '正常'), (0, '删除'), (2, '草稿')], default=1, verbose_name='状态')),
                ('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.Category', verbose_name='分类')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='作者')),
                ('tag', models.ManyToManyField(to='blog.Tag', verbose_name='标签')),
            ],
            options={
                'verbose_name': '文章',
                'verbose_name_plural': '文章',
                'ordering': ['-id'],
            },
        ),
    ]
| [
"eric@example.com"
] | eric@example.com |
79ce3d8730d723b0fcac829ec4d82d885bbddb31 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/StructuredProductsDealPackage/FPythonCode/SP_TrfExerciseCalculations.py | 908ccec668aabaa4a82d6a6f0e52e0a4cb029113 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,027 | py |
#***************************************************************
#
# Module calculating the settlement amounts
#
#***************************************************************
import acm
from SP_TrfUtils import BuySellMapping, TrfHasBarrier, TrfExpiryEvent, TrfExpiryEventsSortedByDate
from SP_DealPackageHelper import GetCurrencyPairPointsDomesticPerForeign, GetCurrencyPairPointsForeignPerDomestic
# Numerical tolerance used throughout this module when comparing floats
# against zero (fixings, targets, intrinsic values are never compared exactly).
epsilon = 0.000001
def IsDomesticPerForeign(rateDirection):
    """Return True for 'DomesticPerForeign', False for 'ForeignPerDomestic'.

    Raises RuntimeError for any other value.
    """
    validDirections = ('DomesticPerForeign', 'ForeignPerDomestic')
    if rateDirection not in validDirections:
        raise RuntimeError ('Invalid value for parameter "rateDirection"')
    return rateDirection == 'DomesticPerForeign'
def BuySellAsMultiplier(buySellForeign):
    """Map a BUY/SELL direction (of the foreign currency) to a sign.

    Returns +1.0 for 'BUY', -1.0 for 'SELL'; raises RuntimeError otherwise.
    """
    sign = {'BUY': 1.0, 'SELL': -1.0}.get(buySellForeign)
    if sign is None:
        raise RuntimeError ('Invalid value for parameter "buySellForeign"')
    return sign
def RateDirectionAsMultiplier(rateDirection):
    # +1.0 when the rate is quoted domestic-per-foreign, -1.0 otherwise.
    # Input validation (and the RuntimeError on bad values) is delegated
    # to IsDomesticPerForeign.
    if IsDomesticPerForeign(rateDirection):
        return 1.0
    return -1.0
def InverseTargetAsMultiplier(inverseTarget):
    # Strictly the boolean True flips the sign; any other value keeps +1.0.
    if inverseTarget is True:
        return -1.0
    return 1.0
def FxRateValueToUse(rate, inverse, inverseMultiplier = 1.0):
    """Return the rate, optionally inverted, scaled by inverseMultiplier.

    Rates numerically at zero (within epsilon) are passed through
    uninverted to avoid division by zero.
    """
    if inverse is False or abs(rate) < epsilon:
        value = rate
    else:
        value = 1.0 / rate
    return inverseMultiplier * value
def IntrinsicForAccumulation(strike, fixing, inverseTarget, buySellForeign, rateDirection):
    # Intrinsic value expressed in the quotation used for target accumulation.
    # Both quotes are converted (and sign-adjusted) in the same way before
    # taking the intrinsic, so the conversion flags are computed once.
    convertQuote = (inverseTarget == IsDomesticPerForeign(rateDirection))
    quoteSign = -1.0 if inverseTarget else 1.0
    accStrike = FxRateValueToUse(strike, convertQuote, quoteSign)
    accFixing = FxRateValueToUse(fixing, convertQuote, quoteSign)
    return IntrinsicValue(accStrike, accFixing, buySellForeign)
def AdjustedStrike(strike, fixing, target, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection):
    """Return the strike, capped so the fixing's accrual cannot exceed the target.

    Only applies when the exact-target feature is on and a target exists;
    otherwise the strike is returned unchanged.
    """
    if exactTarget is False or target < epsilon:
        return strike
    remainingTarget = target - previousTarget
    accumulation = IntrinsicForAccumulation(strike, fixing, inverseTarget, buySellForeign, rateDirection)
    if accumulation < remainingTarget:
        return strike
    return AdjustStrikeWithRemainingTarget(fixing, remainingTarget, inverseTarget, buySellForeign, rateDirection)
def AdjustedStrikeCommodity(strike, fixing, target, previousTarget, exactTarget):
    """Commodity variant of AdjustedStrike: cap the accrual at the remaining target."""
    if exactTarget is False or target < epsilon:
        return strike
    remainingTarget = target - previousTarget
    accumulation = fixing - strike
    # Within the remaining target the strike is untouched; beyond it the
    # strike is moved so exactly the remaining target is accrued.
    return strike if accumulation < remainingTarget else fixing - remainingTarget
def AdjustStrikeMultiplier(inverseTarget, buySellForeign):
    # Combined sign from the trade direction and the target inversion flag.
    return InverseTargetAsMultiplier(inverseTarget) * BuySellAsMultiplier(buySellForeign)
def AdjustStrikeWithRemainingTarget(fixing, remainingTarget, inverseTarget, buySellForeign, rateDirection):
    # Convert the fixing into the quotation used for target accumulation,
    # shift it by the remaining target (signed by direction/inversion), then
    # convert the result back into the original quotation.
    convertQuote = (inverseTarget == IsDomesticPerForeign(rateDirection))
    fixingForAdjustment = FxRateValueToUse(fixing, convertQuote, 1.0)
    shiftedStrike = fixingForAdjustment - remainingTarget * AdjustStrikeMultiplier(inverseTarget, buySellForeign)
    return FxRateValueToUse(shiftedStrike, convertQuote, 1.0)
def IntrinsicValue(strike, fixing, buySellForeign):
    # Raw intrinsic value (fixing minus strike), signed by the buy/sell
    # direction when one is given; unsigned when buySellForeign is None.
    difference = fixing - strike
    if buySellForeign is None:
        return difference
    return difference * BuySellAsMultiplier(buySellForeign)
def IntrinsicValueForPayOff(strike, fixing, buySellForeign, rateDirection):
    """Intrinsic value with both quotes normalised to domestic-per-foreign terms.

    When rateDirection is None the quotes are used as supplied.
    """
    if rateDirection is None:
        strikeToUse, fixingToUse = strike, fixing
    else:
        convertQuote = not IsDomesticPerForeign(rateDirection)
        strikeToUse = FxRateValueToUse(strike, convertQuote)
        fixingToUse = FxRateValueToUse(fixing, convertQuote)
    return IntrinsicValue(strikeToUse, fixingToUse, buySellForeign)
def StrikeAdjustedIntrinsicValue(strike, fixing, target, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection):
    # Intrinsic value computed against the (possibly target-capped) strike.
    effectiveStrike = AdjustedStrike(strike, fixing, target, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection)
    return IntrinsicValueForPayOff(effectiveStrike, fixing, buySellForeign, rateDirection)
def StrikeAdjustedIntrinsicValueCommodity(strike, fixing, target, previousTarget, exactTarget):
    # Commodity variant: no FX quotation or buy/sell sign is involved.
    effectiveStrike = AdjustedStrikeCommodity(strike, fixing, target, previousTarget, exactTarget)
    return IntrinsicValueForPayOff(effectiveStrike, fixing, None, None)
def NotionalAtStrike(notional1, notional2, notionalAtStrike, hasBarrier):
    """Notional to apply when the fixing lands exactly on the strike.

    'Notional 1' always resolves to notional1; 'Notional 2' resolves to
    notional2 only for barrier-free structures; anything else gives 0.0.
    """
    if notionalAtStrike == 'Notional 1':
        return notional1
    useLeveragedNotional = (notionalAtStrike == 'Notional 2' and hasBarrier is False)
    return notional2 if useLeveragedNotional else 0.0
def NotionalAmount(notional1, notional2, strike, fixing, buySellForeign, strikeDirection, hasBarrier, notionalAtStrike = None):
    """Select the notional from the sign of the intrinsic value.

    Positive intrinsic -> notional1; negative -> notional2; an exactly
    at-the-money fixing (within epsilon) is delegated to NotionalAtStrike.
    """
    intrinsic = IntrinsicValueForPayOff(strike, fixing, buySellForeign, strikeDirection)
    if abs(intrinsic) < epsilon:
        return NotionalAtStrike(notional1, notional2, notionalAtStrike, hasBarrier)
    elif intrinsic > 0.0:
        return notional1
    else:
        return notional2
def CurrencyConversion(fixing, settleInDomesticCurrency, rateDirection):
    # Domestic settlement needs no conversion factor; otherwise convert via
    # the fixing, inverting it when the quote is domestic-per-foreign.
    if settleInDomesticCurrency is True:
        return 1.0
    return FxRateValueToUse(fixing, rateDirection == 'DomesticPerForeign')
def BarrierHit(fixing, barrierLevel, barrierInterpretation, buySellForeign, rateDirection):
    """True when the fixing has breached the barrier level.

    Interpretation 'Past' requires a strict breach; any other interpretation
    also counts touching the barrier exactly.
    """
    moneynessAtBarrier = IntrinsicValueForPayOff(barrierLevel, fixing, buySellForeign, rateDirection)
    if barrierInterpretation == 'Past':
        return moneynessAtBarrier < 0
    return moneynessAtBarrier <= 0
def BarrierHitOrIgnored(fixing, hasBarrier, barrierLevel, barrierInterpretation, buySellForeign, rateDirection):
    # A structure without a barrier behaves as if the barrier were hit.
    if not hasBarrier:
        return True
    return BarrierHit(fixing, barrierLevel, barrierInterpretation, buySellForeign, rateDirection)
def BarrierMultiplier(fixing, hasBarrier, barrierLevel, barrierInterpretation, buySellForeign, rateDirection):
    # 1.0 keeps the pay-off alive, 0.0 knocks it out.
    hit = BarrierHitOrIgnored(fixing, hasBarrier, barrierLevel, barrierInterpretation, buySellForeign, rateDirection)
    return 1.0 if hit else 0.0
def PhysicalStrikeToUse(strike, fixing, target, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection):
    # Target-capped strike, inverted when the quote is not already
    # domestic-per-foreign, for use in physical delivery amounts.
    effectiveStrike = AdjustedStrike(strike, fixing, target, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection)
    return FxRateValueToUse(effectiveStrike, not IsDomesticPerForeign(rateDirection))
def TargetMultiplier(previousTarget, targetLevel):
    # The pay-off stays alive (1.0) when there is no target at all, or when
    # the accumulated amount still leaves room below the target level.
    noTarget = targetLevel < epsilon
    targetRemains = (targetLevel - previousTarget) > epsilon
    return 1.0 if noTarget or targetRemains else 0.0
def TakeBarrierIntoAccount(hasBarrier, intrinsicValue):
    # The barrier only matters when one exists and the fixing is on the
    # losing side (beyond numerical tolerance).
    if hasBarrier:
        return intrinsicValue < -epsilon
    return hasBarrier
def CalculateCashAmount(fixing,
                        strike,
                        rateDirection,
                        targetLevel,
                        inverseTarget,
                        previousTarget,
                        exactTarget,
                        notional1,
                        notional2,
                        settleInDomesticCurrency,
                        buySellForeign,
                        hasBarrier,
                        barrierInterpretation):
    """Cash settlement amount for one TRF fixing.

    The amount is the product of five factors:
    intrinsic value (against the possibly target-capped strike), the
    selected notional, a currency-conversion factor, a barrier knock-out
    multiplier (0 or 1) and a target knock-out multiplier (0 or 1).
    """
    # Intrinsic value against the strike capped by any remaining target.
    intrinsicValue = StrikeAdjustedIntrinsicValue(strike, fixing, targetLevel, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection)
    # Notional chosen by the sign of the (unadjusted-strike) intrinsic.
    notional = NotionalAmount(notional1, notional2, strike, fixing, buySellForeign, rateDirection, hasBarrier)
    # Conversion to the settlement currency (1.0 when settling domestic).
    currencyConversion = CurrencyConversion(fixing, settleInDomesticCurrency, rateDirection)
    # Barrier is only considered when it exists and the fixing is losing.
    barrierMultiplier = BarrierMultiplier(fixing, TakeBarrierIntoAccount(hasBarrier, intrinsicValue), barrierLevel, barrierInterpretation, buySellForeign, rateDirection)
    # Zero once the accumulated target has been reached.
    targetMultiplier = TargetMultiplier(previousTarget, targetLevel)
    return intrinsicValue * notional * currencyConversion * barrierMultiplier * targetMultiplier
def CalculatePhysicalAmounts(fixing,
                             strike,
                             rateDirection,
                             targetLevel,
                             inverseTarget,
                             previousTarget,
                             exactTarget,
                             notional1,
                             notional2,
                             buySellForeign,
                             hasBarrier,
                             barrierLevel,
                             barrierInterpretation,
                             notionalAtStrike):
    """Physical settlement amounts for one TRF fixing.

    Returns (amountDomestic, amountForeign): the signed domestic-side
    notional and the opposite foreign-side amount implied by the
    (target-capped) strike. Both are zero when the barrier or the target
    knocks the fixing out.
    """
    # Intrinsic value against the target-capped strike; only used here to
    # decide whether the barrier should be taken into account.
    intrinsicValue = StrikeAdjustedIntrinsicValue(strike, fixing, targetLevel, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection)
    # Notional chosen by the sign of the intrinsic; exact at-the-money
    # fixings are resolved via the notionalAtStrike convention.
    notionalDomestic = NotionalAmount(notional1, notional2, strike, fixing, buySellForeign, rateDirection, hasBarrier, notionalAtStrike)
    # Strike (capped by the remaining target) used for the foreign leg.
    strikeToUse = PhysicalStrikeToUse(strike, fixing, targetLevel, previousTarget, inverseTarget, exactTarget, buySellForeign, rateDirection)
    barrierMultiplier = BarrierMultiplier(fixing, TakeBarrierIntoAccount(hasBarrier, intrinsicValue), barrierLevel, barrierInterpretation, buySellForeign, rateDirection)
    targetMultiplier = TargetMultiplier(previousTarget, targetLevel)
    amountDomestic = notionalDomestic * BuySellAsMultiplier(buySellForeign) * barrierMultiplier * targetMultiplier
    # The foreign amount is the opposite side of the exchange at the strike.
    amountForeign = -amountDomestic * strikeToUse
    return amountDomestic, amountForeign
def CalculateCommodityCashAmount(fixing,
                                 strike,
                                 targetLevel,
                                 previousTarget,
                                 exactTarget,
                                 notional1,
                                 notional2):
    """Cash settlement amount for one commodity TRF fixing.

    Product of the (target-capped) intrinsic value, the notional selected
    by the sign of the intrinsic, and a target knock-out multiplier.
    """
    payoff = StrikeAdjustedIntrinsicValueCommodity(strike, fixing, targetLevel, previousTarget, exactTarget)
    selectedNotional = NotionalAmount(notional1, notional2, strike, fixing, None, None, None)
    aliveMultiplier = TargetMultiplier(previousTarget, targetLevel)
    return payoff * selectedNotional * aliveMultiplier
def GetStrikeDecimals(instrument, rateDirection):
    """Number of decimals used for strike/barrier/fixing rounding.

    Looked up from the currency-pair points in the requested quote
    direction.
    """
    # Wrap with the business-logic decorator when the instrument does not
    # already expose the decorated accessors.
    if not hasattr(instrument, 'DecoratedObject'):
        instrument = acm.FBusinessLogicDecorator.WrapObject(instrument)
    foreign = instrument.ForeignCurrency()
    domestic = instrument.DomesticCurrency()
    if rateDirection == 'DomesticPerForeign':
        return GetCurrencyPairPointsDomesticPerForeign(foreign, domestic)
    return GetCurrencyPairPointsForeignPerDomestic(foreign, domestic)
def GetFixingValue(instrument, date, rateDirection):
    """Return the fixing value registered on *date* for *instrument*.

    The stored fixing appears to be quoted domestic-per-foreign; when
    *rateDirection* is 'ForeignPerDomestic' the reciprocal is returned.

    Raises RuntimeError when no usable fixing exists for the date.
    """
    fixing = TrfExpiryEvent(instrument, date)
    if fixing is not None:
        if acm.Time.DateDifference(date, fixing.Date()) == 0.0:
            # Only a strictly positive value counts as an entered fixing.
            if fixing.EventValue() > epsilon:
                if rateDirection == 'ForeignPerDomestic':
                    return 1.0 / fixing.EventValue()
                else:
                    return fixing.EventValue()
        else:
            # NOTE(review): this branch fires when the event's date does not
            # match the requested date, yet the message says "No fixing
            # entered"; a zero fixing value on the matching date instead
            # falls through to the "not a valid fixing date" error below.
            # Confirm the two messages are not swapped.
            raise RuntimeError ('No fixing entered for %s' % date)
    raise RuntimeError ('Date %s is not a valid fixing date for %s' % (date, instrument.Name()))
def GetPreviousTarget(instrument, date):
    """Return the accumulated target just before *date*.

    Walks the date-sorted expiry events and keeps the accumulated target
    of the last fixing strictly before *date*; 0.0 when there is none.
    """
    accumulated = 0.0
    for event in TrfExpiryEventsSortedByDate(instrument):
        # Stop at the first fixing on or after the requested date.
        if acm.Time.DateDifference(event.Date(), date) >= 0:
            break
        accumulated = event.TrfAccTarget()
    return accumulated
def BaseSettlementParameters(instrument, date):
    """Collect the keyword arguments shared by cash and physical TRF
    settlement calculations for *instrument* on *date*.

    Fixing, strike and barrier are rounded to the quotation decimals of
    the instrument's currency pair in the chosen rate direction.
    """
    # 'Per Unit Inverse' strike quotation means rates are Foreign/Domestic.
    quotation = instrument.StrikeQuotation()
    if quotation and quotation.Name() == 'Per Unit Inverse':
        rateDirection = 'ForeignPerDomestic'
    else:
        rateDirection = 'DomesticPerForeign'
    decimals = GetStrikeDecimals(instrument, rateDirection)
    addInfo = instrument.AdditionalInfo()
    return {
        'fixing' : round(GetFixingValue(instrument, date, rateDirection), decimals),
        'strike' : round(instrument.StrikePrice(), decimals),
        'rateDirection' : rateDirection,
        'targetLevel' : addInfo.Sp_TargetLevel(),
        'inverseTarget' : addInfo.Sp_InvertedTarget(),
        'previousTarget' : GetPreviousTarget(instrument, date),
        'exactTarget' : addInfo.Sp_AdjustedStrike(),
        'notional1' : instrument.ContractSize(),
        'notional2' : addInfo.Sp_LeverageNotional(),
        'buySellForeign' : BuySellMapping(instrument, 'Foreign'),
        'hasBarrier' : TrfHasBarrier(instrument),
        'barrierLevel' : round(instrument.Barrier(), decimals),
        'barrierInterpretation' : addInfo.Sp_BarrierCondition()
    }
def BaseCommoditySettlementParameters(instrument, date):
    """Collect the keyword arguments for commodity TRF settlement
    calculations; commodity fixings are used unrounded and undirected."""
    addInfo = instrument.AdditionalInfo()
    params = {}
    params['fixing'] = GetFixingValue(instrument, date, None)
    params['strike'] = instrument.StrikePrice()
    params['targetLevel'] = addInfo.Sp_TargetLevel()
    params['previousTarget'] = GetPreviousTarget(instrument, date)
    params['exactTarget'] = addInfo.Sp_AdjustedStrike()
    params['notional1'] = instrument.ContractSizeInQuotation()
    params['notional2'] = addInfo.Sp_LeverageNotional()
    return params
def CashSettlementParameters(instrument, date):
    """Keyword arguments for CalculateCashAmount: the base settlement
    parameters plus the cash-settlement currency flag."""
    kwargs = BaseSettlementParameters(instrument, date)
    kwargs.update(settleInDomesticCurrency=instrument.AdditionalInfo().Sp_SettleInCurr2())
    return kwargs
def PhysicalSettlementParameters(instrument, date):
    """Keyword arguments for CalculatePhysicalAmounts: the base
    settlement parameters plus the settle-at-strike flag."""
    kwargs = BaseSettlementParameters(instrument, date)
    kwargs.update(notionalAtStrike=instrument.AdditionalInfo().Sp_StrikeSettle())
    return kwargs
def CommodityCashSettlementParameters(instrument, date):
    """Keyword arguments for CalculateCommodityCashAmount; currently
    exactly the base commodity settlement parameters."""
    return BaseCommoditySettlementParameters(instrument, date)
def CalculateTRFSettlementAmounts(trade, date):
    """Dispatch FX TRF settlement for *trade* on *date*.

    Returns the cash amount for cash-settled instruments, otherwise the
    (domestic, foreign) physical delivery amounts.

    Raises RuntimeError for structures other than TRFs.
    """
    instrument = trade.Instrument()
    if instrument.AdditionalInfo().StructureType() != 'Target Redemption Forward':
        raise RuntimeError('TRF settlement calculations only implemented for Target Redemption Forward')
    cashSettled = instrument.SettlementType() == 'Cash'
    if cashSettled:
        return CalculateCashAmount(**CashSettlementParameters(instrument, date))
    return CalculatePhysicalAmounts(**PhysicalSettlementParameters(instrument, date))
def CalculateCommodityTRFSettlementAmounts(trade, date):
    """Dispatch commodity TRF settlement for *trade* on *date*.

    Only cash settlement is supported; any other settlement type or
    structure type raises RuntimeError.
    """
    instrument = trade.Instrument()
    if instrument.AdditionalInfo().StructureType() != 'Target Redemption Forward':
        raise RuntimeError('TRF settlement calculations only implemented for Target Redemption Forward')
    if instrument.SettlementType() != 'Cash':
        raise RuntimeError('TRF settlement calculations only implemented for Cash settlement')
    return CalculateCommodityCashAmount(**CommodityCashSettlementParameters(instrument, date))
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
416fe336e3a5e6b20ac4785817ca044c4211e1a3 | 94d1a15d9696d42667ffce54a3efcb001afb65c8 | /filtre.py | 5cd639507b463961414ceb1c42ec722bd25d1960 | [] | no_license | eren4321/pypr | 4a8d0e2e26368c4debfb1e74bc00484a799c40ed | 8ca77d1480e0f87b511d33e3199effb97dd2b27e | refs/heads/master | 2020-04-15T04:51:18.633517 | 2019-04-15T14:44:21 | 2019-04-15T14:44:21 | 164,398,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,686 | py | import pymongo
from pymongo import MongoClient
import io
import json


def export_top_retweets(db, collection_name, limit=10):
    """Dump the *limit* most retweeted documents of *collection_name*
    into '<collection_name>.json', one JSON object per line.

    The ObjectId is stringified and the 'created'/'tarih' fields are
    blanked so every document is JSON serialisable.
    """
    cursor = db[collection_name].find().limit(limit).sort(
        [("retweet", pymongo.DESCENDING)])
    with io.open('%s.json' % collection_name, 'w', encoding="utf-8") as f:
        for item in cursor:
            item["_id"] = str(item["_id"])
            item["created"] = ""
            item["tarih"] = ""
            print(item)
            f.write("%s\n" % json.dumps(item))


# One shared connection instead of reconnecting per collection; the
# per-collection export used to be copy-pasted four times.
connection = MongoClient('localhost', 27017)
db = connection.mydatabase
for name in ("hurriyet", "milliyet", "cnn", "kanald"):
    export_top_retweets(db, name)
| [
"noreply@github.com"
] | eren4321.noreply@github.com |
5e663842171e42b585b3f5adb7e25d0e60cb9a90 | a8c1e1d90579828a95a3ff03fb45805219894daa | /Modulo6/accedientoArchivos/readLine.py | 282bcb7193d9f3fcd67398ac141da6173093bb9d | [] | no_license | grimapatroy/Python_NETACAD | 6f5db9596b7ad05b35b20403b0f382fad54bfd4f | f1a36714c22e4a83ef0d208e1a9420c0b34fd5ab | refs/heads/master | 2023-08-16T23:30:15.999909 | 2021-09-16T18:27:35 | 2021-09-16T18:27:35 | 404,426,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from os import strerror
# Echo a file character by character while counting characters and lines.
try:
    ccnt = lcnt = 0
    s = open("C:\\Users\\Humanitroy\\Desktop\\file.txt", "rt")
    # Iterating the file object yields one line at a time until EOF,
    # exactly like the readline()-until-empty loop it replaces.
    for line in s:
        lcnt += 1
        for ch in line:
            print(ch, end='')
            ccnt += 1
    s.close()
    print("\n\nCaracteres en el archivo: ", ccnt)
    print("Lineas en el archivo: ", lcnt)
except IOError as e:
    print("Se produjo un error de E/S: ", strerror(e.errno))
# Beautified version of the counter above: iterates the file object
# directly instead of calling readline() in a while loop.
from os import strerror  # re-imported; already imported at the top of the file
try:
    ccnt = lcnt = 0
    # Iterating the open file yields one line at a time until EOF.
    for line in open('text.txt', 'rt'):
        lcnt += 1
        for ch in line:
            print(ch, end='')
            ccnt += 1
    print("\n\nCaracteres en el archivo: ", ccnt)
    print("Lineas en el archivo: ", lcnt)
except IOError as e:
print("Se produjo un error de E/S: ", strerror(e.errno)) | [
"lenin.coste@gmail.com"
] | lenin.coste@gmail.com |
a264914ada26cf2cef65b45470569fb9c72b51bb | 01dc09fdf4a9203da336b893650235f16ff5380f | /Backtest/Historical_BackTest/Neat/tf_neat-trader-intraday/no_hidden_layer/Tech_Input/simple/genome_test.py | 91c0fbe7c5d8937396ad29d1897557fa3872d7e4 | [] | no_license | webclinic017/RayTrader_v3 | 2b15228881bf7a08e90682a2364905317c282f65 | 2ea39946a2654dbc3b05b41abcaf5a4a4082a1b6 | refs/heads/master | 2023-03-16T04:40:41.392465 | 2019-06-04T04:46:46 | 2019-06-04T04:46:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,544 | py | import glob
import multiprocessing
import trader_env
import trader_data
import visualize
import reporter
from statistics import mean
import numpy as np
import neat
import pickle
import matplotlib.pyplot as plt
# Module-level state shared (and mutated) by the worker functions below.
file_name = "G:\\AI Trading\\Code\\RayTrader_v3\\HistoricalData\\Min_data\\ADANIPORTS-EQ.csv"
data = trader_data.csv_to_df(file_name)
train_data, test_data = trader_data.split_data(data)
# Trading environment over the training slice; run_tests() rebinds this global.
env = trader_env.Weighted_Unrealized_BS_Env(train_data)
# Number of steps available from the environment's starting index.
max_env_steps = len(env.data) - env.t - 1
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config.cfg')
def eval_genome(genome, config):
    """Run one NEAT genome through the (module-global) trading env.

    A recurrent network is built from the genome and fed each
    observation; the argmax of its outputs is used as the trading
    action. The accumulated reward becomes ``genome.fitness``; a summary
    and a daily-profit plot are emitted when the episode ends.
    """
    global env, max_env_steps
    ob = env.reset()
    net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
    current_max_fitness = 0
    fitness_current = 0
    # Steps since fitness last improved (tracked but never acted upon).
    counter = 0
    step = 0
    step_max = max_env_steps
    done = False
    while not done:
        # inputs = trader_data.get_inputs(signals, step)
        nnOutput = net.activate(ob)
        # Action = index of the strongest network output.
        ob, rew, done, _ = env.step(np.argmax(nnOutput))
        # print("id",genome_id,"Step:",step,"act:",np.argmax(nnOutput),"reward:",rew)
        fitness_current += rew
        step += 1
        if fitness_current > current_max_fitness:
            current_max_fitness = fitness_current
            counter = 0
        else:
            counter += 1
        # Episode ends when the data is exhausted...
        if step >= step_max:
            done = True
        # ...or the trading account is wiped out.
        if done or env.amt<=0:
            done = True
            print("Genome id#: ", genome.key)
            message = "Fitness :{} Max Fitness :{} Avg Daily Profit :{} %".format(fitness_current,
                                                                                  current_max_fitness,
                                                                                  round(mean(env.daily_profit_per), 3))
            # NOTE(review): the printed initial value 2000 is hard-coded;
            # confirm it matches the environment's starting capital.
            print("Initial Value: ",2000)
            print("Final Value: ",env.amt)
            print("Days: ",len(env.daily_profit_per))
            print(message)
            plt.title(genome.key)
            plt.plot(env.daily_profit_per)
            plt.show()
            # logger.info(message)
    genome.fitness = fitness_current
def run_tests(genome):
    """Evaluate *genome* on the training slice and then on the test
    slice, rebinding the module-global environment for each run."""
    global env, max_env_steps, config
    for dataset in (train_data, test_data):
        env = trader_env.Weighted_Unrealized_BS_Env(dataset)
        max_env_steps = len(env.data) - env.t - 1
        eval_genome(genome, config)
def run_files(files_set):
    """Evaluate every pickled genome in *files_set* on the train and
    test environments, printing a separator between genomes.

    Fix: the pickle file handle used to be left open (``pickle.load(
    open(...))``); it is now closed via a context manager.
    """
    for path in files_set:
        with open(path, 'rb') as fh:
            genome = pickle.load(fh)
        run_tests(genome)
        print("#" * 50)
def chunks(seq, num):
    """Split *seq* into exactly *num* contiguous, near-equal slices.

    Fix: the previous float-stepping loop could emit ``num + 1`` slices
    (e.g. 10 items into 3 chunks), and the extra slice was silently
    dropped by the process fan-out below, leaving files unprocessed.
    Rounding the boundaries guarantees exactly *num* slices that cover
    the whole sequence.
    """
    avg = len(seq) / float(num)
    out = []
    last = 0.0
    for _ in range(num):
        out.append(seq[int(round(last)):int(round(last + avg))])
        last += avg
    return out
# Load all the genomes
files = glob.glob(".\\genomes\\*.pkl")
n_processes = 3
threads = []
if __name__ == "__main__":
    # divide the file-list
    # (one contiguous slice of genome files per worker process)
    chunks_list = chunks(files, n_processes)
    for i in range(n_processes):
        threads.append(multiprocessing.Process(target=run_files, args=(chunks_list[i],)))
    # start all threads
    for t in threads:
        t.start()
    # Join all threads
    for t in threads:
        t.join()
#
# if __name__ == "__main__":
# genomeFile = '.\\genomes\\594.pkl'
# genome = pickle.load(open(genomeFile, 'rb'))
# run_tests(genome)
| [
"rayanup3@gmail.com"
] | rayanup3@gmail.com |
408115b5701ae632cd084102df6c8ce618b892de | 986b71467d606d3f339290734c646cdffc7f822e | /homework2/Solutions/q2_b.py | cce0374e54cfd563d8ff34a86c959487baad33ae | [] | no_license | hyhyjjyjy/Machine-Learning-9417 | e4f556cab3f6d41efeaded666e32d23373a6c32f | 0d498271dbd31333028a3c643bba8b686cf92f49 | refs/heads/main | 2023-08-29T02:08:47.386712 | 2021-11-03T02:25:56 | 2021-11-03T02:25:56 | 389,868,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
A = np.array([[1,0,1,-1],[-1,1,0,2],[0,-1,-2,1]])
b = np.array([[1],[2],[3]])
x = np.array([[1],[1],[1],[1]])
alpha = 0.1
k = 0
all_xs = []
alphas = []
alphas.append(0.1)
all_xs.append(x)
while 1 == 1:
diff_x = np.dot(A.T,(np.dot(A, x) - b))
x = x - alpha * diff_x
x_norm = np.linalg.norm(diff_x, ord=2, axis=None)
all_xs.append(x)
C = np.dot(np.dot(A,A.T),b) - np.dot(np.dot(np.dot(A,A.T),A), x)
D = b - np.dot(A,x)
alpha = np.dot(C.T, D)/np.dot(C.T, C)
alphas.append(alpha[0][0])
if (x_norm < 0.001):
break
k += 1
print(x_norm)
for i in range(5):
print(f"k={i}, x({i})={all_xs[i].reshape(4)} ")
for i in range(k - 4, k + 1):
print(f"k={i}, x({i})={all_xs[i].reshape(4)} ")
plt.plot(alphas)
plt.show()
| [
"bunnydongao@gmail.com"
] | bunnydongao@gmail.com |
696f8b7b5820a780c5b69af4b09a910c33f89eea | 7a9d5f963cd5440ecd8b68340185628af32215a9 | /main.py | 6d519657ee2e64472db246872649fbd29e19fef1 | [] | no_license | RomanZavodskikh/house_price_prediction_service | 7e49e6e0eb4e807742ab5077fd64883b249d06b7 | 5d2a2b4ac500439961cc6d8ad18c02e69bf58341 | refs/heads/master | 2023-02-07T18:25:03.517701 | 2020-12-27T23:12:01 | 2020-12-27T23:12:01 | 323,147,091 | 0 | 1 | null | 2020-12-27T22:16:47 | 2020-12-20T19:17:48 | HTML | UTF-8 | Python | false | false | 3,691 | py | from regressor import Regressor
from codecs import open
import time
from flask import Flask, render_template, request
app = Flask(__name__)
# Load the price-prediction model once at import time so every request
# reuses the same regressor; loading is timed because it can be slow.
print("Load regressor")
start_time = time.time()
regressor = Regressor()
print("Regressor is successfully loaded")
print(time.time() - start_time, "seconds")
@app.route("/", methods = ["GET"])
def index_page():
    """Serve the landing page with the prediction input form."""
    return render_template('index.html')
@app.route("/predict-hata-price", methods = ["POST", "GET"])
def predict_page(host_is_superhost = False,
        host_has_profile_pic = False, host_identity_verified = False,
        require_guest_profile_picture = False,
        require_guest_phone_verification = False,
        latitude = 51.5423, longitude = -0.1285, accommodates = "",
        bathrooms = "", bedrooms = "", beds = "",
        square_feet = "", security_deposit = "", cleaning_fee = "",
        guests_included = "", extra_people = "", minimum_nights = "",
        predicted_price = ""):
    """Handle the prediction form.

    GET renders the form with the default values above; POST parses the
    submitted fields, asks the shared ``regressor`` for a price, logs
    the prediction and re-renders the form with the result.
    """
    if request.method == "POST":
        # Checkboxes: absent -> None -> 0; present -> truthy -> 1.
        host_is_superhost = int(bool(request.form.get("host_is_superhost")))
        host_has_profile_pic = int(bool(request.form.get("host_has_profile_pic")))
        host_identity_verified = int(bool(request.form.get("host_identity_verified")))
        require_guest_profile_picture = int(bool(request.form.get("require_guest_profile_picture")))
        require_guest_phone_verification = int(bool(request.form.get("require_guest_phone_verification")))
        latitude = float(request.form.get("latitude"))
        longitude = float(request.form.get("longitude"))
        accommodates = int(request.form.get("accommodates"))
        bathrooms = int(request.form.get("bathrooms"))
        bedrooms = int(request.form.get("bedrooms"))
        beds = int(request.form.get("beds"))
        square_feet = int(request.form.get("square_feet"))
        security_deposit = int(request.form.get("security_deposit"))
        cleaning_fee = int(request.form.get("cleaning_fee"))
        guests_included = int(request.form.get("guests_included"))
        extra_people = int(request.form.get("extra_people"))
        minimum_nights = int(request.form.get("minimum_nights"))
        predicted_price = regressor.predict_price(
            [[host_is_superhost, host_has_profile_pic,
            host_identity_verified, 1, # consider location is always exact
            require_guest_profile_picture, require_guest_phone_verification,
            latitude, longitude, accommodates, bathrooms,
            bedrooms, beds,
            square_feet, security_deposit, cleaning_fee,
            guests_included, extra_people, minimum_nights]]
        )
        # Fix: the closing tag used to be written as a second opening
        # "<response>"; the log file is also closed via a context manager
        # now instead of a manual close() that a raised exception skipped.
        with open("hata_otsenyator_logs.txt", "ab", "utf-8") as logfile:
            logfile.write("<response>\n")
            logfile.write(str(predicted_price))
            logfile.write("</response>\n")
    # NOTE(review): this delay runs on every request, GET included, as in
    # the original code - confirm it is an intentional "thinking" pause.
    time.sleep(3)
    return render_template('simple_page.html',
        host_is_superhost = host_is_superhost,
        host_has_profile_pic = host_has_profile_pic,
        host_identity_verified = host_identity_verified,
        require_guest_profile_picture = require_guest_profile_picture,
        require_guest_phone_verification = require_guest_phone_verification,
        latitude = latitude, longitude = longitude, accommodates = accommodates,
        bathrooms = bathrooms, bedrooms = bedrooms, beds = beds, square_feet = square_feet,
        security_deposit = security_deposit, cleaning_fee = cleaning_fee,
        guests_included = guests_included, extra_people = extra_people,
        minimum_nights = minimum_nights,
        predicted_price = predicted_price)
if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(host = '0.0.0.0', port = 44445, debug = True)
| [
"Roman.Zavodskikh@acronis.com"
] | Roman.Zavodskikh@acronis.com |
940b8d72bc9a7a19f531216fbae3406918a2a382 | 0d0663773f2f95b31610d53de08f883d71e4277f | /show_checkpoint.py | 856bf61aecabfe0db675b726c6a2736a1c25753f | [] | no_license | THVi-xTHU/xthu | cd9705766a587408f94e5e70f38dfe37c9813c23 | 5c82949f28a32c0be34a45994d0098ffc5a058c1 | refs/heads/master | 2021-07-20T17:59:51.026522 | 2018-07-06T16:22:39 | 2018-07-06T16:22:39 | 134,022,347 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from tensorflow.python.tools import inspect_checkpoint as chkp
# Checkpoint paths for the two pre-trained models this script inspects.
model1='fcrn_depth_prediction/model/NYU_FCRN.ckpt'
model2='KittiSeg/RUNS/KittiSeg_pretrained/model.ckpt-15999'
#chkp.print_tensors_in_checkpoint_file(model1, tensor_name='depth', all_tensors=True)
# Dump every tensor under the 'seg' name scope of the KittiSeg checkpoint.
chkp.print_tensors_in_checkpoint_file(model2, tensor_name='seg', all_tensors=True)
| [
"thushenhan@gmail.com"
] | thushenhan@gmail.com |
ec6062baf7c8fd523d227a088ce493e3a46ece16 | 8030effd125ce82de48868a320bae6a81e91c240 | /singlePYexercise/Median.py | 9ca7ed8c7909c774e2fdfaf8b206925fd9ec8ff3 | [] | no_license | ttyyxxu/pythonWork | 9da8684b3ef635d78105d9884591ce0a83416e78 | fd557f1f5e58fbd1a5e2ef39bb69843d6fdd47d8 | refs/heads/master | 2023-08-06T00:55:48.900531 | 2021-09-27T10:04:29 | 2021-09-27T10:04:29 | 350,281,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | def findMedianSortedArrays(nums1, nums2):
    # Merge both inputs and sort; O((m+n) log (m+n)) instead of the
    # O(log(m+n)) partition approach, but simple and correct.
    list_merge = []
    list_merge.extend(nums1)
    list_merge.extend(nums2)
    list_merge.sort()
    print(list_merge)  # debug output of the merged, sorted list
    # Empty input: no median to return.
    if list_merge == []:
        return None
    # Odd length: the single middle element.
    if len(list_merge) % 2 == 1:
        return list_merge[int((len(list_merge)-1)/2)]
    else:
        # Even length: average of the two middle elements.
        return (list_merge[int(len(list_merge)/2)] + list_merge[int(len(list_merge)/2 - 1)]) / 2
# Ad-hoc check: the two middle values of the 14 merged numbers are 4 and 4,
# so this prints 4.0.
print(findMedianSortedArrays([1,2,3,4,7,8,100],[0,2,3,4,22,33,467]))
"mcdyess_dunker@hotmail.com"
] | mcdyess_dunker@hotmail.com |
f2bbd55a26eaa0aefba82dbb86521896c10127b6 | 797981a440bfd3fa6c005cc61c96d15d149ef121 | /excel/main.py | edcce86761bb379ce82780b03ff5d7869d07ba5f | [] | no_license | gvuuvg/GeekGvuuvg | e556a0c0a2a78ac86400100f43cb6cedbfc59225 | 77179359e18255b7526324d14f31776461066a41 | refs/heads/master | 2023-08-23T20:15:01.133650 | 2021-10-22T12:46:58 | 2021-10-22T12:46:58 | 415,501,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import xlrd
import json
# xlrd版本需要是1.2.0
data = xlrd.open_workbook("Book1.xlsx")
table = data.sheets()[0]
n = table.nrows
stu = []
for i in range(n):
if i == 0:
continue
stu.append(table.row_values(i))
json_data = json.dumps(stu)
with open('stu_json.json','w') as f :
f.write(json_data)
with open('stu_json.json','r') as f:
d = json.load(f)
print(d)
r = list(filter(lambda j: j[2] > 5, d))
print("亲密度大于5的人有:",r)
| [
"h13983092340@163.com"
] | h13983092340@163.com |
7f74cfbc1f54a349672430ed58e9f38036faf634 | d80627829d3c7efefe5385f5ef20a1c3ac869f96 | /service/en_patent_service.py | d3f512bf96ea1fc52beb7a4b14dfa3cb28c9be8b | [] | no_license | zhantanfeng/enterprise_project_df | 42675109a4db6c4892bf7b1a033a2abc95129029 | ffc88e871f2c050cf9ca5e54bfb2a145335554dd | refs/heads/master | 2022-12-15T04:03:42.532493 | 2020-03-19T12:56:22 | 2020-03-19T12:56:22 | 246,612,129 | 0 | 0 | null | 2022-12-08T03:49:55 | 2020-03-11T15:43:01 | JavaScript | UTF-8 | Python | false | false | 8,960 | py | """
处理有关企业专利信息的数据库获取结果的处理
"""
import dao.enterprise_patent_dao as enterprise_patent_dao
import dao.enterprise_dao as enterprise_dao
from collections import Counter
def get_en_info_by_patent(searched_patent):
    """Search enterprises by patent achievement.

    :param searched_patent: patent search term
    :return: enterprise info records for every matching patent owner
    """
    patent_ids = enterprise_patent_dao.get_pa_id_by_patent(searched_patent)
    owner_names = [enterprise_patent_dao.get_en_name_by_pa_id(pid) for pid in patent_ids]
    return [enterprise_dao.get_en_info_by_name_1(name) for name in owner_names]
def get_pa_count_by_firstkind():
    """Return [field, patent_count] pairs for every first-level
    technology field (initial page load)."""
    first_level = get_all_field()[0]
    return [[field, enterprise_patent_dao.get_count_by_firstkind(field)[0]]
            for field in first_level]
def get_count_by_firstkind(field):
    """Return [field, patent_count] pairs for the second-level fields
    under first-level *field*."""
    return [[sub, enterprise_patent_dao.get_count_by_secondkind(sub)[0]]
            for sub in enterprise_patent_dao.get_second_field(field)]
def get_count_by_secondkind(field):
    """Return [field, patent_count] pairs for the third-level fields
    under second-level *field*."""
    return [[sub, enterprise_patent_dao.get_count_by_thirdkind(sub)[0]]
            for sub in enterprise_patent_dao.get_third_field(field)]
def get_all_field():
    """Return [first, second, third]: the field names of each technology
    level, flattened from the DAO's nested row structure."""
    raw = enterprise_patent_dao.get_all_field()
    first_level = [row[0] for group in raw[0] for row in group]
    second_level = [row[0] for group in raw[1] for row in group]
    third_level = [row[0] for group in raw[2] for row in group]
    return [first_level, second_level, third_level]
def get_engineer_and_en_by_field(field):
    """For a technology *field*, return up to the 10 enterprises with
    the most patents, each paired with its 10 most frequent inventors.

    :return: list of [enterprise_name, [inventor, ...]] entries; an
        entry whose only inventor is the anonymous placeholder is
        skipped.
    """
    rows = enterprise_patent_dao.get_engineer_and_en_by_field(field)
    # rows: (enterprise_id, "inventor1,inventor2,...") per patent.
    top_enterprises = [en_id for en_id, _ in
                       Counter(row[0] for row in rows).most_common(10)]
    result = []
    for en_id in top_enterprises:
        inventors = Counter()
        for row in rows:
            if row[0] == en_id:
                inventors.update(row[1].split(","))
        top_inventors = [name for name, _ in inventors.most_common(10)]
        if top_inventors != ['不公告发明人']:
            result.append([enterprise_dao.get_en_name_by_en_id(en_id), top_inventors])
    return result
def get_patent_by_first_ipc():
    """Return ["<code>:<label>", patent_count_row] for every first-level
    IPC class."""
    return [[row[0] + ":" + row[1], enterprise_patent_dao.get_patent_by_ipc(row[0])]
            for row in enterprise_patent_dao.get_first_ipc()]
def get_patent_by_second_ipc(ipc_id):
    """Return ["<code>:<label>", patent_count] for the second-level IPC
    classes under *ipc_id*, sorted by count descending.

    Classes with zero patents are dropped; when more than ten remain,
    the tail beyond the top nine is merged into a single "其他" bucket.
    """
    result = []
    for row in enterprise_patent_dao.get_second_ipc(ipc_id):
        # Query the count once per class (it used to be queried twice).
        count = enterprise_patent_dao.get_patent_by_ipc(row[0])[0]
        if count != 0:
            result.append([row[0] + ":" + row[1], count])
    result.sort(key=lambda item: item[1], reverse=True)
    if len(result) > 10:
        tail_total = sum(item[1] for item in result[9:])
        result = result[0:9]
        result.append(["其他", tail_total])
    return result
def get_patent_by_third_ipc(ipc_id):
    """Return ["<code>:<label>", patent_count] for the third-level IPC
    classes under *ipc_id*, sorted by count descending.

    Every '0' in the code's character slice [4:7] is stripped before the
    lookup (presumably zero-padding removal - NOTE: this also removes
    interior zeros; confirm the stored code format). Zero-count classes
    are dropped and, past ten entries, the tail beyond the top nine is
    merged into a "其他" bucket.
    """
    result = []
    for row in enterprise_patent_dao.get_third_ipc(ipc_id):
        # Build the stripped code once (it used to be rebuilt three
        # times) and query the count once (it used to be queried twice).
        code = row[0][0:4] + row[0][4:7].replace("0", "") + row[0][7:]
        count = enterprise_patent_dao.get_patent_by_ipc(code)[0]
        if count != 0:
            result.append([code + ":" + row[1], count])
    result.sort(key=lambda item: item[1], reverse=True)
    if len(result) > 10:
        tail_total = sum(item[1] for item in result[9:])
        result = result[0:9]
        result.append(["其他", tail_total])
    return result
def get_engineer_and_en_by_ipc(ipc_id):
    """For an IPC class, return up to the 10 enterprises with the most
    patents, each paired with its 10 most frequent inventors (patent
    grouping view).

    :return: list of [enterprise_name, [inventor, ...]]; an entry whose
        only inventor is the anonymous placeholder is skipped.
    """
    rows = enterprise_patent_dao.get_engineer_and_en_by_ipc(ipc_id)
    top_enterprises = [en_id for en_id, _ in
                       Counter(row[0] for row in rows).most_common(10)]
    result = []
    for en_id in top_enterprises:
        inventors = Counter()
        for row in rows:
            if row[0] == en_id:
                inventors.update(row[1].split(","))
        top_inventors = [name for name, _ in inventors.most_common(10)]
        if top_inventors != ['不公告发明人']:
            result.append([enterprise_dao.get_en_name_by_en_id(en_id), top_inventors])
    return result
def get_engineer_and_en_by_ipc2(ipc_id):
    """For an IPC class, return up to the 10 enterprises with the most
    patents, each paired with its 15 most frequent inventors (engineer
    grouping view).

    :return: list of [enterprise_name, [inventor, ...]]; an entry whose
        only inventor is the anonymous placeholder is skipped.
    """
    rows = enterprise_patent_dao.get_engineer_and_en_by_ipc(ipc_id)
    top_enterprises = [en_id for en_id, _ in
                       Counter(row[0] for row in rows).most_common(10)]
    result = []
    for en_id in top_enterprises:
        inventors = Counter()
        for row in rows:
            if row[0] == en_id:
                inventors.update(row[1].split(","))
        top_inventors = [name for name, _ in inventors.most_common(15)]
        if top_inventors != ['不公告发明人']:
            result.append([enterprise_dao.get_en_name_by_en_id(en_id), top_inventors])
    return result
def get_engineer_count_with_first_ipc():
    """Return ["<code>:<label>", engineer_count] for every first-level
    IPC class, sorted by count descending."""
    counts = [[row[0] + ":" + row[1], enterprise_patent_dao.get_count_with_ipc(row[0])]
              for row in enterprise_patent_dao.get_first_ipc()]
    counts.sort(key=lambda item: item[1], reverse=True)
    return counts
def get_engineer_count_with_second_ipc():
    """Return ["<code>:<label>", engineer_count] for second-level IPC
    classes with more than 100 engineers, sorted by count descending."""
    result = []
    for row in enterprise_patent_dao.get_second_ipc():
        # Query the count once per class (it used to be queried twice).
        count = enterprise_patent_dao.get_count_with_ipc(row[0])
        if count > 100:
            result.append([row[0] + ":" + row[1], count])
    result.sort(key=lambda item: item[1], reverse=True)
    return result
def get_engineer_count_with_third_ipc():
    """Return ["<code>:<label>", engineer_count] for third-level IPC
    classes with more than 50 engineers, sorted by count descending."""
    result = []
    for row in enterprise_patent_dao.get_all_third_ipc():
        # Query the count once per class (it used to be queried twice).
        count = enterprise_patent_dao.get_count_with_ipc2(row[0])
        if count > 50:
            result.append([row[0] + ":" + row[1], count])
    result.sort(key=lambda item: item[1], reverse=True)
    return result
if __name__ == "__main__":
    # Ad-hoc manual checks; uncomment a line to exercise another query.
    # print(get_count_by_firstkind("电子信息技术"))
    # print(get_engineer_and_en_by_ipc("A23C7/00"))
    print(get_engineer_count_with_third_ipc())
# pass | [
"1148705128@qq.com"
] | 1148705128@qq.com |
552f8201a844eb37e5bdbb4737bfcc762df98c5b | 91f665f982b164eeb3b6db21eb047746e8488ba5 | /resources.py | 35dcdf4cdbc6af34019ba977b8f9058d84115a45 | [] | no_license | 46319943/qgis_plugin_homework | 1dd6915b9617e55609c2dd858c64cb011742ab5d | 94cb6bc74512a439effad1e5556e1bf9cbfeb3bd | refs/heads/master | 2023-01-02T21:17:56.255944 | 2020-10-20T09:10:18 | 2020-10-20T09:10:18 | 305,649,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,912 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x0a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x17\x00\x00\x00\x18\x08\x06\x00\x00\x00\x11\x7c\x66\x75\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\
\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\xd9\x02\x15\
\x16\x11\x2c\x9d\x48\x83\xbb\x00\x00\x03\x8a\x49\x44\x41\x54\x48\
\xc7\xad\x95\x4b\x68\x5c\x55\x18\xc7\x7f\xe7\xdc\x7b\x67\xe6\xce\
\x4c\x66\x26\x49\xd3\x24\x26\xa6\xc6\xf8\x40\x21\xa5\x04\xb3\x28\
\xda\x98\x20\xa5\x0b\xad\x55\xa8\x2b\xc5\x50\x1f\xa0\x6e\x34\x2b\
\x45\x30\x14\x02\xba\x52\x69\x15\x17\x66\x63\x45\x97\x95\xa0\xad\
\x0b\xfb\xc0\x06\x25\xb6\x71\x61\x12\x41\x50\xdb\x2a\x21\xd1\xe2\
\x24\xf3\x9e\xc9\xcc\xbd\xe7\x1c\x17\x35\x43\x1e\x33\x21\xb6\xfd\
\x56\x87\xf3\x9d\xfb\xfb\x1e\xf7\xff\x9d\x23\x8c\x31\x43\x95\xf4\
\x85\x1e\x3f\x3b\x35\xac\xfd\xcc\x43\xdc\xa4\x49\x3b\xfe\x9d\x1d\
\xdb\x7b\x22\x90\x78\xf8\xb2\x28\xa7\xbe\x7d\xc1\x4b\x9d\x79\xdf\
\x18\x15\xe5\x16\x99\x10\x56\xde\x69\xdc\x3f\x22\xfd\xec\xd4\xf0\
\xad\x04\x03\x18\xa3\xa2\x7e\x76\x6a\x58\xde\x68\x2b\xb4\x36\xf8\
\xbe\xc6\x18\x53\xdb\xef\xe7\xfa\xec\xed\x67\x63\x10\x42\x00\xf0\
\xfb\xd5\x65\x2a\x15\x45\xc7\x6d\x0d\x00\xc4\xa2\xc1\xaa\x6f\x0d\
\x3e\x6c\xab\xc2\x1c\x56\xa4\x77\x4b\xb0\xf2\x35\x15\x5f\x21\x85\
\xe0\xc8\x6b\x5f\x92\x2d\x37\x33\x39\xf9\x03\x27\x8e\x1f\xa2\xf7\
\xbe\x9d\x04\x1c\x0b\x37\xe4\xac\xff\xa6\x30\x87\xbd\xba\x00\x6a\
\x06\x79\xe5\xf5\xaf\x89\xd9\x92\xc5\xcc\x0a\xd9\x7c\x19\xcf\xe9\
\xe2\xe4\xa9\x2f\x78\x7c\xff\x01\x72\x85\x0a\x2b\x65\x1f\xa5\x4c\
\xb5\xb2\x55\x16\x80\xbd\x31\xda\xda\x20\x1f\x7d\x3e\xcd\xc2\xfd\
\x59\xa6\x93\x39\x92\xd1\x22\xea\x9b\x16\xce\x9d\x3f\xce\xe0\x83\
\x03\x24\x82\x59\x3a\xdb\x7b\x88\xc7\x82\x68\x63\x58\xc9\xcc\x62\
\x8c\x21\x18\xb0\x6a\xc3\x37\x06\x49\x16\xff\x24\x6b\xa5\x49\xbb\
\x25\xbc\xa2\xa6\x21\xbb\x40\x7f\xdf\x00\x83\xbd\x01\x8e\x3c\xd5\
\x45\xd7\x8e\x6b\x9c\x9c\x98\x25\x1a\xb6\xe8\xbe\x3d\xc2\xdd\x77\
\x44\x48\xc4\x1c\x22\xe1\xeb\x58\x59\xaf\xcf\xd3\x33\x29\x2e\x34\
\x2d\x91\x93\x3e\xbe\x34\x78\x01\xc5\xe2\x61\xc5\xae\x72\x8e\x70\
\xc8\xc2\x0d\x5a\xbc\xf5\xee\x2f\x9c\xfa\x3e\x86\x69\x7a\x8e\xcf\
\x26\xe6\xf9\x63\xa1\x44\xa1\xa4\xd0\xda\x6c\x0d\x2f\x15\x7c\xb4\
\x67\x28\x59\x0a\xcf\xd6\x54\xe2\x06\x13\x87\x2b\x6f\x68\xa6\x27\
\xaf\x31\x32\x36\xc7\xb2\x7f\x17\xef\x7d\x7c\x8c\x33\x67\xcf\x12\
\x70\x24\x4a\x69\xd6\x6a\x46\xd6\xd3\x70\x72\xa9\x82\x67\x34\x45\
\xad\x28\xdb\x1a\x15\x34\x98\xff\x46\xed\xef\x37\x0d\x99\xbf\x4a\
\x3c\x30\x38\xc0\xc8\x4b\xaf\x92\x5a\x9c\xe2\xe0\x23\x6d\x74\xb4\
\xba\x84\x5d\x0b\x29\x45\x7d\xb8\x94\x82\x96\xb6\x10\xf3\xc5\x12\
\x2a\xef\x53\x11\x1a\x63\xad\x3f\x93\x19\x85\xf1\xb1\x77\x58\x5a\
\xf8\x99\x97\x9f\xe9\xa6\x75\x47\x90\xc6\xb8\x43\xd8\xb5\xb6\xce\
\xfc\xfa\xfd\x00\xfb\x3e\xf4\xc8\x05\x35\xba\x5e\xeb\x46\x21\xf9\
\xcf\x0a\xa9\x8c\x87\xe3\x48\xdc\x90\xb5\x6e\x98\x6a\xaa\x65\xf2\
\x52\x92\x43\x2f\x5e\xc2\x8c\x02\x1a\x10\xf5\x07\xac\xc3\x75\x70\
\x83\x92\x80\xb3\xf9\xd0\x26\xf8\x8f\xb3\x29\xc6\x3e\xb8\x8c\x19\
\x35\x75\x6b\x7b\x7e\x3c\xca\x45\x0c\x7e\x49\x31\xf4\x58\x3b\xf7\
\xf6\x34\x90\x88\x39\x04\x1c\x59\x1f\xfe\xdb\xd5\x3c\x5f\x9d\x4b\
\x32\xfd\x44\xb2\xba\xd7\xfa\xb6\x60\xcf\xde\x16\xdc\x90\x45\x4c\
\x4a\x2a\x9e\x62\xfe\x4e\xc5\xc8\xc1\x4e\xda\x76\x86\xe8\xe9\x0a\
\xe3\xd8\x92\x58\xd4\xc6\xb2\x44\x6d\x78\x2a\x53\xe1\xca\x7c\x99\
\x63\x5d\xbf\x56\x9d\xbd\x9f\x44\x18\x7a\xba\x95\x27\x0f\xb4\xd3\
\xdc\x18\xc0\xf3\x0d\x52\x40\xd8\xb5\xb0\xa4\x20\x14\xb2\x70\x6c\
\x81\x63\xcb\xaa\x42\xd6\xfd\xb7\xf4\xec\xa3\x06\xa0\x50\x52\xd8\
\x4e\x1b\x7e\x4a\xd3\x31\xf9\x29\xcf\xfe\xd4\x49\x7f\x5f\x13\xfb\
\xfa\x9b\x71\x43\x92\x58\xd4\x21\x18\x90\xac\xde\xb0\x42\x50\x13\
\x58\x33\xf3\x88\x6b\xa1\xfd\x65\x96\xf2\x79\xc6\x43\x7b\xd8\x75\
\x38\xcc\x3d\xdd\xd1\xaa\xcf\x71\xe4\xff\x7f\x91\x56\x33\xaf\xea\
\x37\xe7\xa1\x94\x21\x16\xb5\xd1\x06\x2c\x29\x36\xf5\x72\x9b\x96\
\x95\xc0\xc4\xda\x9d\x78\x83\x43\x53\x22\x80\x65\x09\x1c\xfb\x86\
\xc1\x00\xe7\x25\x70\x14\x48\x6f\x1e\x22\x51\xe3\x75\xd9\xb6\xa5\
\x81\xa3\x32\xb1\xfb\xf4\x0c\x30\xb8\xb1\x82\x9b\xb0\x09\x60\x30\
\xb1\xfb\xf4\xcc\xbf\xa0\xe9\x6e\xae\x5a\xdf\x4b\x81\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x02\
\x00\x00\x05\x19\
\x00\x4c\
\x00\x59\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x75\x39\xe1\x5a\x0b\
"
qt_version = QtCore.qVersion().split('.')
if qt_version < ['5', '8', '0']:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
    # Register the embedded resource data with Qt's resource system so it is
    # reachable through ":/..." resource paths.
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Unregister the resource data (mirror of qInitResources).
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"495384481@qq.com"
] | 495384481@qq.com |
513d71e4bf5d4a10269d5785e060daaa8ce0b977 | 796c913ba4bdfdf9d8f9c891dd97ff90d6b1bdd4 | /plaid/api/link_token.py | 74a10c1d92823a7e3fd934f7d492c3eb1281b91c | [
"MIT"
] | permissive | InspiredMember/plaid-python | 44da758e676baac58d857588c7e34ed6e0c409f9 | 8bee5a907e0fd03c406b24a7b62166f86a42ca6b | refs/heads/master | 2021-08-06T19:38:35.609908 | 2020-08-04T23:42:41 | 2020-08-04T23:42:41 | 206,851,929 | 0 | 0 | MIT | 2019-09-06T18:26:10 | 2019-09-06T18:26:10 | null | UTF-8 | Python | false | false | 739 | py | from plaid.api.api import API
# Keys copied from the caller-supplied config dict into the
# /link/token/create request body (see LinkToken.create below).
link_token_field_names = [
    'user',
    'client_name',
    'products',
    'country_codes',
    'language',
    'redirect_uri',
    'android_package_name',
    'webhook',
    'link_customization_name',
    'access_token',
    'account_filters',
    'cross_app_item_add',
    'payment_initiation',
]
class LinkToken(API):
    '''Endpoints for managing link tokens.'''

    def create(self, configs):
        '''
        Create a Link token.

        :param dict configs: A required dictionary to configure the Link token.
        '''
        # Copy each known field from the caller's config (missing keys map
        # to None, matching the previous explicit loop).
        body = {field: configs.get(field) for field in link_token_field_names}
        return self.client.post('/link/token/create', body)
| [
"noreply@github.com"
] | InspiredMember.noreply@github.com |
487890ec6dfa248593a93530920bc2c0b559b453 | 3cdb4faf34d8375d6aee08bcc523adadcb0c46e2 | /web/env/lib/python3.6/site-packages/django/contrib/messages/storage/base.py | fd5d0c24aa8037c6beb35ed14e85fda6851aa798 | [
"MIT",
"GPL-3.0-only"
] | permissive | rizwansoaib/face-attendence | bc185d4de627ce5adab1cda7da466cb7a5fddcbe | 59300441b52d32f3ecb5095085ef9d448aef63af | refs/heads/master | 2020-04-25T23:47:47.303642 | 2019-09-12T14:26:17 | 2019-09-12T14:26:17 | 173,157,284 | 45 | 12 | MIT | 2020-02-11T23:47:55 | 2019-02-28T17:33:14 | Python | UTF-8 | Python | false | false | 5,643 | py | from django.conf import settings
from django.contrib.messages import constants, utils
# Snapshot of {level int -> tag string}, computed once at import time.
LEVEL_TAGS = utils.get_level_tags()
class Message:
    """
    Represent an actual message that can be stored in any of the supported
    storage classes (typically session- or cookie-based) and rendered in a view
    or template.
    """

    def __init__(self, level, message, extra_tags=None):
        self.level = int(level)
        self.message = message
        self.extra_tags = extra_tags

    def _prepare(self):
        """
        Prepare the message for serialization by forcing the ``message``
        and ``extra_tags`` to str in case they are lazy translations.
        """
        self.message = str(self.message)
        self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None

    def __eq__(self, other):
        # Return NotImplemented for non-Message operands so Python can try
        # the reflected comparison instead of silently answering False.
        if not isinstance(other, Message):
            return NotImplemented
        return self.level == other.level and self.message == other.message

    def __str__(self):
        return str(self.message)

    @property
    def tags(self):
        # Join the caller-supplied extra tags with the level tag, skipping blanks.
        return ' '.join(tag for tag in [self.extra_tags, self.level_tag] if tag)

    @property
    def level_tag(self):
        # LEVEL_TAGS maps message-level ints to CSS-friendly tag strings.
        return LEVEL_TAGS.get(self.level, '')
class BaseStorage:
    """
    This is the base backend for temporary message storage.

    This is not a complete class; to be a usable storage backend, it must be
    subclassed and the two methods ``_get`` and ``_store`` overridden.
    """

    def __init__(self, request, *args, **kwargs):
        self.request = request
        # Messages added during this request/response cycle, not yet stored.
        self._queued_messages = []
        # Becomes True once the messages have been iterated (i.e. displayed).
        self.used = False
        # Becomes True once add() queues at least one message.
        self.added_new = False
        super().__init__(*args, **kwargs)

    def __len__(self):
        return len(self._loaded_messages) + len(self._queued_messages)

    def __iter__(self):
        # Iteration consumes the messages: mark the storage as used so that
        # update() knows not to persist the displayed messages again.
        self.used = True
        if self._queued_messages:
            self._loaded_messages.extend(self._queued_messages)
            self._queued_messages = []
        return iter(self._loaded_messages)

    def __contains__(self, item):
        return item in self._loaded_messages or item in self._queued_messages

    @property
    def _loaded_messages(self):
        """
        Return a list of loaded messages, retrieving them first if they have
        not been loaded yet.
        """
        if not hasattr(self, '_loaded_data'):
            # `all_retrieved` is intentionally unused here; subclasses use it
            # inside their own _get()/_store() implementations.
            messages, all_retrieved = self._get()
            self._loaded_data = messages or []
        return self._loaded_data

    def _get(self, *args, **kwargs):
        """
        Retrieve a list of stored messages. Return a tuple of the messages
        and a flag indicating whether or not all the messages originally
        intended to be stored in this storage were, in fact, stored and
        retrieved; e.g., ``(messages, all_retrieved)``.

        **This method must be implemented by a subclass.**

        If it is possible to tell if the backend was not used (as opposed to
        just containing no messages) then ``None`` should be returned in
        place of ``messages``.
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _get() method')

    def _store(self, messages, response, *args, **kwargs):
        """
        Store a list of messages and return a list of any messages which could
        not be stored.

        One type of object must be able to be stored, ``Message``.

        **This method must be implemented by a subclass.**
        """
        raise NotImplementedError('subclasses of BaseStorage must provide a _store() method')

    def _prepare_messages(self, messages):
        """
        Prepare a list of messages for storage.
        """
        for message in messages:
            message._prepare()

    def update(self, response):
        """
        Store all unread messages.

        If the backend has yet to be iterated, store previously stored messages
        again. Otherwise, only store messages added after the last iteration.
        """
        self._prepare_messages(self._queued_messages)
        if self.used:
            # Messages were displayed; only those queued afterwards remain.
            return self._store(self._queued_messages, response)
        elif self.added_new:
            # Nothing was displayed: persist old and new messages together.
            messages = self._loaded_messages + self._queued_messages
            return self._store(messages, response)

    def add(self, level, message, extra_tags=''):
        """
        Queue a message to be stored.

        The message is only queued if it contained something and its level is
        not less than the recording level (``self.level``).
        """
        if not message:
            return
        # Check that the message level is not less than the recording level.
        level = int(level)
        if level < self.level:
            return
        # Add the message.
        self.added_new = True
        message = Message(level, message, extra_tags=extra_tags)
        self._queued_messages.append(message)

    def _get_level(self):
        """
        Return the minimum recorded level.

        The default level is the ``MESSAGE_LEVEL`` setting. If this is
        not found, the ``INFO`` level is used.
        """
        if not hasattr(self, '_level'):
            self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
        return self._level

    def _set_level(self, value=None):
        """
        Set a custom minimum recorded level.

        If set to ``None``, the default level will be used (see the
        ``_get_level`` method).
        """
        if value is None and hasattr(self, '_level'):
            del self._level
        else:
            self._level = int(value)

    # Passing _set_level as the deleter means ``del storage.level`` resets the
    # level back to the default (value falls back to None).
    level = property(_get_level, _set_level, _set_level)
| [
"rizwansoaib@gmail.com"
] | rizwansoaib@gmail.com |
5b6746bc96796294065d58ec98028daa3d44bbf9 | 2f5ab43956b947b836e8377370d786e5ee16e4b0 | /sklearn2code/sym/test/test_printers.py | d1f8d27e37ac139c656be81f1359268ce15271d4 | [
"MIT"
] | permissive | modusdatascience/sklearn2code | b175fb268fa2871c95f0e319f3cd35dd54561de9 | 3ab82d82aa89b18b18ff77a49d0a524f069d24b9 | refs/heads/master | 2022-09-11T06:16:37.604407 | 2022-08-24T04:43:59 | 2022-08-24T04:43:59 | 115,747,326 | 4 | 2 | MIT | 2018-05-01T00:11:51 | 2017-12-29T19:05:03 | Python | UTF-8 | Python | false | false | 874 | py | from sklearn2code.sym.expression import FiniteMap, Integer, false, true,\
IntegerVariable, RealPiecewise, RealNumber
from sklearn2code.sym.printers import JavascriptPrinter
from nose.tools import assert_equal
def test_javascript_finite_map():
    # An integer->bool lookup table should render as nested JS ternaries
    # with a null fallback.
    mapping = FiniteMap({Integer(0): false, Integer(1): true}, IntegerVariable('x'))
    assert_equal(JavascriptPrinter()(mapping), '(x===0?false:(x===1?true:null))')
def test_javascript_piecewise():
    # A real-valued piecewise should render condition-first ternaries with a
    # null fallback.
    piecewise = RealPiecewise((RealNumber(0), false), (RealNumber(1), true))
    assert_equal(JavascriptPrinter()(piecewise), '(false?0.0:(true?1.0:null))')
if __name__ == '__main__':
    import sys
    import nose
    # This code will run the tests in this file.
    module_name = sys.modules[__name__].__file__
    result = nose.run(argv=[sys.argv[0],
                            module_name,
                            '-s', '-v'])
| [
"jcrudy@gmail.com"
] | jcrudy@gmail.com |
1e50a1ac9ccf9fd437998f3c191d97aeaa03a6d2 | 288379497b95ff3a737ad7b26037138bc0d201fb | /app/markup.py | a691bdcfd25a9b4c9e95dea2c22da587f66800d4 | [
"MIT"
] | permissive | yesudeep/greatshipgroup | 5069df92bcae40d074ec9960b3d1f8161ff1046f | 28530867683da44fd5f50e648972650a7ae5320a | refs/heads/master | 2020-05-24T12:49:59.166880 | 2011-02-04T17:44:55 | 2011-02-04T17:44:55 | 466,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | """
Support for different markup languages for the body of a post.
The following markup languages are supported:
- HTML
- Plain text
- ReStructured Text
- Markdown
- Textile
For ReStructuredText and Markdown syntax highlighting of source code is
available.
"""
# TODO: Add summary rendering.
# TODO: Docstrings.
import logging
import re
from cStringIO import StringIO
from django.utils import html
from django.utils import text
import configuration
import utils
# Import markup module from lib/
import markdown
import textile
from docutils.core import publish_parts
def render_rst(content):
warning_stream = StringIO()
parts = publish_parts(content, writer_name='html4css1',
settings_overrides={
'_disable_config': True,
'embed_stylesheet': False,
'warning_stream': warning_stream,
'report_level': 2,
})
rst_warnings = warning_stream.getvalue()
if rst_warnings:
logging.warn(rst_warnings)
return parts['html_body']
def render_markdown(content):
md = markdown.Markdown()
return md.convert(content)
def render_textile(content):
return textile.textile(content.encode('utf-8'))
# Mapping: string ID -> (human readable name, renderer)
MARKUP_MAP = {
'html': ('HTML', lambda c: c),
'txt': ('Plain Text', lambda c: html.linebreaks(html.escape(c))),
'markdown': ('Markdown', render_markdown),
'textile': ('Textile', render_textile),
'rst': ('ReStructuredText', render_rst),
}
def get_renderer(markup_type):
"""Returns a render based on markup type."""
return MARKUP_MAP.get(markup_type)[1]
| [
"yesudeep@mia.local"
] | yesudeep@mia.local |
23dc0ef96104eeaaf9e32141ff8e5a1af34080fe | 32e6aa08aa5fe0707e2adb092d6f6804845d94bc | /calculator.py | 48fba631283e35ea281956938ad89db5aaa9fc79 | [] | no_license | shrishail-talukar/patrick-jane-text-editor | e2a888a946b13ed97eb34274c31b01115ba2b1cd | 58feae1399b3984ec5971c185865d5ffc8b0f4af | refs/heads/master | 2023-05-11T19:05:21.889071 | 2020-07-19T21:01:19 | 2020-07-19T21:01:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | #Author: Shrishail Talukar
#Email: shrishailtalukar@gmail.com
# A simple text editor, capable for creating a file, opening a file, and saving a file
# Feel free to improve the software
# Python 3x, and QtPy/PyQt5 is used
import sys
from qtpy import QtGui, QtCore, QtWidgets
class Workplace(QtWidgets.QMainWindow):
    """Calculator main window: two number boxes, a button and a result label."""

    def __init__(self):
        super().__init__()
        self.setGeometry(100, 100, 300, 200)
        self.setWindowTitle("Patrick Jane Calculator")
        self.number_1 = QtWidgets.QLabel("Number 1: ", self)
        self.number_2 = QtWidgets.QLabel("Number 2: ", self)
        self.box_1 = QtWidgets.QLineEdit(self)
        self.box_2 = QtWidgets.QLineEdit(self)
        self.result = QtWidgets.QLabel("", self)
        submit = QtWidgets.QPushButton('Calculate', self)
        submit.clicked.connect(self.add)
        # Lay the widgets out with fixed positions/sizes.
        self.number_1.move(20, 40)
        self.number_2.move(20, 70)
        self.box_1.move(90, 40)
        self.box_1.resize(150, 30)
        self.box_2.move(90, 70)
        self.box_2.resize(150, 30)
        submit.move(110, 110)
        self.result.resize(200, 30)
        self.result.move(90, 150)
        self.show()

    def convert_str_to_int(self, n):
        """Parse the text *n* into an int.

        Delegates to the built-in int() (which also accepts a sign and
        surrounding whitespace) instead of the previous hand-rolled ord()
        arithmetic, which silently produced garbage for anything but plain
        digit strings. Raises ValueError for invalid text.
        """
        return int(n)

    def add(self):
        """Read both inputs and display their sum (or an error on bad input)."""
        try:
            n1 = self.convert_str_to_int(self.box_1.text())
            n2 = self.convert_str_to_int(self.box_2.text())
        except ValueError:
            # Previously, invalid text crashed the slot; report it instead.
            self.result.setText("invalid input")
            return
        self.result.setText(str(n1 + n2))
def main():
    # Build the Qt application, create the main window (which shows itself
    # in __init__), and run the event loop until the window is closed.
    main_app = QtWidgets.QApplication(sys.argv)
    work = Workplace()
    sys.exit(main_app.exec_())
main() | [
"shrishailtalukar@gmail.com"
] | shrishailtalukar@gmail.com |
6bb22a6d2fb6bfc0634b338ea727935c83a552e3 | a5b8d71fd3876b76cb5183d5396625bd49bebb79 | /mushi/apps/webui/views.py | cd21c8d6e9abbe60cc2a26308d37187b75e60efe | [
"Apache-2.0"
] | permissive | kyouko-taiga/mushi | 1943d5b27296a76c12733f71ad9f3a9ef0f5d251 | fbc6583f934aba814aca7bbd5717d8d8e146ad48 | refs/heads/master | 2021-01-10T08:04:41.000046 | 2015-06-07T22:59:39 | 2015-06-07T22:59:39 | 36,305,995 | 0 | 2 | null | 2015-07-01T17:58:58 | 2015-05-26T15:39:24 | JavaScript | UTF-8 | Python | false | false | 1,256 | py | # Copyright 2015 Dimitri Racordon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint, current_app, redirect, render_template, url_for
from mushi.core.auth import parse_auth_token, require_auth_token, validate_auth_token
from mushi.core.exc import AuthenticationError
bp = Blueprint('views', __name__)
@bp.route('/')
@require_auth_token
def index(auth_token):
    # Single-page-app shell; require_auth_token validates the request and
    # passes the parsed token in as `auth_token`.
    return render_template('spa.html', api_root=current_app.config['API_ROOT'])
@bp.route('/login')
def login():
    # Skip the login page when the request already carries a valid token.
    try:
        auth_token = parse_auth_token()
        validate_auth_token(auth_token)
        return redirect(url_for('views.index'))
    except AuthenticationError:
        return render_template('login.html', api_root=current_app.config['API_ROOT'])
| [
"kyouko.taiga@gmail.com"
] | kyouko.taiga@gmail.com |
1a01f5c2747cdd429c329c7250f34280b5f686d2 | 412b699e0f497ac03d6618fe349f4469646c6f2d | /env/lib/python3.8/site-packages/web3/_utils/threads.py | ba45d8775e0e35fd72ae6117133e9d50ea23bdc3 | [
"MIT"
] | permissive | EtienneBrJ/Portfolio | 7c70573f02a5779f9070d6d9df58d460828176e3 | 6b8d8cf9622eadef47bd10690c1bf1e7fd892bfd | refs/heads/main | 2023-09-03T15:03:43.698518 | 2021-11-04T01:02:33 | 2021-11-04T01:02:33 | 411,076,325 | 1 | 0 | MIT | 2021-10-31T13:43:09 | 2021-09-27T23:48:59 | HTML | UTF-8 | Python | false | false | 3,979 | py | """
A minimal implementation of the various gevent APIs used within this codebase.
"""
import threading
import time
from types import (
TracebackType,
)
from typing import (
Any,
Callable,
Generic,
Type,
)
from web3._utils.compat import (
Literal,
)
from web3.types import (
TReturn,
)
class Timeout(Exception):
    """
    A limited subset of the `gevent.Timeout` context manager.

    Doubles as the exception raised on expiry (when no custom exception is
    configured) and as the context manager that tracks the deadline.
    """
    seconds = None
    exception = None
    begun_at = None
    is_running = None

    def __init__(
        self, seconds: float = None, exception: Type[BaseException] = None, *args: Any,
        **kwargs: Any
    ) -> None:
        self.seconds = seconds
        self.exception = exception

    def __enter__(self) -> 'Timeout':
        self.start()
        return self

    def __exit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType
    ) -> Literal[False]:
        # Never suppress exceptions raised inside the managed block.
        return False

    def __str__(self) -> str:
        return '' if self.seconds is None else f'{self.seconds} seconds'

    @property
    def expire_at(self) -> int:
        if self.seconds is None:
            raise ValueError("Timeouts with `seconds == None` do not have an expiration time")
        if self.begun_at is None:
            raise ValueError("Timeout has not been started")
        return self.begun_at + self.seconds

    def start(self) -> None:
        if self.is_running is not None:
            raise ValueError("Timeout has already been started")
        self.begun_at = time.time()
        self.is_running = True

    def check(self) -> None:
        if self.is_running is None:
            raise ValueError("Timeout has not been started")
        if self.is_running is False:
            raise ValueError("Timeout has already been cancelled")
        if self.seconds is None or time.time() <= self.expire_at:
            return
        # Deadline passed: mark stopped, then raise the configured exception
        # (an exception class, an exception instance, or this Timeout itself).
        self.is_running = False
        if isinstance(self.exception, type):
            raise self.exception(str(self))
        if isinstance(self.exception, Exception):
            raise self.exception
        raise self

    def cancel(self) -> None:
        self.is_running = False

    def sleep(self, seconds: float) -> None:
        # Sleep, then re-check the deadline so oversleeping raises promptly.
        time.sleep(seconds)
        self.check()
class ThreadWithReturn(threading.Thread, Generic[TReturn]):
    """Thread subclass that captures the target's return value for ``get()``."""

    def __init__(
        self, target: Callable[..., TReturn] = None, args: Any = None, kwargs: Any = None
    ) -> None:
        super().__init__(target=target, args=args or tuple(), kwargs=kwargs or {})
        # Keep our own copies so run() can invoke the target directly.
        self.target = target
        self.args = args
        self.kwargs = kwargs

    def run(self) -> None:
        # Stash the result so get() can hand it back after join().
        self._return = self.target(*self.args, **self.kwargs)

    def get(self, timeout: float = None) -> TReturn:
        self.join(timeout)
        try:
            return self._return
        except AttributeError:
            # run() never set the attribute (e.g. the target raised).
            raise RuntimeError("Something went wrong. No `_return` property was set")
class TimerClass(threading.Thread):
    """Invoke ``callback(*args)`` every ``interval`` seconds until stop() is called."""

    def __init__(self, interval: int, callback: Callable[..., Any], *args: Any) -> None:
        super().__init__()
        self.callback = callback
        self.terminate_event = threading.Event()
        self.interval = interval
        self.args = args

    def run(self) -> None:
        # Fire immediately, then wait up to `interval` between calls; the
        # wait returns early as soon as stop() sets the event.
        while not self.terminate_event.is_set():
            self.callback(*self.args)
            self.terminate_event.wait(self.interval)

    def stop(self) -> None:
        self.terminate_event.set()
def spawn(
    target: Callable[..., TReturn],
    *args: Any,
    thread_class: Type[ThreadWithReturn[TReturn]] = ThreadWithReturn,
    **kwargs: Any,
) -> ThreadWithReturn[TReturn]:
    # Run ``target(*args, **kwargs)`` on a daemon thread and return the
    # handle; call .get() on it to join and obtain the return value.
    worker = thread_class(target=target, args=args, kwargs=kwargs)
    worker.daemon = True
    worker.start()
    return worker
| [
"etiennebrxv@gmail.com"
] | etiennebrxv@gmail.com |
39db9f762be3ee230422c09238b1b18e127682f6 | b6145339bdc94182c6d79d9d56b42ae31f09c2ca | /problems/sliding_window/Max_Consecutive_Ones_III.py | 608c37c5ea14def09351e18833f87843be802d67 | [] | no_license | ruozhizhang/leetcode | b0f1860de9938d721d6bee81ff9ceda656ecfa40 | ac9097a400095c96bcc72eb0c80e5df3cce9b0d1 | refs/heads/master | 2023-05-22T08:15:59.782523 | 2021-06-14T15:11:45 | 2021-06-14T15:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | '''
https://leetcode.com/problems/max-consecutive-ones-iii/
Given an array A of 0s and 1s, we may change up to K values from 0 to 1.
Return the length of the longest (contiguous) subarray that contains only 1s.
Example 1:
Input: A = [1,1,1,0,0,0,1,1,1,1,0], K = 2
Output: 6
Explanation:
[1,1,1,0,0,1,1,1,1,1,1]
Bolded numbers were flipped from 0 to 1. The longest subarray is underlined.
Example 2:
Input: A = [0,0,1,1,0,0,1,1,1,0,1,1,0,0,0,1,1,1,1], K = 3
Output: 10
Explanation:
[0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1]
Bolded numbers were flipped from 0 to 1. The longest subarray is underlined.
Note:
1 <= A.length <= 20000
0 <= K <= A.length
A[i] is 0 or 1
'''
class Solution:
    def longestOnes(self, A: List[int], K: int) -> int:
        """Length of the longest subarray of A containing at most K zeros."""
        best = 0
        left = 0
        zeros_in_window = 0
        for right, value in enumerate(A):
            if value == 0:
                zeros_in_window += 1
            # Shrink from the left until at most K zeros remain in the window.
            while zeros_in_window > K:
                if A[left] == 0:
                    zeros_in_window -= 1
                left += 1
            best = max(best, right - left + 1)
        return best
| [
"ruozhizhanglife@gmail.com"
] | ruozhizhanglife@gmail.com |
98b2d069da09e52a0bcaf8917f2da0ce972ad801 | 4dca892ac2d33fcd84e78d8e6c95e7d5955f04d1 | /welcome.py | 30b2f55503bcc1441ee9712159b7ccc40b0cfc7d | [] | no_license | VipulRana/Covid-19-DataScience | a486d824b8a427dd91b44339db7bb94ccecb9c32 | 26dfa14fc5dca32e94f483434f27d8e59146bd39 | refs/heads/main | 2023-02-20T12:06:14.765997 | 2021-01-21T18:13:49 | 2021-01-21T18:13:49 | 331,713,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | from PyQt5 import QtCore, QtGui, QtWidgets
from login import Ui_Login_Form
class Ui_MainWindow(object):
    """Welcome screen: a greeting label plus a button that opens the login form."""

    def login_form(self):
        # Build and show the login window; keep references on self so the
        # window and its UI object are not garbage-collected while visible.
        self.LoginWindow = QtWidgets.QMainWindow()
        self.ui = Ui_Login_Form()
        self.ui.setupUi(self.LoginWindow)
        self.LoginWindow.show()

    def setupUi(self, MainWindow):
        # Create and position all widgets on the given main window.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600) #Size of the display window
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.welcome_label = QtWidgets.QLabel(self.centralwidget) #Sets the label in the center of the screen
        self.welcome_label.setGeometry(QtCore.QRect(260, 150, 241, 71)) #Size of the label
        self.welcome_label.setObjectName("welcome_label")
        self.get_started_button = QtWidgets.QPushButton(self.centralwidget) #Sets button on the center
        self.get_started_button.setGeometry(QtCore.QRect(320, 270, 111, 28)) #Size of the button
        self.get_started_button.setObjectName("get_started_button")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.get_started_button.clicked.connect(self.login_form) #Button clicked go to function login_form
        self.get_started_button.clicked.connect(MainWindow.hide) #Hide the current window
        #MainWindow.hide()
        #MainWindow.close()

    def retranslateUi(self, MainWindow):
        # Apply the translatable UI strings.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "COVID-19 Project"))
        self.welcome_label.setText(_translate("MainWindow", "WELCOME TO THE COVID-19 PROJECT"))
        self.get_started_button.setText(_translate("MainWindow", "GET STARTED"))
if __name__ == "__main__":
    import sys
    # Standalone launch: build the app, attach the UI and show the window.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
sys.exit(app.exec_()) | [
"noreply@github.com"
] | VipulRana.noreply@github.com |
1ddefea085b7f43177f6144a313adf9ae3ec8f87 | 805c4473e47c6be851c24fce0c0dfd9ea7719d56 | /MAIN.py | c06c610a3a91866cd473c9ce6575bf29677f54f5 | [] | no_license | siddharth-rawatt/Image-Caption-Genertor- | cb6ba11f37e8f0d93bea747d40e97b06d21447e5 | 9fb739ccfd9f25bb8e768d5cafbe195bfe93a7c0 | refs/heads/main | 2023-06-07T09:29:56.779318 | 2021-06-18T14:09:32 | 2021-06-18T14:09:32 | 378,161,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,765 | py | import string
import numpy as np
from PIL import Image
import os
from pickle import dump, load
import numpy as np
from keras.applications.xception import Xception, preprocess_input
from keras.preprocessing.image import load_img, img_to_array
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers.merge import add
from keras.models import Model, load_model
from keras.layers import Input, Dense, LSTM, Embedding, Dropout
# small library for seeing the progress of loops.
from tqdm import tqdm_notebook as tqdm
# Loading a text file into memory
def load_doc(filename):
    """Return the full contents of a UTF-8 text file.

    Uses a context manager so the handle is closed even if reading fails
    (the original leaked the handle on error) and pins the encoding so the
    result does not depend on the platform default.
    """
    with open(filename, 'r', encoding='utf-8') as file:
        return file.read()
# get all imgs with their captions
def all_img_captions(filename):
    """Map each image id (token filename without its '#N' caption index) to its captions."""
    descriptions = {}
    # Drop the final entry: the file ends with a newline, so the last split
    # element is empty.
    for line in load_doc(filename).split('\n')[:-1]:
        img, caption = line.split('\t')
        # Strip the trailing caption index ('#0'..'#4') to get the image id.
        descriptions.setdefault(img[:-2], []).append(caption)
    return descriptions
#Data cleaning- lower casing, removing puntuations and words containing numbers
def cleaning_text(captions):
    """Normalise every caption in-place and return the mapping.

    Each caption is split on hyphens, lower-cased, stripped of punctuation,
    and has single-character and non-alphabetic tokens removed.
    """
    table = str.maketrans('', '', string.punctuation)
    for img, caps in captions.items():
        for i, img_caption in enumerate(caps):
            # Bug fix: str.replace returns a new string; the original call
            # discarded the result, so hyphenated words were never split.
            img_caption = img_caption.replace("-", " ")
            desc = img_caption.split()
            #converts to lowercase
            desc = [word.lower() for word in desc]
            #remove punctuation from each token
            desc = [word.translate(table) for word in desc]
            #remove hanging 's and a
            desc = [word for word in desc if len(word) > 1]
            #remove tokens with numbers in them
            desc = [word for word in desc if word.isalpha()]
            #convert back to string
            captions[img][i] = ' '.join(desc)
    return captions
def text_vocabulary(descriptions):
    # build vocabulary of all unique words across every caption
    vocab = set()
    for caption_list in descriptions.values():
        for caption in caption_list:
            vocab.update(caption.split())
    return vocab
#All descriptions in one file
def save_descriptions(descriptions, filename):
    """Write every caption as an '<image_id>\\t<caption>' line to *filename*.

    Uses a context manager with an explicit encoding so the handle is always
    flushed and closed (the original relied on a manual close that was
    skipped if write() raised).
    """
    lines = []
    for key, desc_list in descriptions.items():
        for desc in desc_list:
            lines.append(key + '\t' + desc)
    data = "\n".join(lines)
    with open(filename, "w", encoding='utf-8') as file:
        file.write(data)
# Set these path according to project folder in you system
dataset_text = "C:/Users/siddh/PycharmProjects/MINOR/python-project-image-caption-generator/Flickr8k_text"
dataset_images = "C:/Users/siddh/PycharmProjects/MINOR/python-project-image-caption-generator/Flickr8k_Dataset/Flicker8k_Dataset"
#we prepare our text data
filename = dataset_text + "/" + "Flickr8k.token.txt"
#loading the file that contains all data
#mapping them into descriptions dictionary img to 5 captions
descriptions = all_img_captions(filename)
print("Length of descriptions =" ,len(descriptions))
#cleaning the descriptions
clean_descriptions = cleaning_text(descriptions)
#building vocabulary
vocabulary = text_vocabulary(clean_descriptions)
print("Length of vocabulary = ", len(vocabulary))
#saving each description to file
save_descriptions(clean_descriptions, "descriptions.txt")
# Pre-extracted CNN image features pickled by a separate script.
# NOTE(review): the handle passed to load() is never closed explicitly.
features=load(open("features.p","rb"))
#load the data
def load_photos(filename):
    """Return the photo ids listed one per line in *filename*.

    The final split element is dropped because the file ends with a newline.
    """
    return load_doc(filename).split("\n")[:-1]
def load_clean_descriptions(filename, photos):
    #loading clean_descriptions for the given photo ids, wrapped in
    #<start>/<end> sequence markers for the decoder.
    descriptions = {}
    for line in load_doc(filename).split("\n"):
        words = line.split()
        if not words:
            continue
        image, image_caption = words[0], words[1:]
        if image in photos:
            wrapped = '<start> ' + " ".join(image_caption) + ' <end>'
            descriptions.setdefault(image, []).append(wrapped)
    return descriptions
def load_features(photos):
    """Return the subset of pickled image features belonging to *photos*.

    Opens features.p with a context manager so the handle is closed
    deterministically (the original left it to garbage collection).
    """
    with open("features.p", "rb") as handle:
        all_features = load(handle)
    #selecting only needed features
    return {k: all_features[k] for k in photos}
# Restrict to the images listed in the training split.
filename = dataset_text + "/" + "Flickr_8k.trainImages.txt"
#train = loading_data(filename)
train_imgs = load_photos(filename)
train_descriptions = load_clean_descriptions("descriptions.txt", train_imgs)
train_features = load_features(train_imgs)
#converting dictionary to clean list of descriptions
def dict_to_list(descriptions):
    """Flatten the {image: [captions]} mapping into one list of captions."""
    return [caption
            for caption_list in descriptions.values()
            for caption in caption_list]
#creating tokenizer class
#this will vectorise text corpus
#each integer will represent token in dictionary
from keras.preprocessing.text import Tokenizer
def create_tokenizer(descriptions):
    """Fit a Tokenizer on every caption and return it."""
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(dict_to_list(descriptions))
    return tokenizer
# give each word an index, and store that into tokenizer.p pickle file
tokenizer = create_tokenizer(train_descriptions)
# Use a context manager so the pickle file handle is closed deterministically
# (the original passed an anonymous open() whose close relied on GC).
with open('tokenizer.p', 'wb') as tokenizer_file:
    dump(tokenizer, tokenizer_file)
# +1 because Keras word indices start at 1; index 0 is reserved for padding.
vocab_size = len(tokenizer.word_index) + 1
# print("Vocabulary size= ",vocab_size)
def max_length(descriptions):
    """Return the word count of the longest caption."""
    return max(len(caption.split()) for caption in dict_to_list(descriptions))
# NOTE(review): this rebinds the name `max_length` from the function above to
# an int, so the function cannot be called again after this point.
max_length = max_length(descriptions)
# print("max_length= ",max_length)
#create input-output sequence pairs from the image description.
#data generator, used by model.fit_generator()
def data_generator(descriptions, features, tokenizer, max_length):
    """Endlessly yield ([image_features, padded_sequences], next_word_onehot) batches, one image per step."""
    while True:
        for key, description_list in descriptions.items():
            #retrieve photo features
            feature = features[key][0]
            input_image, input_sequence, output_word = create_sequences(
                tokenizer, max_length, description_list, feature)
            yield [[input_image, input_sequence], output_word]
def create_sequences(tokenizer, max_length, desc_list, feature):
    """Expand one image's captions into (photo, partial caption, next word) training triples.

    NOTE(review): relies on the module-level ``vocab_size`` global for the
    one-hot width — confirm it matches the tokenizer passed in.
    """
    X1, X2, y = list(), list(), list()
    # walk through each description for the image
    for desc in desc_list:
        # encode the sequence
        seq = tokenizer.texts_to_sequences([desc])[0]
        # split one sequence into multiple X,y pairs
        for i in range(1, len(seq)):
            # split into input and output pair
            in_seq, out_seq = seq[:i], seq[i]
            # pad input sequence
            in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
            # encode output sequence
            out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
            # store
            X1.append(feature)
            X2.append(in_seq)
            y.append(out_seq)
    return np.array(X1), np.array(X2), np.array(y)
#You can check the shape of the input and output for your model
[a,b],c = next(data_generator(train_descriptions, features, tokenizer, max_length))
print(a.shape, b.shape, c.shape)
#((47, 2048), (47, 32), (47, 7577))
import os
# Silence TensorFlow's INFO/WARNING console output.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
from keras.utils import plot_model
# define the captioning model
def define_model(vocab_size, max_length):
    """Build and compile the merge-style captioning model.

    Two inputs: a 2048-d image feature vector and a padded word-index
    sequence of length ``max_length``; the output is a softmax over the
    vocabulary predicting the next word.
    """
    # features from the CNN model squeezed from 2048 to 256 nodes
    inputs1 = Input(shape=(2048,))
    fe1 = Dropout(0.5)(inputs1)
    fe2 = Dense(256, activation='relu')(fe1)
    # LSTM sequence model
    inputs2 = Input(shape=(max_length,))
    se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
    se2 = Dropout(0.5)(se1)
    se3 = LSTM(256)(se2)
    # Merging both models
    decoder1 = add([fe2, se3])
    decoder2 = Dense(256, activation='relu')(decoder1)
    outputs = Dense(vocab_size, activation='softmax')(decoder2)
    # tie it together [image, seq] [word]
    model = Model(inputs=[inputs1, inputs2], outputs=outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    # summarize model
    print(model.summary())
    plot_model(model, to_file='model.png', show_shapes=True)
    return model
print('Dataset: ', len(train_imgs))
print('Descriptions: train=', len(train_descriptions))
print('Photos: train=', len(train_features))
print('Vocabulary Size:', vocab_size)
print('Description Length: ', max_length)
model = define_model(vocab_size, max_length)
epochs = 10
# One generator step per training image.
steps = len(train_descriptions)
# making a directory models to save our models
if not os.path.exists("models"):
    os.mkdir("models")
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import argparse
# Inference entry point: expects the image path on the command line.
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help="Image Path")
args = vars(ap.parse_args())
img_path = args['image']
def extract_features(filename, model):
    """Load an image, preprocess it for Xception, and return model.predict() features.

    Pixels are resized to 299x299 and rescaled from [0, 255] to [-1, 1].
    Raises ValueError when the image cannot be opened.
    """
    try:
        image = Image.open(filename)
    except Exception as err:
        # The original printed a message here and then crashed with a
        # NameError because `image` was never bound; fail loudly instead.
        raise ValueError(
            "ERROR: Couldn't open image! Make sure the image path and extension is correct"
        ) from err
    image = image.resize((299, 299))
    image = np.array(image)
    # for images that has 4 channels, we convert them into 3 channels
    if image.shape[2] == 4:
        image = image[..., :3]
    image = np.expand_dims(image, axis=0)
    # Map [0, 255] -> [-1, 1], matching Xception's expected preprocessing.
    image = image / 127.5
    image = image - 1.0
    return model.predict(image)
def word_for_id(integer, tokenizer):
    """Reverse-lookup the word mapped to ``integer`` in the tokenizer's vocabulary (None if absent)."""
    return next(
        (word for word, index in tokenizer.word_index.items() if index == integer),
        None,
    )
def generate_desc(model, tokenizer, photo, max_length):
    """Greedy-decode a caption for ``photo``, starting from the 'start' token.

    Stops after ``max_length`` words, when the model emits an index with no
    vocabulary entry, or when the 'end' token is produced.
    """
    in_text = 'start'
    for i in range(max_length):
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        sequence = pad_sequences([sequence], maxlen=max_length)
        pred = model.predict([photo,sequence], verbose=0)
        # argmax over the vocabulary softmax = most probable next word id
        pred = np.argmax(pred)
        word = word_for_id(pred, tokenizer)
        if word is None:
            break
        in_text += ' ' + word
        if word == 'end':
            break
    return in_text
#path = 'Flicker8k_Dataset/111537222_07e56d5a30.jpg'
# Must match the maximum caption length used during training.
max_length = 32
tokenizer = load(open("tokenizer.p","rb"))
model = load_model('models/model_9.h5')
# Feature extractor matching the one used to build features.p.
xception_model = Xception(include_top=False, pooling="avg")
photo = extract_features(img_path, xception_model)
img = Image.open(img_path)
description = generate_desc(model, tokenizer, photo, max_length)
print("\n\n")
print(description)
plt.imshow(img) | [
"noreply@github.com"
] | siddharth-rawatt.noreply@github.com |
043d589eaaab4d2ab8c8f03c3d650ba3fcf23a53 | dbb5f26b79fc06a98ae4c34de71a27a25aa8dc88 | /ADA.py | 865dd4e0dc861db73bf0420f62f97db5fc57dd57 | [] | no_license | KordianD/Machine-Learning-Sebastian-Raschka | 5a9b2f56c073f69cd27fccd40233fdcd10792683 | fe08af9bc6b9790645120716fe78a71c12ecb9ad | refs/heads/master | 2020-03-27T05:39:59.004664 | 2018-08-24T20:48:17 | 2018-08-24T20:48:17 | 146,038,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | import numpy as np
import helper
# Iris features with labels mapped into [-1, 1]; scatter-plot them first.
X, y = helper.get_iris_data(output_values_ranges=[-1, 1])
helper.plot_iris_data(X)
class Ada:
    """ADAptive LInear NEuron trained with full-batch gradient descent."""

    def __init__(self, learning_rate=0.01, epochs=50, random_state=1):
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.random_state = random_state

    def fit(self, X, y):
        """Fit the linear model on X of shape (n_samples, n_features).

        Assumes y is a column vector of shape (n_samples, 1) — TODO confirm
        against helper.get_iris_data. Records each epoch's sum-of-squares
        error in ``self.errors`` and returns self.
        """
        rgen = np.random.RandomState(self.random_state)
        # Column vector of weights sized from the actual feature count; the
        # previous hard-coded reshape((3, 1)) broke on any other width.
        self.weights = rgen.normal(loc=0.0, scale=0.01, size=(X.shape[1], 1))
        self.errors = []
        number_of_examples = X.shape[0]
        for _ in range(self.epochs):
            z = X.dot(self.weights)            # net input, shape (n, 1)
            difference = y - z                 # residuals
            error = 0.5 * np.sum(difference ** 2)
            self.errors.append(error)
            gradient = - X.transpose().dot(difference)
            self.weights = self.weights - self.learning_rate * gradient / number_of_examples
        return self
# Train for 100 epochs and plot the per-epoch SSE cost curve.
ppn = Ada(learning_rate=0.1, epochs=100)
ppn.fit(X, y)
helper.plot_training(ppn.errors)
| [
"kordiandrozd@gmail.com"
] | kordiandrozd@gmail.com |
65f8e1881f1f41d634cf77079fc9440bb4cb9813 | 6983b7e1b874cf38a28dba0756486c8ec1546124 | /bard/views/entities_api.py | fde4e5ef5c35addd798afa3f0c7574f0a45ee5b6 | [] | no_license | MathiasDarr/dakobed_bard | 78cec739865fc092323b442bf1a2d5a41f0b970f | 52f6d8a8d7109ebb61b12ca8577bac4182b1b59a | refs/heads/master | 2023-05-10T03:27:59.781013 | 2021-05-30T03:56:53 | 2021-05-30T03:56:53 | 360,324,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from flask import Blueprint, request
import logging
log = logging.getLogger(__name__)
blueprint = Blueprint("entities_api", __name__)
@blueprint.route("/api/2/entities",methods=["GET"])
def index():
    """
    Placeholder handler for GET /api/2/entities.

    :return: a static identifier string; no entity payload is produced yet.
    """
    return "Entities API"
"mathias.darr@pnnl.gov"
] | mathias.darr@pnnl.gov |
06acaf902c5984258838464d08953930f011acd6 | cc36ad5eca8f31f26eea6bfe24abf5f6a484f506 | /src/utils/helper.py | 814bbfe3c350bc9a09cfa448268b268d8ae4dd4f | [
"MIT"
] | permissive | Ehsan-Tavan/twitter_crawlers | 8479904287dc8ccdc5cf1312bb087a390cad42a2 | 0f117c3ef994c9e37a5bca41140e093181e90e4e | refs/heads/master | 2023-06-03T12:41:17.700073 | 2021-06-24T04:47:53 | 2021-06-24T04:47:53 | 361,388,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | import datetime
import pandas as pd
def calculate_date(days: int) -> [str, str]:
    """Return (start, end) dates as 'YYYY-MM-DD' strings, *days* apart.

    :param days: how many days before now the start date should fall
    :return: (start_date, end_date), where end is today
    """
    end = datetime.datetime.now()
    start = end - datetime.timedelta(days=days)
    return start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d")
def process_time(start_time: float, end_time: float) -> [int, int]:
    """Split the elapsed time between two timestamps into (minutes, seconds).

    :param start_time: earlier timestamp, in seconds
    :param end_time: later timestamp, in seconds
    :return: (whole minutes, leftover whole seconds)
    """
    minutes, seconds = divmod(end_time - start_time, 60)
    return int(minutes), int(seconds)
def is_item_exist(data, item) -> bool:
    """Return True when *item* occurs in *data*.

    :param data: any container supporting the ``in`` operator
    :param item: candidate element (or substring, for strings)
    :return: membership flag
    """
    return item in data
def is_tweet_valid(tweet: str, key_words: list) -> bool:
    """Return True when the tweet text contains at least one keyword.

    :param tweet: tweet text to inspect
    :param key_words: substrings that mark a tweet as relevant
    :return: True iff any keyword occurs in the tweet
    """
    return any(key_word in tweet for key_word in key_words)
def filter_tweets(tweets: list, dates: list, key_words: list) -> [list, list]:
    """Blank out every tweet/date pair whose text matches no keyword.

    Non-matching positions are set to None in both lists; the lists are
    mutated in place and also returned.

    :param tweets: tweet texts
    :param dates: parallel list of tweet dates
    :param key_words: keywords accepted by is_tweet_valid
    :return: the (mutated) tweets and dates lists
    """
    for position, text in enumerate(tweets):
        if not is_tweet_valid(text, key_words):
            tweets[position] = None
            dates[position] = None
    return tweets, dates
def save_tweets(tweets: list, dates: list, path: str) -> None:
    """Persist parallel tweet/date lists to *path* as a two-column CSV.

    :param tweets: tweet texts (column ``tweets``)
    :param dates: tweet dates (column ``date``)
    :param path: destination CSV path; no index column is written
    :return: None
    """
    records = {"tweets": tweets, "date": dates}
    pd.DataFrame(records).to_csv(path, index=False)
| [
"tavan.ehsan@gmail.com"
] | tavan.ehsan@gmail.com |
3914c9db6feaaf9a525477b46999f04886e29072 | d9c5bee4aac8406732491c13dc2ece8f3bb1bede | /train.py | 8fa001b642925c6a72e6bff9817079beb695aeb6 | [
"Apache-2.0"
] | permissive | hehichens/NeuralStyle | db8c58927b6cbe133e27223cd8dc05cf95c74706 | cf28a1eefd8713f85e94f50935562a663a53e8b5 | refs/heads/main | 2023-02-03T17:20:04.064327 | 2020-12-11T13:24:35 | 2020-12-11T13:24:35 | 320,439,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | """
trian model
edit by hichens
"""
import time
import torch
import torchvision
import os
from utils.options import opt
from utils.utils import *
from utils.visualizer import Visualizer
## network under training (concrete type selected by opt.model)
net = create_model()

## hyper-parameters
batch_size = opt.batch_size

## visualizer and training bookkeeping
visualizer = Visualizer()
total_iter = 0
iter_time = time.time()
losses = []

## output locations, one sub-directory per model type
checkpoint_dir = os.path.join(opt.checkpoint_dir, opt.model)
images_dir = os.path.join(opt.images_dir, opt.model)
mkdirs([checkpoint_dir, images_dir])

print("Training on: {}".format(opt.device))
for epoch in range(opt.num_epoch):
    # BaseModel uses a dummy one-item loader (no dataset); FST streams images.
    if opt.model == 'BaseModel':
        data_loader = [(None, None)]
    elif opt.model == 'FST':
        data_loader = load_image_datasets(batch_size=batch_size)
    dataset_size = len(data_loader)
    net.set_input()
    for batch_id, (x, _) in enumerate(data_loader):
        total_iter += 1
        net.forward(x)
        net.optimize_parameters()
        # Print and plot training information
        if total_iter % opt.print_freq == 0:    # print training losses and save logging information to the disk
            losses.append(net.get_loss())
            loss_dict = {'total_loss': losses[-1]}
            visualizer.plot(total_iter, losses, names=['total_loss'])
            visualizer.print(epoch, loss_dict, time.time() - iter_time)
            iter_time = time.time()
        # Save the checkpoint ('latest' is overwritten, plus one per epoch)
        if total_iter % opt.save_epoch_freq == 0:
            net.save_networks('latest', checkpoint_dir)
            net.save_networks(epoch+1, checkpoint_dir)
        # Display the result image
        if total_iter % opt.display_freq == 0:
            out_img_path = os.path.join(images_dir, "{}_{}.png".format(opt.model, epoch+1))
            torchvision.utils.save_image(net.get_image(), out_img_path)
            visualizer.display_image(out_img_path)
| [
"hichens@qq.com"
] | hichens@qq.com |
d5439756a472a776f6e2de4f77152fbc8854b8cf | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/280/97935/submittedfiles/testes.py | d88ecb76f5e989a6ee41f07dc266207edd3ddf88 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,328 | py | """
valor=["X","O"]
symh=valor[0]
sympc=valor[1]
print(symh)
print(sympc)
line1=[" "," "," "]
line2=[" "," "," "]
line3=[" "," "," "]
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
line1[2]=symh
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
line2[1]=sympc
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
line3[2]=symh
print("|%s|%s|%s|" % (line1[0],line1[1],line1[2]) )
print("|%s|%s|%s|" % (line2[0],line2[1],line2[2]) )
print("|%s|%s|%s|" % (line3[0],line3[1],line3[2]) )
"""
"""
x=int(input("Número de médias: "))
while x <= 1:
x=int(input("Número de médias: "))
notas=[]
for i in range (0,x,1):
notas.append(float(input("Insira a nota %d: " %(i+1))))
soma=sum(notas)
res=soma/x
print(res)
"""
"""
n=int(input("Insira n: "))
a=[]
for i in range (0,n,1):
a.append(int(input("Digite o termo %d do vetor a: " %(i+1))))
med=sum(a)/len(a)
somat=0
for i in range (0,len(a),1):
somat=somat + ((a[i]-med)**2)
desvpad=(((1/(n-1))*(somat))**0.5)
print(desvpad)
"""
import numpy as np
# Counters for the comparison checks further below in the script.
cont1=0
cont2=0
cont3=0
dim=int(input("Dimensão n da matriz: "))
matriz=np.empty([dim,dim])
matriztrans=np.empty([dim,dim])
matrizdiag=np.empty([2,dim])
# Read the n x n matrix element by element.
# NOTE(review): the prompt contains %d placeholders but no % formatting is
# applied, so the literal "%d" text is shown to the user — likely unintended.
for i in range (0,dim,1):
    for j in range (0,dim,1):
        matriz[i][j]=float(input("Digite o nº da linha %d na coluna %d: " ))
# transpose
for i in range(0,dim,1):
    for j in range(0,dim,1):
        matriztrans[i][j] = matriz[j][i]
# diagonals: row 0 = main diagonal, row 1 = (intended) anti-diagonal
for i in range(0,dim,1):
    matrizdiag[0][i]=matriz[i][i]
# NOTE(review): `matrizdiag[1]=...` assigns the whole row on every iteration
# and the inner loop never visits column 0, so row 1 does not actually end up
# holding the anti-diagonal — looks like a bug in the original exercise code.
for i in range(0,dim,1):
    for j in range(dim-1,0,-1):
        matrizdiag[1]=matriz[i][j]
print(matriz)
print(matriztrans)
print(matrizdiag)
for i in range (0,dim-1,1):
if sum(matriz[i]) == sum(matriz[i+1]):
cont1=cont1+1
for i in range (0,dim-1,1):
if sum(matriztrans[i]) == sum(matriz[i+1]):
cont2=cont2+1
for i in range (0,dim-1,1):
if matriz[i][i] == sum(matriz[i+1]):
cont3=cont3+1 | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
6ef80220e480e093f7edc3333639f750931483ed | 14b39545e132d31b74150e5dbc04a92d6bd121bd | /1010_calculo_simples.py | f8aee14feeef4622062325214980f88dc1a4c68b | [] | no_license | VictorJurado18/ExerciciosURIJudge | cc5d5d805b39cc04fd57789dbc1a343ab348612e | cca86843f04b69852f989fb1d707538f2aab63f5 | refs/heads/master | 2023-06-30T01:48:59.650147 | 2021-08-03T16:56:43 | 2021-08-03T16:56:43 | 392,388,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | c1, n1, p1 = input().split()
c2, n2, p2 = input().split()
c1 = int(c1)
c2 = int(c2)
n1 = int(n1)
n2 = int(n2)
p1 = float(p1)
p2 = float(p2)
valor = (n1 * p1) + (n2 * p2)
print(f'VALOR A PAGAR: R$ {valor:.2f}') | [
"victor.nemejur@gmail.com"
] | victor.nemejur@gmail.com |
9f79d6436bdd0d131e2f876c83e4b482712e8060 | 1abc28811934bb0c533a2b808d84c2e834348f13 | /test_map.py | 8f0e2befb9c9ca1629fab78892b18a97a189f4c7 | [] | no_license | gorohovIlya/my_project | 729ba599aa6c3b0be7c5257471c580c889ce2fba | 9f5e6a0fd719f0a90074350bbb6707617904ad33 | refs/heads/main | 2023-09-04T20:29:07.781880 | 2021-11-12T12:56:42 | 2021-11-12T12:56:42 | 423,747,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,491 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test_map.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI: a 750x750 window with a full-size image label
    and a back button. Regenerated from test_map.ui — do not hand-edit logic.
    """
    def setupUi(self, MainWindow):
        """Build the widget tree and wire auto-connected slots."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setEnabled(True)
        MainWindow.resize(750, 750)
        MainWindow.setStyleSheet("")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Full-window label (751x751) used as the map/image surface.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(0, 0, 751, 751))
        self.label.setText("")
        self.label.setObjectName("label")
        # "Back" button pinned to the bottom-left corner.
        self.return_back = QtWidgets.QPushButton(self.centralwidget)
        self.return_back.setGeometry(QtCore.QRect(10, 720, 75, 23))
        self.return_back.setObjectName("return_back")
        MainWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply translatable strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.return_back.setText(_translate("MainWindow", "Назад"))
| [
"noreply@github.com"
] | gorohovIlya.noreply@github.com |
3b86e81c1aefa746ea0b2327c9bc1e620689dd0a | 7a013424c82b71bc82aa312e0165a1af4170ac23 | /ABC/ABC173/C.py | c0f86d46455b822b965fac48b703f8bf73750487 | [] | no_license | kikugawa-shoma/Atcoder | fe3405e36dd3e4e25127b6110d6009db507e7095 | 7299116b7beb84815fe34d41f640a2ad1e74ba29 | refs/heads/master | 2020-12-21T19:10:12.471507 | 2020-10-10T16:38:18 | 2020-10-10T16:38:18 | 236,531,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | import copy
# ABC173 C: H x W grid, count row/column paintings leaving exactly K black cells.
H,W,K = map(int,input().split())
C = [list(input()) for _ in range(H)]
# Binary grid: M[i][j] == 1 where the input cell holds '#', else 0.
M = [[0]*W for _ in range(H)]
for i in range(H):
    for j in range(W):
        if C[i][j] == "#":
            M[i][j] = 1
def bit_01(keta):
    """Enumerate every zero-padded binary string of length *keta*.

    Strings are produced in ascending numeric order ('000', '001', ...).
    """
    width = "0{}b".format(keta)
    return [format(value, width) for value in range(2 ** keta)]
vert = bit_01(H)
hori = bit_01(W)
def check(v, h, M):
    """Paint the rows/columns flagged '1' in masks *v*/*h* white, then report
    whether exactly K black cells remain (reads module-level H, W, K)."""
    board = copy.deepcopy(M)
    # Clear each selected row with a single assignment.
    for row, flag in enumerate(v):
        if flag == "1":
            board[row] = [0] * W
    # Clear each selected column cell by cell.
    for col, flag in enumerate(h):
        if flag == "1":
            for row in range(H):
                board[row][col] = 0
    remaining = sum(sum(cells) for cells in board)
    return remaining == K
# Brute force: try every subset of rows and every subset of columns to paint
# (2^H * 2^W combinations) and count those leaving exactly K black cells.
ans = 0
for vp in vert:
    for hp in hori:
        if check(vp,hp,M):
            ans += 1
print(ans)
| [
"kikugawa-s@sys.i.kyoto-u.ac.jp"
] | kikugawa-s@sys.i.kyoto-u.ac.jp |
54fdddb266d157a4dd0830794c7f6c1adaf0e53d | 55a7490f3fef92cac6d7aeed9674dcfe51e3ea78 | /pollster/polls/admin.py | 02f0fe9bb0db1656b1109356dbe5a9a7278d31a1 | [
"MIT"
] | permissive | ImInYourPie/django-polls | 109a5c7cb6b637e36f6fe2be361720a3b9f45f9b | 56e0fc6152b555f7de4bdc9326ff35f3508ac4b0 | refs/heads/main | 2023-01-14T10:53:06.447824 | 2020-11-23T19:44:53 | 2020-11-23T19:44:53 | 315,326,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | from django.contrib import admin
# Models
from .models import Question, Choice
# Branding for the Django admin: page header, browser title and index heading.
admin.site.site_header = "Pollster Admin"
admin.site.site_title = "Pollster Admin Area"
admin.site.index_title = "Welcome to the Pollster admin area"
class ChoiceInline(admin.TabularInline):
    """Edit Choice rows inline on the Question admin page."""
    model = Choice
    extra = 3  # number of blank choice forms shown by default
class QuestionAdmin(admin.ModelAdmin):
    """Admin form for Question: text first, publication date collapsed,
    with choices editable inline."""
    fieldsets = [
        (None, {"fields": ["question_text"]}),
        ("Date Information", {"fields": ["pub_date"], "classes": ["collapse"]}),
    ]
    inlines = [ChoiceInline]
admin.site.register(Question, QuestionAdmin) | [
"iminyourpie@gmail.com"
] | iminyourpie@gmail.com |
9176d3e53da70f0692fbab648cb4c76f58216f6d | 059c4606fd93b70c244a0017cc1727d1b951e75a | /5-packages/http-examples/httpie-notes/httpie/context.py | c0840c9d051252a44b25937acfd607e94db2b7e7 | [
"BSD-3-Clause"
] | permissive | andyguwc/python-resources | 1f6850b1fde243912644530ee8985ae09773c68e | d8ab7e54d287a697e4763a36b10136af461ec820 | refs/heads/master | 2021-06-24T13:30:25.196129 | 2021-03-02T03:11:49 | 2021-03-02T03:11:49 | 210,958,803 | 1 | 1 | null | 2019-10-25T03:12:31 | 2019-09-25T23:29:29 | Python | UTF-8 | Python | false | false | 3,005 | py | import os
import sys
from pathlib import Path
from typing import Union, IO, Optional
try:
import curses
except ImportError:
curses = None # Compiled w/o curses
from httpie.compat import is_windows
from httpie.config import DEFAULT_CONFIG_DIR, Config, ConfigFileError
from httpie.utils import repr_dict
# use this to manage all things environment related
class Environment:
    """
    Information about the execution context
    (standard streams, config directory, etc).

    By default, it represents the actual environment.
    All of the attributes can be overwritten though, which
    is used by the test suite to simulate various scenarios.

    """
    is_windows: bool = is_windows
    config_dir: Path = DEFAULT_CONFIG_DIR
    # Standard streams plus their TTY/encoding state; all overridable in tests.
    stdin: Optional[IO] = sys.stdin
    stdin_isatty: bool = stdin.isatty() if stdin else False
    stdin_encoding: str = None
    stdout: IO = sys.stdout
    stdout_isatty: bool = stdout.isatty()
    stdout_encoding: str = None
    stderr: IO = sys.stderr
    stderr_isatty: bool = stderr.isatty()
    colors = 256
    program_name: str = 'http'

    def __init__(self, **kwargs):
        """
        Use keyword arguments to overwrite
        any of the class attributes for this instance.

        """
        # Refuse typo'd overrides: every keyword must name a class attribute.
        assert all(hasattr(type(self), attr) for attr in kwargs.keys())
        self.__dict__.update(**kwargs)  # easy way to update all attributes

        # Keyword arguments > stream.encoding > default utf8
        if self.stdin and self.stdin_encoding is None:
            self.stdin_encoding = getattr(
                self.stdin, 'encoding', None) or 'utf8'
        if self.stdout_encoding is None:
            actual_stdout = self.stdout
            self.stdout_encoding = getattr(
                actual_stdout, 'encoding', None) or 'utf8'

    def __str__(self):
        # Merge class-level defaults with instance overrides; hide privates.
        defaults = dict(type(self).__dict__)
        actual = dict(defaults)
        actual.update(self.__dict__)
        actual['config'] = self.config
        return repr_dict({
            key: value
            for key, value in actual.items()
            if not key.startswith('_')
        })

    def __repr__(self):
        return f'<{type(self).__name__} {self}>'

    _config = None  # lazily-created Config cache (see `config` below)

    # core part of Environment
    # Support loading config from the config file directory https://httpie.org/doc#config-file-directory
    @property
    def config(self) -> Config:
        """Return the user configuration, loading it from disk on first access."""
        config = self._config
        if not config:
            self._config = config = Config(directory=self.config_dir)
            if not config.is_new():
                try:
                    config.load()
                except ConfigFileError as e:
                    self.log_error(e, level='warning')
        # Bug fix: the cached (or freshly created) Config must be returned;
        # previously this property fell off the end and always yielded None.
        return config

    def log_error(self, msg, level='error'):
        """Write a formatted error/warning line to this environment's stderr."""
        assert level in ['error', 'warning']
        self.stderr.write(f'\n{self.program_name}: {level}: {msg}\n\n')
| [
"tianyou.gu@gmail.com"
] | tianyou.gu@gmail.com |
da2ea8e51998c780767ce2552d82184f69db07fe | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba4199.pngMap.py | 1ad5d97a840efedf5f02dfe430ab6575dc397483 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba4199.pngMap = [
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111100000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000010011111111111111111111111111111111111111110000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000001011111111111111111111111111111111111111111110000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111110000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111110000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111100000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111100000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111100000',
'11000000000000000000000000000000000000101111000000000000000000000000000000001111111111111111111111111111111111111111111111110000',
'11000000000000000000000000000000000000001111110000000000000000000000000000000111111111111111111111111111111111111111111111110000',
'10000000000000000000000000000000000111111111111000000000000000000000000000000111111111111111111111111111111111111111111111111000',
'10000000000000000000000000000000001111111111111100000000000000000000000000000111111111111111111111111111111111111111111111111100',
'00000000000000000000000000000000001111111111111100000000000000000000000000000111111111111111111111111111111111111111111111111100',
'00000000000000000000000000000000001111111111111110000000000000000000000000000111111111111111111111111111111111111111111111111100',
'00000000000000000000000000000000000111111111111110000000000000000000000000000111111111111111111111111111111111111111111111111100',
'00000000000000000000000000000000000011111111111110000000000000000000000000100111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000111111110000000000000000000000000000001111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000001111100000000000000000000000000000001111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000101111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000011111111111111111111111111000011111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000100011111111111111111111111000000001111111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000000111111111111111111111000000000000001111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000001111111111111111111111100000000000000000111111111111111111111111111111111',
'00000000000000000000000000000000000000000000000000000000111111111111111111111100000000000000000011111111111111111111111111111110',
'00000000000000000000000000000000000000000000000000000000111111111111111111111000000000000000000011111111111111111111111111111110',
'00000000000000000000000000000000000000000000000000000000111111111111111111111000000000000000000111111111111111111111111111111100',
'00000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000111111111111111111111111111111000',
'00000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000001111111111111111111111111111111000',
'10000000000000000000000000000000000000000000000000000000111111111111111111111000000000000000001111111111111111111111111111110000',
'00000000000000000000000000000000000000000000000000000000111111111111111111111100000000000000001111111111111111111111111111110000',
'11110000000000000000000000000000000000000000000000000000011111111111111111111111000000000011111111111111111111111111111111000000',
'11110000000000000000000000000000000000000000000000000000011111111111111111111100111111101011111111111111111111111111111111000000',
'11110000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111111010000000',
'11110000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111111111111100000110',
'11111100000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111111000001111',
'11111110000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111110000001111',
'11111111000000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111111111100000111111',
'11111111100000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111111111111111110000000111111',
'11111111111000000000000000000000000000000000000000000000000000101111111111111111111111111111111111111111111111111100001111111111',
'11111111111100000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111111111000111111111111',
'11111111111110000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111110011111111111111',
'11111111111111110000000000000000000000000000000000000000000000000011111111111111111111111111111111111111111111101111111111111111',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
51e4dce7f469cd88b25fc829aac70141681ca957 | 5d428cb2022457d2e4cadd1b4898b3375446da6a | /PID Control/PController.py | 5e4260a4b6aa99de12882c9b052ed2d38ddaa2e0 | [] | no_license | Dheenu-kasinathan/AI-for-Robotics | b6d7784b34d132b533bc23ca8327ff06f0439625 | 2cc27eaeba91ab64a2f162a477a15eb3ec6cfbb4 | refs/heads/master | 2020-05-07T21:34:05.703775 | 2019-04-14T19:21:03 | 2019-04-14T19:21:03 | 180,909,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,846 | py | # User Instructions
#
# Implement a P controller by running 100 iterations
# of robot motion. The desired trajectory for the
# robot is the x-axis. The steering angle should be set
# by the parameter tau so that:
#
# steering = -tau * crosstrack_error
#
# You'll only need to modify the `run` function at the bottom.
# ------------
import random
import numpy as np
import matplotlib.pyplot as plt
# ------------------------------------------------
#
# this is the Robot class
#
class Robot(object):
    """Simple bicycle-model robot used for the P-controller exercise."""

    def __init__(self, length=20.0):
        """
        Creates robot and initializes location/orientation to 0, 0, 0.
        *length* is the wheelbase used by the bicycle motion model.
        """
        self.x = 0.0
        self.y = 0.0
        self.orientation = 0.0
        self.length = length
        self.steering_noise = 0.0
        self.distance_noise = 0.0
        self.steering_drift = 0.0

    def set(self, x, y, orientation):
        """
        Sets a robot coordinate (orientation is normalized to [0, 2*pi)).
        """
        self.x = x
        self.y = y
        self.orientation = orientation % (2.0 * np.pi)

    def set_noise(self, steering_noise, distance_noise):
        """
        Sets the noise parameters.
        """
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.steering_noise = steering_noise
        self.distance_noise = distance_noise

    def set_steering_drift(self, drift):
        """
        Sets the systematical steering drift parameter
        """
        self.steering_drift = drift

    def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
        """
        Advance one step using the bicycle model.

        steering = front wheel steering angle, limited by max_steering_angle
        distance = total distance driven, must be non-negative
        """
        # Clamp the commanded steering and forbid reverse motion.
        if steering > max_steering_angle:
            steering = max_steering_angle
        if steering < -max_steering_angle:
            steering = -max_steering_angle
        if distance < 0.0:
            distance = 0.0

        # apply noise
        steering2 = random.gauss(steering, self.steering_noise)
        distance2 = random.gauss(distance, self.distance_noise)

        # apply steering drift
        steering2 += self.steering_drift

        # Execute motion: turn angle follows from wheelbase geometry.
        turn = np.tan(steering2) * distance2 / self.length

        if abs(turn) < tolerance:
            # approximate by straight line motion
            self.x += distance2 * np.cos(self.orientation)
            self.y += distance2 * np.sin(self.orientation)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
        else:
            # approximate bicycle model for motion: rotate about the
            # instantaneous turning center (cx, cy) by `turn` radians.
            radius = distance2 / turn
            cx = self.x - (np.sin(self.orientation) * radius)
            cy = self.y + (np.cos(self.orientation) * radius)
            self.orientation = (self.orientation + turn) % (2.0 * np.pi)
            self.x = cx + (np.sin(self.orientation) * radius)
            self.y = cy - (np.cos(self.orientation) * radius)

    def __repr__(self):
        return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
############## ADD / MODIFY CODE BELOW ####################
# ------------------------------------------------------------------------
#
# run - does a single control run
robot = Robot()
robot.set(0.0, 1.0, 0.0)
def run(robot, tau, n=100, speed=1.0):
    """Drive *robot* for *n* steps under a proportional (P) controller.

    The reference trajectory is the x-axis, so the crosstrack error (cte)
    is simply the robot's y coordinate and the steering command is
    ``steer = -tau * cte``.

    :param robot: object exposing .x, .y and .move(steering, distance)
    :param tau: proportional gain
    :param n: number of control steps to simulate
    :param speed: distance driven per step
    :return: (x_trajectory, y_trajectory) recorded after each step
    """
    x_trajectory = []
    y_trajectory = []
    # Bug fix: the iteration loop was missing, so only a single control step
    # ran and the *n* parameter (the requested 100 iterations) was ignored.
    for _ in range(n):
        cte = robot.y
        steer = -tau * cte
        robot.move(steer, speed)
        x_trajectory.append(robot.x)
        y_trajectory.append(robot.y)
    return x_trajectory, y_trajectory
x_trajectory, y_trajectory = run(robot, 0.1)
n = len(x_trajectory)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='P controller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
| [
"noreply@github.com"
] | Dheenu-kasinathan.noreply@github.com |
7cd3d0a6157fdb13adcb05da8c44356000f7062c | 29787e0aaa63b36c78f5a9c09ce687359c348dfd | /hosting_benchmark/inc/taxonomies.py | 3ced00cb00d5907f70c512ce45a2350580b96bfb | [
"MIT"
] | permissive | samupl/hosting-benchmark | b9d7333a9aa89ee4f4bc70ec730326007b13cd83 | b5529d92c30a9de29f6799e1eba64a4df7e30617 | refs/heads/master | 2022-12-27T02:47:18.878218 | 2020-07-15T14:13:12 | 2020-07-15T14:13:12 | 258,129,105 | 2 | 2 | MIT | 2020-10-03T12:30:23 | 2020-04-23T07:37:25 | PHP | UTF-8 | Python | false | false | 191 | py | """Hosting benchmark taxonomies."""
from typing import NamedTuple
class BenchmarkResult(NamedTuple):
    """Single benchmark result.

    Immutable record of one benchmark sample.
    """

    timestamp: float  # when the sample was taken — presumably epoch seconds; TODO confirm
    number: int       # sample's sequence number within a run — TODO confirm against producer
    data: dict        # raw measurement payload; schema is defined by the benchmark producer
| [
"s@samu.pl"
] | s@samu.pl |
cf6456ac85f4543949c393b10e7523d303358601 | 6755e2b3852245cb284116765e3939bfabeb0ee0 | /mult.py | 30900973eb8dbe08778fdfb584305613539f6b88 | [
"MIT"
] | permissive | Seanny123/nef-conceptors | 97977b2bd3a1fbad6c6890576e4f3b13a7c41e9e | e6b16282d2082563b687543734d2960927380e63 | refs/heads/master | 2020-03-26T13:12:14.477642 | 2018-12-22T15:09:42 | 2018-12-22T15:09:42 | 51,094,034 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,016 | py | # try boosting a pattern
from dmp_utils import *
from process import *
import scipy.io
from scipy import interpolate
import nengo
import numpy as np
import ipdb
def make_dmp_net(functions, input_obj, output_obj, name=""):
    """create one point attractor per dimension with goals as nodes
    and one unified neuron input for inhibition

    Each function in *functions* drives one output dimension: its sampled
    trajectory is turned into a forcing function that feeds that
    dimension's point attractor, driven by *input_obj*.

    TODO:
    - make the connections for inhibition (proxy for output of BG)
    - make the connections for scaling (proxy for output of Thal)
    """
    n = nengo.Network(label=name)
    n.pt_attractors = []
    n.conn_funcs = []
    n.f_conns = []
    with n:
        n.output = nengo.Node(size_in=len(functions))
        for d in range(len(functions)):
            goal = nengo.Node([0], label="goal_%s" %(d))
            attractor = gen_point_attractor(n, goal, n_neurons=500)
            attractor.label = "pt_attr_%s" %(d)
            nengo.Connection(attractor[0], n.output[d], synapse=None)
            n.pt_attractors.append(attractor)
        for f_i, func in enumerate(functions):
            # Sample the target over one oscillator cycle [-pi, pi] and
            # derive the DMP forcing function from it.
            dest = func(np.linspace(-np.pi, np.pi, ea_func_steps))
            dest = dest.reshape((-1, 1))
            force_func = gen_forcing_functions(dest, num_samples=50)[0]
            # Default-argument binding freezes force_func per lambda.
            n.conn_funcs.append(lambda x, force_func=force_func: force(x, force_func))
            n.f_conns.append(nengo.Connection(input_obj, n.pt_attractors[f_i][1], synapse=None,
                                              function=n.conn_funcs[f_i]))
    # NOTE(review): this connection is made outside the `with n:` block —
    # confirm it is meant to live in the enclosing network instead.
    nengo.Connection(n.output, output_obj)
    return n
def make_mult_net(n_neurons, dims, name=""):
    """Create a network that multiplies each input dimension by one scalar.

    Parameters:
        n_neurons: neurons per 2-D multiplication ensemble.
        dims: number of signal dimensions to scale.
        name: label for the created nengo.Network.

    Returns the network with input_sig (dims-D), input_scale (1-D) and
    output (dims-D) nodes; output[d] approximates input_sig[d] * input_scale.
    """
    n = nengo.Network(label=name)
    with n:
        n.input_sig = nengo.Node(size_in=dims)
        n.input_scale = nengo.Node(size_in=1)
        n.output = nengo.Node(size_in=dims)
        # TODO: add neuron input later for inhibition
        # Diagonal encoders are the standard choice for product networks.
        mult_encs = nengo.dists.Choice([[1,1],[1,-1],[-1,-1],[-1,1]])
        for d in range(dims):
            mult_ens = nengo.Ensemble(n_neurons=n_neurons, dimensions=2,
                                      encoders=mult_encs, neuron_type=nengo.LIFRate(), radius=2)
            nengo.Connection(n.input_sig[d], mult_ens[0])
            nengo.Connection(n.input_scale, mult_ens[1])
            # Decode the product of the two represented dimensions.
            nengo.Connection(mult_ens, n.output[d], function=lambda x: x[0]*x[1])
    return n
# load the patterns from matlab
# Names of the raw motion-capture pattern files (see `pre` in process.py).
pattern_file_names = (
    "nnRawRunJog",
    "nnRawExaStride",
    "nnRawSlowWalk",
    "nnRawWalk",
    "nnRawCartWheel",
    "nnRawWaltz",
    "nnRawCrawl",
    "nnRawStandup",
    "nnRawGetdown",
    "nnRawSitting",
    "nnRawGetSeated",
    "nnRawStandupFromStool",
    "nnRawBox1",
    "nnRawBox2",
    "nnRawBox3",
)
# max is 61, but 14 is a nice leg
output_dims = 15
# Only the first pattern_num patterns are actually built into the model.
pattern_num = 1
pattern_file_names = pattern_file_names[:pattern_num]
function_list, min_maxs = pre(output_dims, pattern_file_names)
ea_n_neurons = 300
ea_func_steps = 100
# Fixed seed for reproducible neuron parameters.
np.random.seed(3)
# maps from input value (in this case, theta) to output value
model = nengo.Network()
# Synaptic time constant used by the oscillator's recurrent connection.
tau = 0.1
#model.config[nengo.Ensemble].neuron_type = nengo.Direct()
dmp_net_list = []
with model:
    # 3-D oscillator state: (x, y) limit-cycle coordinates plus a rate input.
    osc = nengo.Ensemble(n_neurons=1, dimensions=3, neuron_type=nengo.Direct())
    def cycle(x):
        """makes a speed controlled oscillator"""
        a = 1.0
        # x[2] sets the angular frequency (cycles per second).
        b = 2.0 * np.pi * x[2]
        r = np.sqrt(x[0]**2.0 + x[1]**2.0)
        theta = np.arctan2(x[1], x[0])
        # Radial dynamics pull the state onto the unit circle (r -> sqrt(a)).
        dr = 10.0*(-r**3.0 + a*r)
        dtheta = b
        dx = dr*np.cos(theta) - r*np.sin(theta)*dtheta
        dy = dr*np.sin(theta) + r*np.cos(theta)*dtheta
        # Euler step scaled by the recurrent synapse time constant.
        return [x[0] + tau*dx, x[1] + tau*dy]
    nengo.Connection(osc, osc[:2], synapse=tau, function=cycle)
    # Constant speed input for the oscillator.
    rate = nengo.Node([1])
    nengo.Connection(rate, osc[2])
    # Brief kick to knock the oscillator off the unstable fixed point at 0.
    bump = nengo.Node(lambda t: 1 if t < 0.05 else 0)
    nengo.Connection(bump, osc[0])
    # controllers # TODO: Make smoother transition
    #inhibit_control = nengo.Node(lambda t: [0,1] if t < 2 else [1,0])
    #inhibit_control = nengo.Node([0])
    # One scaling knob per pattern (proxy for Thalamus output).
    scale_control = nengo.Node([1]*pattern_num)
    output = nengo.networks.EnsembleArray(n_neurons=1, n_ensembles=output_dims, radius=np.pi,
                                          neuron_type=nengo.Direct(), label="output")
    # one ensemble array per output pattern
    # each ensemble array has the output dimensions
    # combine ensemble arrays to combine patterns
    ea_n_neurons = 300
    for n_i, nm in enumerate(pattern_file_names):
        # Strip the "nnRaw" prefix for a short label.
        name = nm[5:]
        print(name)
        # first just get inhibition working
        # then convert this normal ensemble array into a fancy multiplication array
        mult_ea = make_mult_net(500, output_dims, name="mult"+name)
        n = make_dmp_net(function_list[n_i], osc[:2], mult_ea.input_sig, name=name)
        nengo.Connection(scale_control[n_i], mult_ea.input_scale)
        nengo.Connection(mult_ea.output, output.input)
        dmp_net_list.append(n)
    # probe the output
    p_out = nengo.Probe(output.output, synapse=0.15)
with nengo.Simulator(model) as sim:
    sim.run(2)
# Post-process and display/save the probed trajectories.
post(sim, p_out, min_maxs)
| [
"seanaubin@gmail.com"
] | seanaubin@gmail.com |
559e33669cbc8f83386aa25aa905becbb1f9ce81 | 51ea2ab69aa86d5b2cfb7e4ca8f857d34d5299ac | /parser.py | daf7511a63bc06040903ce2eb8fa1e7f4dbd90d2 | [] | no_license | bmys/py2js | 9042f29521ed60226e635db0659151cb214a0411 | 74b93ba87b93105a23b6da3d91dd961f0e68307a | refs/heads/master | 2020-04-12T17:03:57.771615 | 2018-12-20T23:39:02 | 2018-12-20T23:39:02 | 162,633,578 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | #! /usr/bin/python3
import sys, re
# Loading file
# Naive line-by-line Python -> JavaScript transpiler driven by regexes.
# Reads the file named on the command line and prints the translation.
file_name = sys.argv[1]
file_content = None
try:
    with open(file_name, 'r') as file:
        file_content = file.readlines()
except FileNotFoundError:
    print(f'File "{file_name}" not found')
    sys.exit(1)
# Pair each line with its space count as an indentation proxy.
# NOTE(review): count(' ') counts ALL spaces in the line, not just leading
# indentation -- lines with internal spaces look "deeper"; confirm intent.
intent_val = [(value.count(' '), value.strip()) for value in file_content]
# Maps a loop variable name to its JS index expression while inside a loop.
local_vars = dict()
in_loop = False
# Walk consecutive line pairs so braces can be emitted on indent changes.
for cur, nx in zip(intent_val, intent_val[1:]):
    if in_loop:
        # Rewrite loop variables to their indexed JS form.
        for key, value in local_vars.items():
            cur = (cur[0], cur[1].replace(key, value))
    loop = re.search('for\s(.+)\sin\s(.+):', cur[1])
    # assign = re.search('\s*(\w+)\s*=\s*(.+)', cur[1])
    assign = re.search('\s*(.+)\s*=\s*(.+)', cur[1])
    # NOTE(review): '$' anchors at end-of-string, so this pattern likely
    # never matches a comment line -- verify against sample input.
    comment = re.search('$\s*#(.*)\s*', cur[1])
    msg = re.search('\s*print\((.*)\)', cur[1])
    is_st = re.search('.+(is).+', cur[1])
    function_dec = re.search('\s*def\s+(.+):', cur[1])
    # DOM access shorthand: doc['#id'] -> document.getElementById("id").
    get_by_id = re.search('doc\[[\'\"]#(\w+)', cur[1])
    if get_by_id is not None:
        get_by_id = get_by_id.groups()
        # cur = (cur[0], cur[1].replace(f'doc[{get_by_id[0]}]', f'document.getElementById({get_by_id[0]})'))
        cur = (cur[0], re.sub(r'(doc\[[\'\"]#\w+\'?\"?\])', f'document.getElementById("{get_by_id[0]}")', cur[1]))
    if is_st is not None:
        is_st = is_st.groups()
        # NOTE(review): replaces 'is' anywhere in the line, including inside
        # identifiers and strings -- confirm acceptable for the inputs used.
        cur = (cur[0], cur[1].replace('is', '==='))
    if loop is not None:
        loop = loop.groups()
        # print(a.groups())
        # Translate "for v in seq:" into a C-style indexed JS loop.
        print(f'var {loop[0]} = 0;')
        print(f'for(;{loop[0]}<{loop[1]}.length; {loop[0]}++)', end='')
        local_vars[loop[0]] = f'{loop[1]}[{loop[0]}]'
        in_loop = True
    elif assign is not None and not assign[0].find('.'):
        # NOTE(review): "not find('.')" is True only when the match STARTS
        # with a dot (find returns 0); likely meant "'.' not in ..." -- verify.
        assign = assign.groups()
        print(f'var {assign[0]} = {assign[1]};')
    elif comment is not None:
        comment = comment.groups()
        print(f'// {comment[0]}')
    elif msg is not None:
        msg = msg.groups()
        print(f'console.log({msg[0]});')
    elif function_dec is not None:
        function_dec = function_dec.groups()
        print(f'function {function_dec[0]}')
    else:
        # Pass unrecognized lines through with their indentation.
        print(cur[0] * ' ' + cur[1])
    # Emit braces when the indentation level changes between lines.
    if cur[0] < nx[0]:
        print(cur[0] * ' ' + '{')
    if cur[0] > nx[0]:
        print(nx[0] * ' ' + '}')
        if in_loop:
            in_loop = False
            local_vars = dict()
"noreply@github.com"
] | bmys.noreply@github.com |
96d4ef0f26cb6a613175373d07d8b3f5b5f395e2 | b643362050950db4dd62924b1d091e1a9d556fe7 | /Own notes/Ch_4_exercises.py | cbb2ee11f88d1c3f91fb3e871245fddd4fd60ac7 | [] | no_license | t-redactyl/Practical-Programming-exercises | bc6be513b79bcd70dc6777c9248c89bbe51ec9cb | 49754b13b10417746fbde73921b79343e8020b8d | refs/heads/master | 2021-01-10T03:16:09.815697 | 2015-11-08T00:28:29 | 2015-11-08T00:28:29 | 44,232,631 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | # Exercises for Chapter 4
import math
print "The absolute rounded value of -4.3 is %f." % (abs(round(-4.3)))
print "The ceiling of the sin of 34.5 is %f." % (math.ceil(math.sin(34.5)))
import calendar
print "The number of leap years between 2000 and 2050 is %d." % (calendar.leapdays(2000, 2050))
print "The day of the week of July 29, 2016 is %d" % (calendar.weekday(2016, 07, 29))
print 'boolean'.capitalize()
print 'C02 H20'.find('2') # It is 2, as Python indices start at 0
print 'C02 H20'.find('2', 3)
print 'C02 H20'.find('2', 'C02 H20'.find('2') + 1)
print 'Boolean'[0].isupper() # First letter of Boolean is uppercase
print "MoNDaY".lower().upper()
print " Monday".strip()
| [
"jodie.burchell@gmail.com"
] | jodie.burchell@gmail.com |
422abca4bacf6e2ba6f5234bc20564e9345ab709 | 1ae7de4b306d61a6648062ed289c62fbfdbd0fcf | /app/core/migrations/0001_initial.py | ef0006a0b9451d0c7e3413d33c0228abb13607a8 | [
"MIT"
] | permissive | manuelmillares/recipe-app-api | 57259d72c81a2d0c27a45c7bb2ec29c74ba285b0 | eda84164b7d6256d4d39bbd688197427620b9089 | refs/heads/master | 2022-12-04T20:45:11.616756 | 2020-08-17T20:10:19 | 2020-08-17T20:10:19 | 285,604,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | # Generated by Django 2.1.15 on 2020-08-10 21:36
from django.db import migrations, models
# Auto-generated initial migration creating the project's custom User model
# (email login instead of username). Edit with care: Django tracks this
# migration's state; prefer new migrations over modifying this one.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"manuelmillares1@gmail.com"
] | manuelmillares1@gmail.com |
20ac39525465b241ccea7a33d723555f6ef67591 | ef91b74131b8791800d2365982edbfaf515ef54a | /day4/02_BeautifulSoup/ex04_monthly_savings.py | 971a91ad5664dac688b889dc9f58bcf2687ffcee | [] | no_license | akatkar/python-training-examples | ec749f58698fc1cfa458246ce11069f94d25027e | 8afa2e347c079a84740f4559e78c1be55eed5226 | refs/heads/master | 2020-05-16T03:32:22.613770 | 2019-04-22T09:15:14 | 2019-04-22T09:25:17 | 182,726,662 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | from bs4 import BeautifulSoup
# Parse the local HTML document once at import time; `bs` is used below.
with open("index.html") as fp:
    bs = BeautifulSoup(fp,"html.parser")
def findTable(caption):
    """Return the first <table> in ``bs`` whose <caption> text equals *caption*.

    Returns None when no table matches. Tables without a <caption> element
    are skipped (the original code raised AttributeError on them because
    ``table.find("caption")`` returns None).
    """
    for table in bs.find_all("table"):
        cap = table.find("caption")
        if cap is not None and cap.text.strip() == caption:
            return table
    return None
savings = findTable("Monthly savings")
if savings:
    # Column headers come from the <th> cells.
    headers = [header.string for header in savings.find_all("th")]
    # Flatten all <td> cell texts, row by row.
    valueList = [col.text.strip() for row in savings.find_all("tr") for col in row.find_all("td")]
    # Re-chunk the flat cell list into rows of len(headers) columns.
    values = [valueList[i * len(headers):(i + 1) * len(headers)] for i in range(len(valueList) // len(headers))]
    # Prepend a separator row and the header row.
    values.insert(0, ['-'*len(header) for header in headers])
    values.insert(0, headers)
    # Print as fixed-width (20-char) columns.
    for row in values:
        for col in row:
            print(f"{col:20}", end='')
        print()
"alikatkar@gmail.com"
] | alikatkar@gmail.com |
ab9827b3472bc7c44a3a1d38cfa64e8dc1a5eae9 | 5a8c6abdfa55b31e42e39866122eaaaf93cd40f6 | /src/player.py | e0f075b043522c47e143537eaf4c13bc51ddeba8 | [] | no_license | ycchen00/Mancala | 671fa8834a86380f1e75b25f7b09491eb733403e | 73942072d6bd25133fc8b63272b87f0462b25f97 | refs/heads/master | 2023-08-21T01:49:17.651404 | 2021-10-26T03:05:01 | 2021-10-26T03:05:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,187 | py | from math import floor, ceil
from random import choice
from copy import deepcopy
class Player(object):
    """A Mancala player: human, random, or a minimax/alpha-beta searcher.

    `game` arguments are expected to expose the board API used below
    (M, p1_pits, p1_store, p2_pits, p2_store, filter_actions,
    check_illegal_move, check_end_game, find_winner_scores, sowing).
    """
    def __init__(self, index, algo, maximum_depth=float("inf"), depth=0):
        """Init a player
        ---
        index: 1/2 -> player 1/2
        algo: the pattern of the player: human/random/minimax/alphabeta
        maximum_depth: search-depth cutoff for the tree-search players
        depth: current depth in the search tree (0 at the root)
        ---
        """
        self.index = index
        self.opp_index = 3 - self.index # the opposite player's index (1<->2)
        self.algo = algo
        self.depth = depth
        self.maximum_depth = maximum_depth
    def reset(self):
        """Reset the player's search depth to the root."""
        self.depth = 0
    def get_move(self, game):
        """Dispatch to the move function selected by self.algo."""
        algos_dict = {
            'human': self.human_player,
            'random': self.random_player,
            'minimax': self.minmax_player,
            'alphabeta': self.abpruning_player
        }
        return algos_dict[self.algo](game)
    def ask4move(self, game):
        """For a human player: print the board and prompt for a pit.
        ---
        Returns the chosen pit as a 0-based index (user enters 1-based)."""
        request_str = "" # "Your turn:\n"
        if self.index == 1:
            # Player 1's side: pits numbered left-to-right, store on the right.
            request_str += f"\t{'-' * floor(7 * (game.M + 1) / 2)}Player1{'-' * ceil(7 * (game.M + 1) / 2)}\n"
            request_str += (f"\tLocation: " +
                            " || ".join(map(str, range(1, 1 + game.M))) +
                            f" | - \n")
            request_str += (f"\tNum pits: " +
                            " || ".join(map(str, game.p1_pits())) +
                            f" | - {game.p1_store()}\n")
        else:
            # Player 2's side: pits shown reversed, store on the left.
            request_str += (f"\tLocation: - | " +
                            " || ".join(map(str, range(game.M, 0, -1))) +
                            f"\n")
            request_str += (f"\tNum pits: - {game.p2_store()}| " +
                            " || ".join(map(str, game.p2_pits()[::-1])) +
                            f"\n")
            request_str += f"\t{'-' * floor(7 * (game.M + 1) / 2)}Player2{'-' * ceil(7 * (game.M + 1) / 2)}\n"
        print(request_str)
        # NOTE(review): the bare except gives exactly one retry; a second
        # bad entry raises ValueError -- confirm intended.
        try:
            move = int(input('\tPlease enter your target pit:'))
        except:
            move = int(input('\tWrong input. Please try again:'))
        return move - 1
    def human_player(self, game):
        """Human player: prompt until the chosen move is legal."""
        move = self.ask4move(game)
        while not game.check_illegal_move(self, move):
            move = self.ask4move(game)
        return move
    def random_player(self, game):
        """Random player: pick uniformly among the legal actions."""
        legal_actions = game.filter_actions(self)
        move = choice(legal_actions)
        return move
    def score(self, game, h_choice=0):
        """Evaluate the position from this player's point of view.

        h_choice selects the terminal heuristic (0: fixed +/-50;
        1: depth-discounted; 2: score difference; 3: both). Non-terminal
        positions (depth cutoff) fall through to the score-difference
        heuristic at the bottom.
        """
        win_index, p1_score, p2_score = game.find_winner_scores()
        if game.check_end_game():
            if h_choice == 0:
                if win_index == self.index:
                    return 50
                elif win_index == self.opp_index:
                    return -50
                else:
                    return 0
            elif h_choice == 1: # depth consider: prefer faster wins
                if win_index == self.index:
                    return 50 - self.depth
                elif win_index == self.opp_index:
                    return self.depth - 50
                else:
                    return 0
                pass  # unreachable: every branch above returns
            elif h_choice == 2: # diff consider: margin of victory
                if win_index == self.index:
                    return abs(p1_score - p2_score)
                elif win_index == self.opp_index:
                    return -abs(p1_score - p2_score)
                else:
                    return 0
            elif h_choice == 3: # all consider: margin discounted by depth
                if win_index == self.index:
                    return abs(p1_score - p2_score) - self.depth
                elif win_index == self.opp_index:
                    return -abs(p1_score - p2_score) + self.depth
                else:
                    return 0
            else:
                pass
        # Non-terminal (depth cutoff) heuristic: current score difference.
        if win_index == self.index:
            return abs(p1_score - p2_score)
        elif win_index == self.opp_index:
            return -abs(p1_score - p2_score)
        else:
            return 0
    def reach_max_depth(self):
        """Check whether the search has reached the maximum depth."""
        return self.depth >= self.maximum_depth
    def max_value(self, game, ab_flag=False, alpha=float("-inf"), beta=float("inf")):
        """Maximizing step of minimax; prunes with (alpha, beta) if ab_flag."""
        if game.check_end_game() or self.reach_max_depth():
            return self.score(game), None
        v = float("-inf")
        move = -1
        for a in game.filter_actions(self):
            # Build the opponent one level deeper and simulate the move on a copy.
            opp_player = Player(self.opp_index, self.algo, self.maximum_depth, self.depth + 1)
            next_game = deepcopy(game)
            next_game.sowing(self, a)
            v2, _ = opp_player.min_value(next_game, ab_flag, alpha, beta)
            if v2 > v:
                v = v2
                move = a
                alpha = max(alpha, v)
            if ab_flag and v >= beta:
                # Beta cutoff: the minimizer above will never allow this line.
                return v, move
        return v, move
    def min_value(self, game, ab_flag=False, alpha=float("-inf"), beta=float("inf")):
        """Minimizing step of minimax; prunes with (alpha, beta) if ab_flag."""
        if game.check_end_game() or self.reach_max_depth():
            return self.score(game), None
        v = float("inf")
        move = -1
        for a in game.filter_actions(self):
            opp_player = Player(self.opp_index, self.algo, self.maximum_depth, self.depth + 1)
            next_game = deepcopy(game)
            next_game.sowing(self, a)
            v2, _ = opp_player.max_value(next_game, ab_flag, alpha, beta)
            if v2 < v:
                v = v2
                move = a
                beta = min(beta, v)
            if ab_flag and v <= alpha:
                # Alpha cutoff: the maximizer above will never allow this line.
                return v, move
        return v, move
    def minmax_player(self, game):
        """Minimax player: full-width search to maximum_depth."""
        move = self.max_value(game)[1]
        return move
    def abpruning_player(self, game):
        """Alpha-beta pruning player: minimax with cutoffs enabled."""
        move = self.max_value(game, True)[1]
        return move
| [
"chen.yuchi@zkyunkang.com"
] | chen.yuchi@zkyunkang.com |
05eacae54547837444451aba6a9ab0c685add15e | 03198f075072bfb9d5c5afab2fef99d3ec5f37db | /source/api_v2/serializers/advert.py | 8c9cf5e5ce4d0f747676fb2b5908d2bbc2e61240 | [] | no_license | Azer-Denker/Ex_12 | 2c402dffddbf726bfaab61f5022ea0cf6b6b3562 | 97d4eda2d621163c6e12ea388569b50157d09fd5 | refs/heads/main | 2023-07-14T19:05:39.763400 | 2021-08-21T13:30:31 | 2021-08-21T13:30:31 | 398,558,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | from rest_framework import serializers
from webapp.models import Advert
class AdvertSerializer(serializers.ModelSerializer):
    """DRF serializer for Advert objects; author and id are read-only."""
    class Meta:
        model = Advert
        fields = ('id', 'title', 'text', 'author', 'created_at')
        read_only_fields = ('author', 'id')
    def create(self, validated_data):
        """Create and return a new Advert from the validated payload."""
        return Advert.objects.create(**validated_data)
    def update(self, instance, validated_data):
        """Apply each validated field to the instance and persist it."""
        for field, value in validated_data.items():
            setattr(instance, field, value)
        instance.save()
        return instance
    def delete(self, instance):
        # NOTE(review): delete() is not part of the DRF serializer API;
        # presumably called directly by a view -- verify against callers.
        instance.delete()
        return instance.pk
| [
"azerarlen312@gmail.com"
] | azerarlen312@gmail.com |
29b9f78a2e90b472d3ff3098109f5326941fa354 | 3fcf1e68ef4a0af441842212335a47e11c42781c | /Examples_2/tut9.py | 9512d4a888251b2223e8a8dd0bc744102354555d | [] | no_license | fatihinz/Python | 27fac03b2a12a6e2593fef1d1a2515a4fbc401d6 | d28cecf9c4c50346dcf1a88ff46305527662ec12 | refs/heads/master | 2022-09-27T01:58:15.604257 | 2020-06-08T15:58:15 | 2020-06-08T15:58:15 | 270,626,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | class Pet:
    def __init__(self, name, age):
        """Store the pet's name and age."""
        self.name = name
        self.age = age
    def talk(self):
        """Abstract sound method; subclasses must override."""
        raise NotImplementedError("Subclass must implement abstract method")
class Cat(Pet):
    """A cat pet; provides the concrete sound required by Pet.talk()."""

    def __init__(self, name, age):
        # Forward construction to the Pet base class.
        super().__init__(name, age)

    def talk(self):
        """Return the sound a cat makes."""
        return "meowww"
class Dog(Pet):
    """A dog pet; provides the concrete sound required by Pet.talk()."""

    def __init__(self, name, age):
        # Forward construction to the Pet base class.
        super().__init__(name, age)

    def talk(self):
        """Return the sound a dog makes."""
        return "wooooffff"
def Main():
    """Print name, age and sound for a list of pets (polymorphism demo).

    NOTE: the final list entry is a plain Pet, whose talk() raises
    NotImplementedError, so the loop ends with that error after the
    three concrete pets have printed.
    """
    # Commented-out isinstance() experiments from the tutorial were removed.
    pets = [Cat("jess", 3), Dog("jack", 2), Cat("Fred", 7), Pet("ThePet", 5)]
    for pet in pets:
        print("Name :" + pet.name + ",Age :" + str(pet.age) + "talks :" + pet.talk())
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    Main()
"fatihyavuzyilmaz@gmail.com"
] | fatihyavuzyilmaz@gmail.com |
e9078507a849826c1f513f40a618688cfec435d6 | ea6e3f65e16dea89f2504f3831962d50092da3a5 | /node.py | a51a5c9efedef48dbc1b4683d4cf176cbb7a6c5f | [] | no_license | evelyn2309/ba-bernhardt | 8dacf2e736392117894b0f432648ce3ce226b371 | 301a2f21ab1e331ff1884a8e1207f3480be037ef | refs/heads/master | 2020-08-04T06:50:45.627979 | 2019-10-01T11:42:46 | 2019-10-01T11:42:46 | 212,044,693 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from connection import *
class Node(object):
    """A network node that owns a single client Connection."""
    def __init__(self):
        self.conn = Connection()
    def setup_node(self, address, port):
        """
        Connect this node's Connection to the given server.
        :param address: must be string.
        :param port: must be int.
        :return: None.
        """
        self.conn.connect_server(address, port)
# NOTE(review): this connects at import time (no __main__ guard);
# importing this module has the side effect of opening a connection.
n = Node()
# modify here the IP-address and the port
n.setup_node("192.168.2.226", 5563)
| [
"ebernhar@students.uni-mainz.de"
] | ebernhar@students.uni-mainz.de |
cb77d2630711423eb7b83a180fcc926ec8cfc5e2 | 7c7246dc641f2fc8799acaa10869b5e9b6d354ed | /venv/Scripts/django-admin.py | 09976efe86b1bbd77b0366473720877ede991118 | [] | no_license | lamador25/DevOps | 3acb061277b67db5dc87ca62a83332ca54ca6d31 | f84183d6cd1df801163b2b1438babfab4be3e369 | refs/heads/master | 2020-03-27T03:57:14.895937 | 2018-08-24T23:18:34 | 2018-08-24T23:18:41 | 145,901,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | #!D:\PycharmProjects\DevOps\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's command-line dispatcher (same role as manage.py).
    management.execute_from_command_line()
| [
"lamador@homecenter.co"
] | lamador@homecenter.co |
115a35d4b021fae36e6c329d886c3c877510221f | 2813274dd110773a70f3c7192ff9fb0a5c5233b4 | /UI_Sqlite3_PyQt/sqlite_main.py | 6f670f62c1da7cb05cfcdaaf9b6b72d6f45c613a | [] | no_license | krzover/myPython | 4a37a363cf4ae3838c0f95ffb6126d1811b7cca1 | 38b3d69e02dee6b8e5b8b22e546080359ce16380 | refs/heads/master | 2021-01-13T01:18:50.366259 | 2017-04-28T06:54:12 | 2017-04-28T06:54:12 | 81,800,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | #coding:utf-8
from PyQt5.QtWidgets import QApplication,QMainWindow
from PyQt5 import uic
import sqlite3
import sqlite_table
import sqlite_class
import sys
import os
class manager(QMainWindow):
    """Main menu window (Python 2 / PyQt5) for the sqlite demo app.

    Each sql_* slot closes this window and opens the matching dialog.
    Dialogs are kept in module-level globals so Qt does not garbage-collect
    them while they are shown.
    """
    def __init__(self):
        QMainWindow.__init__(self)
        # loadUi populates this window in place; rebinding the local name
        # `self` to its return value has no effect outside __init__.
        self = uic.loadUi('ui_main.ui',self)
    def sql_add(self):
        # reload(sqlite_class)
        global add_face
        add_face = sqlite_class.addsql()
        add_face.show()
        self.close()
    def sql_change(self):
        print 'change'
        self.close()
        global change_face
        change_face = sqlite_class.changesql()
        change_face.show()
    def sql_find(self):
        print 'find'
        self.close()
        global find_face
        find_face = sqlite_class.findsql()
        find_face.show()
    def sql_del(self):
        print 'del'
        self.close()
        global del_face
        del_face = sqlite_class.delsql()
        del_face.show()
    def lookall(self):
        """Load every row from the people table and show it in a table view."""
        print 'lookall'
        listt = []
        # NOTE(review): the connection is never closed; consider closing
        # after the query (sqlite3 connections hold the file open).
        conn = sqlite3.connect('database.db')
        sear = "SELECT * FROM people ORDER BY id ASC"
        idlist = conn.execute(sear)
        for x in idlist:
            listt.append(x)
        global mytable
        mytable = sqlite_table.mytable(listt)
        mytable.show()
if __name__ == '__main__':
    # Standard Qt application bootstrap: create the app, show the main
    # window, and hand control to the event loop.
    app = QApplication(sys.argv)
    m = manager()
    m.show()
    # add_face=None
    sys.exit(app.exec_())
"krzover@gmail.com"
] | krzover@gmail.com |
577585d5d219c57f9600b650ba385b7862d9cbc8 | f5d79d70faf6e12ed1152f1fbbf0441c28c1acbf | /process_incoming_data.py | 7e49b75a8aab35bfe10af3121d4057c0e633c236 | [] | no_license | DalavanCloud/consumer-credit-trends-data | f1ebfa0b83e498b086a5f4fabcebb63f1c0e0e48 | 1cd04b96051de7b874e3a1097110c913a0166fbc | refs/heads/master | 2020-04-25T06:43:18.892148 | 2018-10-05T16:08:37 | 2018-10-05T16:08:37 | 172,590,367 | 1 | 0 | null | 2019-02-25T21:47:06 | 2019-02-25T21:47:06 | null | UTF-8 | Python | false | false | 27,011 | py | #!/usr/bin/env python
"""
Processes incoming data from the Office of Research and munges it into
the output formats expected by the CFPB chart display organisms.
Output formats are documented at
www.github.com/cfpb/consumer-credit-trends
"""
# Python library imports
import os
import datetime
import logging
# Local imports
import process_globals as cfg
import process_utils as util
__author__ = "Consumer Financial Protection Bureau"
__credits__ = ["Hillary Jeffrey"]
__license__ = "CC0-1.0"
__version__ = "2.0"
__maintainer__ = "CFPB"
__email__ = "tech@cfpb.gov"
__status__ = "Development"
# Constants
# Negative slice length used as filename[:MKT_SFX_LEN] to strip the
# trailing market suffix plus ".csv" (8 characters) from input filenames.
MKT_SFX_LEN = -8
# Set up module-level logging at INFO verbosity
logging.basicConfig(level="INFO")
logger = logging.getLogger(__name__)
# Utility Methods
# Generalized utility methods are found in process_utils.py
def load_paths(inpath=cfg.DEFAULT_INPUT_FOLDER,
               outpath=cfg.DEFAULT_OUTPUT_FOLDER):
    """Expand the input and output root paths and return them as a pair.

    Defaults come from the processing configuration module; expansion is
    delegated to util.expand_path.
    """
    return util.expand_path(inpath), util.expand_path(outpath)
def find_market(input, possible_names=cfg.MARKET_NAMES):
    """Return the market name whose abbreviation appears in *input*.

    Scans the abbreviation->name mapping in order and returns the first
    name whose abbreviation is a substring of *input*, or None when no
    known abbreviation occurs.
    """
    matches = (name for abbr, name in possible_names.items() if abbr in input)
    return next(matches, None)
def actual_date(month, schema=cfg.DATA_FILE_DATE_SCHEMA):
    """Convert an Office of Research month index into a formatted date string.

    Month indices count months since January of cfg.BASE_YEAR
    (January 2000 = month zero); the result is the first day of that
    month rendered with *schema*.
    """
    years_offset = int(month / 12)
    month_of_year = (month % 12) + 1  # calendar months are 1-12
    first_of_month = datetime.date(cfg.BASE_YEAR + years_offset, month_of_year, 1)
    return first_of_month.strftime(schema)
# Main program functionality
def process_data_files(inputpath,
                       outputpath,
                       data_snapshot_fname=cfg.SNAPSHOT_FNAME_KEY,
                       data_snapshot_path=''):
    """Processes raw csv data from the Office of Research.

    Walks every csv in *inputpath*, dispatches market files to the handler
    registered for their filename prefix in FILE_PREFIXES, writes the
    resulting csv/json under *outputpath*/<market>/, and handles the data
    snapshot file separately (written to *data_snapshot_path* when given).

    NOTE(review): returns [] on empty input but None otherwise; callers
    should not rely on the return value.
    """
    inputfiles = util.get_csv_list(inputpath)
    logger.debug("Found files:\n{}".format(inputfiles))
    logger.info(
        "Found {} csv files in '{}'".format(
            len(inputfiles),
            inputpath
        )
    )
    if len(inputfiles) == 0:
        logger.warn("No csv data files found in {}".format(inputpath))
        return []
    successes = []
    failures = []
    # For each file, open and munge data
    for filename in inputfiles:
        filepath = os.path.join(inputpath, filename)
        # Check for market in filename
        market = find_market(filename)
        if market is None:
            # Not a market file: either the data snapshot or an unrelated csv.
            if data_snapshot_fname in filename:
                if len(data_snapshot_path) <= 0:
                    logger.warn(
                        "Data snapshot output path is not specified."
                    )
                    logger.warn(
                        "To process data snapshot file, specify " +
                        "the --data-snapshot-path command-line " +
                        "argument."
                    )
                    continue
                # Check/process Data Snapshot file into human-readable snippets
                snapshots = process_data_snapshot(filepath)
                # Generate output dictionary
                today = datetime.datetime.today()
                logger.info(
                    "Date published is {}".format(
                        today.strftime(cfg.SNAPSHOT_DATE_SCHEMA)
                    )
                )
                content_updates = {
                    'date_published': today.strftime(cfg.SNAPSHOT_DATE_SCHEMA),
                    'markets': snapshots
                }
                # Save data snapshot info as JSON
                data_snapshot_path = util.expand_path(data_snapshot_path)
                if not os.path.exists(os.path.dirname(data_snapshot_path)):
                    os.makedirs(os.path.dirname(data_snapshot_path))
                util.save_json(data_snapshot_path, content_updates)
                logger.info(
                    "Saved output data snapshot information to '{}'".format(
                        data_snapshot_path
                    )
                )
                successes.append(filename)
            # Doesn't match an expected filename; may not be a CCT file
            else:
                logger.info(
                    "Ignoring file '{}' as not CCT related".format(filename)
                )
                failures.append(filename)
                continue
        else:
            # Run file per market-type
            try:
                # Strip the market suffix + ".csv" to find the handler key.
                current_prefix = filename[:MKT_SFX_LEN].lower()
                cond, data, json = FILE_PREFIXES[current_prefix](filepath)
            except ValueError as e:
                logger.error("Error occurred during {}".format(filename))
                raise e
            if cond:
                # Determine output directory
                outpath = os.path.join(outputpath, market, filename)
                if len(data) > 0:
                    util.save_csv(outpath, data)
                    util.save_json(outpath.replace(".csv", ".json"), json)
                successes.append(filename)
            else:
                failures.append(filename)
    # Summarize processing statistics
    logger.info(
        "** Processed {} input data files successfully".format(
            len(successes)
        )
    )
    if len(failures) > 0:
        logger.warn(
            "** Unable to process {} input data files".format(
                len(failures)
            )
        )
    return
# Process state-by-state map files
def process_map(filename, output_schema=cfg.MAP_OUTPUT_SCHEMA):
    """Munge a state-by-state map csv into rows matching *output_schema*.

    Input rows are (fips_code, value); output rows are
    (fips_code, state_abbr, value) with the abbreviation looked up in
    cfg.FIPS_CODES. Returns (success, csv_rows, json_rows); the row lists
    are empty when the file held no data.
    """
    # TODO: Add error handling for unsupported FIPS codes
    # TODO: Make sure all 50 states (or other expected data) is represented
    rows = [[row[0], cfg.FIPS_CODES[int(row[0])], row[1]]
            for row in util.load_csv(filename)]
    if not rows:
        return True, [], []
    return True, [output_schema] + rows, json_for_tile_map(rows)
# Process inquiry index file
def process_inquiry_index(filename):
    """Call process_file_summary on the specified inquiry file and
    return output data and json per the inquiry-index output schema."""
    logger.debug("Running process_inquiry_index")
    return process_file_summary(filename, cfg.INQUIRY_INDEX_OUTPUT_SCHEMA)
# Process inferred credit tightness index file
def process_tightness_index(filename):
    """Call process_file_summary on the specified credit tightness file and
    return output data and json per the tightness-index output schema."""
    logger.debug("Running process_tightness_index")
    return process_file_summary(filename, cfg.TIGHTNESS_INDEX_OUTPUT_SCHEMA)
# Process summary files with loan numbers or volumes
def process_num_summary(filename):
    """Calls process_file_summary with the loan-number output schema"""
    # Output columns: "month", "date", "num", "num_unadj"
    return process_file_summary(filename, cfg.SUMMARY_NUM_OUTPUT_SCHEMA)
def process_vol_summary(filename):
    """Calls process_file_summary with the loan-volume output schema"""
    # Output columns: "month", "date", "vol", "vol_unadj"
    return process_file_summary(filename, cfg.SUMMARY_VOL_OUTPUT_SCHEMA)
def process_file_summary(filename, output_schema):
    """Munge a summary csv (month, value, adjustment flag) into schema rows.

    Each input row carries a month number, a value, and a string saying
    whether the value is seasonally adjusted. The adjusted/unadjusted
    values for each month are merged into one output row
    [month, date, adjusted, unadjusted], sorted chronologically.
    Returns (success, csv_rows, json_rows).

    Raises:
        TypeError: when a row's adjustment column names neither
            "unadjust..." nor "seasonal...".
    """
    by_month = {}
    for row in util.load_csv(filename):
        monthstr, value, is_adj_str = row
        month = int(monthstr)
        entry = by_month.setdefault(month, {"adj": None, "unadj": None})
        flag = is_adj_str.lower()
        # "unadjust" is tested first so "seasonally unadjusted" is not
        # mistaken for an adjusted value.
        if "unadjust" in flag:
            entry["unadj"] = value
        elif "seasonal" in flag:
            entry["adj"] = value
        else:
            msg = "Data row (below) does not specify seasonal adjustment " + \
                  "in {}\n{}".format(filename, ",".join(row))
            logger.error(msg)
            raise TypeError(msg)

    # Emit one row per month, sorted by month number; the column order
    # MUST match the provided schema.
    data = [[month, actual_date(month), vals["adj"], vals["unadj"]]
            for month, vals in sorted(by_month.items())]
    data.insert(0, output_schema)

    # JSON-format only when at least one data row exists.
    if len(data) > 1:
        return True, data, json_for_line_chart(data[1:])
    return True, [], []
# Process volume files with groups (borrower age, income level, credit score)
# Output columns: "month", "date", "volume", "volume_unadj", "<type>_group"
def process_group_age_vol(filename):
    """Calls process_group_file with the volume schema specialized
    for borrower-age groups (last column becomes the age group)."""
    schema = list(cfg.GROUP_VOL_OUTPUT_SCHEMA)
    schema[-1] = schema[-1].format(cfg.AGE)
    return process_group_file(filename, schema)
def process_group_income_vol(filename):
    """Calls process_group_file with the volume schema specialized
    for income-level groups (last column becomes the income group)."""
    schema = list(cfg.GROUP_VOL_OUTPUT_SCHEMA)
    schema[-1] = schema[-1].format(cfg.INCOME)
    return process_group_file(filename, schema)
def process_group_score_vol(filename):
    """Calls process_group_file with the volume schema specialized
    for credit-score groups (last column becomes the score group)."""
    schema = list(cfg.GROUP_VOL_OUTPUT_SCHEMA)
    schema[-1] = schema[-1].format(cfg.SCORE)
    return process_group_file(filename, schema)
def process_group_file(filename, output_schema):
    """Processes specified group volume file and outputs data per the schema.

    Input rows are (month, value, group, adjustment flag); adjusted and
    unadjusted values for each (month, group) pair are merged into one
    output row [month, date, adjusted, unadjusted, group], sorted by
    month. Returns (success, csv_rows, json_rows).

    Raises:
        TypeError: when a row's adjustment column names neither
            "unadjust..." nor "seasonal...".
    """
    # Load specified file as input data
    inputdata = util.load_csv(filename)
    # Initialize output data with column headers
    data = []
    proc = {}
    # Process data: proc[month][group] -> {"adj": ..., "unadj": ...}
    for row in inputdata:
        monthstr, value, group, is_adj_str = row
        monthnum = int(monthstr)
        if monthnum not in proc:
            proc[monthnum] = {}
        if group not in proc[monthnum]:
            proc[monthnum][group] = {"adj": None, "unadj": None}
        # "unadjust" is tested first so "seasonally unadjusted" strings
        # are not mistaken for adjusted values.
        if "unadjust" in is_adj_str.lower():
            proc[monthnum][group]["unadj"] = value
        elif "seasonal" in is_adj_str.lower():
            proc[monthnum][group]["adj"] = value
        else:
            msg = "Data row (below) does not specify seasonal adjustment " + \
                  "in {}\n{}".format(
                      filename,
                      ",".join(row)
                  )
            logger.error(msg)
            raise TypeError(msg)
    # Turn dictionaries into a data list for output
    # This order MUST match the provided schema order
    for monthnum, group in proc.items():
        for groupname, value in group.items():
            # Parse for any text fixes required (display-name substitutions)
            if groupname in cfg.TEXT_FIXES:
                data.append([monthnum,
                             actual_date(monthnum),
                             value["adj"],
                             value["unadj"],
                             cfg.TEXT_FIXES[groupname]])
            else:
                data.append([monthnum,
                             actual_date(monthnum),
                             value["adj"],
                             value["unadj"],
                             groupname])
    # Prep for output by sorting (by month number) and inserting a header
    data.sort()
    data.insert(0, output_schema)
    # Check if data exists and JSON-format
    if len(data) > 1:
        json = json_for_group_line_chart(data[1:])
        return True, data, json
    return True, [], []
# Process year-over-year files with groups
# (i.e. borrower age, income level, credit score)
# Output columns: "month", "date", "yoy_<type>", ... , "yoy_<type>"
def process_group_age_yoy(filename):
    """Calls process_group_yoy_groups with the borrower-age groups and an
    output schema extended with one "<group>_yoy" column per age group,
    then JSON-formats the result for the grouped bar chart."""
    postfix = "{}_yoy"
    output_schema = list(cfg.GROUP_YOY_OUTPUT_SCHEMA)
    output_schema += [postfix.format(gname) for gname in cfg.AGE_YOY_COLS]
    cond, data = process_group_yoy_groups(
        filename,
        cfg.AGE_YOY_IN,
        output_schema
    )
    # Format for JSON (only when at least one data row exists)
    json = []
    if len(data) > 1:
        json = json_for_group_bar_chart(
            data[1:],
            cfg.AGE_YOY_COLS,
            cfg.AGE_YOY_JSON
        )
    return cond, data, json
def process_group_income_yoy(filename):
    """Calls process_group_yoy_groups with the income-level groups and an
    output schema extended with one "<group>_yoy" column per income group,
    then JSON-formats the result for the grouped bar chart."""
    # Generate output schema from group YOY column names
    postfix = "{}_yoy"
    output_schema = list(cfg.GROUP_YOY_OUTPUT_SCHEMA)
    output_schema += [postfix.format(gname) for gname in cfg.INCOME_YOY_COLS]
    cond, data = process_group_yoy_groups(
        filename,
        cfg.INCOME_YOY_IN,
        output_schema
    )
    # Format for JSON (only when at least one data row exists)
    json = []
    if len(data) > 1:
        json = json_for_group_bar_chart(
            data[1:],
            cfg.INCOME_YOY_COLS,
            cfg.INCOME_YOY_JSON
        )
    return cond, data, json
def process_group_score_yoy(filename):
    """Processes credit-score-level year-over-year data via
    process_group_yoy_groups.

    Returns a (success, data, json) tuple; json is empty when no data rows
    exist beyond the header.
    """
    # Output schema is the shared YOY schema plus one "<group>_yoy" column
    # per score-level group
    group_cols = cfg.SCORE_YOY_COLS
    output_schema = list(cfg.GROUP_YOY_OUTPUT_SCHEMA)
    for gname in group_cols:
        output_schema.append("{}_yoy".format(gname))
    cond, data = process_group_yoy_groups(filename, cfg.SCORE_YOY_IN,
                                          output_schema)
    # Only JSON-format when at least one data row follows the header
    json = []
    if len(data) > 1:
        json = json_for_group_bar_chart(data[1:], group_cols,
                                        cfg.SCORE_YOY_JSON)
    return cond, data, json
def process_group_yoy_groups(filename, group_names, output_schema):
    """Processes specified group year-over-year file and outputs data
    per the provided output schema.

    Args:
        filename: path to the input CSV (rows of month, value, group).
        group_names: legal group names; also fixes output column order.
        output_schema: header row for the output data.

    Returns:
        (True, data) where data is [] when no rows were found; otherwise
        data[0] is output_schema and each following row is
        [month_number, date, <one value per group in group_names order>].

    Raises:
        TypeError: if a row names a group not present in group_names.
    """
    # Load specified file as input data
    inputdata = util.load_csv(filename)
    # Initialize output data with column headers
    data = []
    proc = {}
    # Process data
    for row in inputdata:
        monthstr, value, group = row
        monthnum = int(monthstr)
        if monthnum not in proc:
            proc[monthnum] = {name: None for name in group_names}
        if group in group_names:
            proc[monthnum][group] = value
        else:
            # BUG FIX: the message previously showed the filename where the
            # illegal group name belonged and omitted the group entirely
            msg = "Data row (below) contains illegal group " + \
                  "name '{}' in {}\n{}".format(group, filename, ",".join(row))
            logger.error(msg)
            raise TypeError(msg)
    # Turn dictionaries into a data list for output
    for monthnum, values in proc.items():
        data.append([monthnum, actual_date(monthnum)] +
                    [values[gname] for gname in group_names])
    # Prep for output by sorting (by month number) and inserting a header
    data.sort()
    data.insert(0, output_schema)
    # Check if data exists
    # Unlike other methods, the individual group calls handle the JSON
    if len(data) > 1:
        return True, data
    return True, []
def process_yoy_summary(filename, output_schema=cfg.YOY_SUMMARY_OUTPUT_SCHEMA):
    """Processes specified year-over-year summary file and outputs data
    per the provided output schema.

    Output columns: "month", "date", "yoy_num", "yoy_vol"

    Returns:
        (True, data, json) where data/json are empty when no rows exist.

    Raises:
        TypeError: if a row's type string matches no known variable type.
    """
    # Load specified file as input data
    inputdata = util.load_csv(filename)
    # Initialize output data
    data = []
    proc = {}
    # Process data
    for row in inputdata:
        monthstr, value, type_str = row
        monthnum = int(monthstr)
        if monthnum not in proc:
            proc[monthnum] = {"num": None, "vol": None}
        # Input column "group" is "Dollar Volume" or "Number of Loans"
        if "number" in type_str.lower():
            proc[monthnum]["num"] = value
        elif "volume" in type_str.lower():
            proc[monthnum]["vol"] = value
        elif "inquiry" in type_str.lower():
            # Ignore 'Inquiry Index' entries in current output
            pass
        elif "tightness" in type_str.lower():
            # Ignore 'Credit Tightness Index' entries in current output
            pass
        else:
            msg = "YOY Summary Data row (below) improperly " + \
                  "formatted in {}\n{}".format(filename, row)
            logger.error(msg)
            raise TypeError(msg)
    # Turn dictionaries into a data list for output
    # This order MUST match the provided schema order
    for monthnum, value in proc.items():
        data.append([monthnum,
                     actual_date(monthnum),
                     value["num"],
                     value["vol"]])
    # Prep for output by sorting (by month number) and inserting a header.
    # Insert a COPY of the schema so later mutation of the returned data
    # cannot corrupt the shared cfg.YOY_SUMMARY_OUTPUT_SCHEMA default.
    data.sort()
    data.insert(0, list(output_schema))
    # Check if data exists and JSON-format
    if len(data) > 1:
        json = json_for_bar_chart(data[1:])
        return True, data, json
    return True, [], []
# JSON output processing
def json_for_bar_chart(data):
    """Takes input data and returns formatted values for a JSON file.

    Rows with any non-numeric value ('NA', etc.) are discarded entirely so
    the two output series always stay the same length.
    """
    outnum = []
    outvol = []
    for month, date, yoy_num, yoy_vol in data:
        sec = util.epochtime(date, schema=cfg.DATA_FILE_DATE_SCHEMA)
        try:
            # Convert BOTH values before appending: previously a row whose
            # first value parsed but second did not left the series with
            # mismatched lengths
            num = float(yoy_num)
            vol = float(yoy_vol)
        except ValueError:
            logger.debug(
                "Ignore ValueError: Discard 'NA' and other non-float values"
            )
            continue
        msec = util.milliseconds(sec)
        outnum.append([msec, num])
        outvol.append([msec, vol])
    return {"Number of Loans": outnum, "Dollar Volume": outvol}
def json_for_group_bar_chart(data, val_cols, out_names):
    """Takes input data and returns formatted values for a JSON file.

    Args:
        data: rows of [month, date, v1, v2, ...] with one value column per
            entry in val_cols.
        val_cols: input column names, in the order values appear in rows.
        out_names: JSON output key names, parallel to val_cols.
    """
    tmp = {col: [] for col in val_cols}
    # Group bar charts (yoy) have a variable number of columns by groups
    for row in data:
        sec = util.epochtime(row[1])
        # Hoist the conversion out of the inner column loop
        msec = util.milliseconds(sec)
        for colnum, col in enumerate(val_cols):
            try:
                tmp[col].append([msec, float(row[2 + colnum])])
            except ValueError:
                logger.debug(
                    "Ignore ValueError: Discard 'NA' and other " +
                    "non-float values"
                )
                continue
    # Translate into JSON output columns.
    # NOTE: the previous `if idx < 0` guard was unreachable dead code --
    # list.index() raises ValueError rather than returning a negative index,
    # and every tmp key is drawn from val_cols by construction.
    out = {}
    for idx, col in enumerate(val_cols):
        out[out_names[idx]] = tmp[col][:]
    return out
def json_for_line_chart(data):
    """Takes input data and returns formatted values for a JSON file.

    Rows with any non-numeric value ('NA', etc.) are discarded entirely so
    the adjusted and unadjusted series always stay the same length.
    """
    out = {"adjusted": [], "unadjusted": []}
    for monthnum, date, v_adj, v_unadj in data:
        sec = util.epochtime(date)
        try:
            # Convert BOTH values before appending: previously a row whose
            # adjusted value parsed but unadjusted did not left the two
            # series with mismatched lengths
            adj = float(v_adj)
            unadj = float(v_unadj)
        except ValueError:
            logger.debug(
                "Ignore ValueError: Discard 'NA' and other non-float values"
            )
            continue
        msec = util.milliseconds(sec)
        out["adjusted"].append([msec, adj])
        out["unadjusted"].append([msec, unadj])
    return out
def json_for_group_line_chart(data):
    """Takes input data and returns formatted values for to a JSON file.

    Rows with any non-numeric value ('NA', etc.) are discarded entirely so
    each group's adjusted and unadjusted series always stay the same length.
    """
    # TODO: Maybe use the known global key groups to init groupname dicts once
    out = {}
    # Group line charts (vol/num) have the group name in the last column
    for month, date, v_adj, v_unadj, groupname in data:
        sec = util.epochtime(date)
        # JSON fix for age groups - strip off the "Age "
        if groupname.lower().find("age ") == 0:
            groupname = groupname[4:]
        # Initialize if first time groupname is encountered
        if groupname not in out:
            out[groupname] = {"adjusted": [], "unadjusted": []}
        try:
            # Convert BOTH values before appending: previously a row whose
            # adjusted value parsed but unadjusted did not left the two
            # series with mismatched lengths
            adj = float(v_adj)
            unadj = float(v_unadj)
        except ValueError:
            logger.debug(
                "Ignore ValueError: Discard 'NA' and other non-float values"
            )
            continue
        msec = util.milliseconds(sec)
        out[groupname]["adjusted"].append([msec, adj])
        out[groupname]["unadjusted"].append([msec, unadj])
    return out
def json_for_tile_map(data):
    """Takes input data and returns a list of dicts of state names and
    percentages for dumping to a JSON file:
    Input is a list of lists: [[FIPS code, state abbr, percentages],...]
    Output is list of dicts: [{"name": abbr, "value": percentage},...]
    """
    results = []
    for _code, state, raw in data:
        try:
            # Scale fraction to a percentage string with two decimals
            display = "{:0.2f}".format(float(raw) * 100)
        except ValueError:
            # Non-numeric values (e.g. 'NA') pass through unchanged
            logger.debug(
                "Ignore ValueError: Leave 'NA' as-is for states if found"
            )
            display = raw
        results.append({"name": state, "value": display})
    return results
def _format_yoy_change(value_yoy):
    """Formats a year-over-year change value as human-readable text,
    e.g. '3.2% increase' (descriptor chosen by the sign of the change)."""
    yoy = float(value_yoy)
    yoy_desc = cfg.PERCENT_CHANGE_DESCRIPTORS[yoy > 0]
    return "{:.1f}% {}".format(abs(yoy), yoy_desc)


def process_data_snapshot(filepath, date_schema=cfg.SNAPSHOT_DATE_SCHEMA):
    """Process a file at filepath that contains data snapshot information
    for all markets and prepare human-readable text for output.
    Returns a list of market-data dictionaries.

    Raises:
        ValueError: if a row contains an unrecognized variable name.
    """
    # Load specified file as input data
    inputdata = util.load_csv(filepath)
    logger.info("Loaded data snapshot file from {}".format(filepath))
    # Initialize output data
    market_info = {}
    for row in inputdata:
        # Unpack the row values
        monthnum, market, var_name, value, value_yoy = row
        monthnum = int(monthnum)
        var_name = var_name.lower()
        # Determine month string from month number
        month = actual_date(monthnum, schema=date_schema)
        # If first time seeing market, create sub-dict
        if market not in market_info:
            market_info[market] = {"market_key": market}
        # Handle the variable type
        # Each variable has value and value_yoy; the repeated YOY text
        # formatting is factored out into _format_yoy_change
        if "originations" in var_name:
            # Calculate originations
            orig_fmt = util.human_numbers(float(value), whole_units_only=1)
            # Store data for market
            market_info[market]["data_month"] = month
            market_info[market]["num_originations"] = orig_fmt
            market_info[market]["year_over_year_change"] = \
                _format_yoy_change(value_yoy)
        elif "volume" in var_name:
            vol_fmt = "${}".format(util.human_numbers(float(value)))
            market_info[market]["value_originations"] = vol_fmt
            # Volume month is the same as origination month
        elif "inquiry" in var_name:
            market_info[market]["inquiry_yoy_change"] = \
                _format_yoy_change(value_yoy)
            market_info[market]["inquiry_month"] = month
        elif "tightness" in var_name:
            market_info[market]["tightness_yoy_change"] = \
                _format_yoy_change(value_yoy)
            market_info[market]["tightness_month"] = month
        else:
            msg = "Data snapshot row (below) contains unknown " + \
                "var_name name '{}'\n{}".format(
                    var_name, ",".join(row)
                )
            logger.error(msg)
            raise ValueError(msg)
    # Return a real list so the result matches the documented contract
    # (dict.values() is only a view in Python 3)
    return list(market_info.values())
# Filenames are formatted as:
# "<prefix>_<market>.csv"
# NOTE: This global set must come after the methods are defined
# Maps each known filename prefix to its processing function.
FILE_PREFIXES = {
    "map_data": process_map,
    "num_data": process_num_summary,
    "vol_data": process_vol_summary,
    "volume_data_age_group": process_group_age_vol,
    "volume_data_income_level": process_group_income_vol,
    "volume_data_score_level": process_group_score_vol,
    "yoy_data_all": process_yoy_summary,
    "yoy_data_age_group": process_group_age_yoy,
    "yoy_data_income_level": process_group_income_yoy,
    "yoy_data_score_level": process_group_score_yoy,
    "inq_data": process_inquiry_index,
    "crt_data": process_tightness_index,
}
if __name__ == '__main__':
    import argparse

    # Build the command-line interface
    parser = argparse.ArgumentParser(
        description='Processes data files from the CFPB Office of Research.'
    )
    parser.add_argument(
        '-i', '--input-path',
        metavar="INPUTDIR",
        type=str,
        dest='inputdir',
        default=cfg.DEFAULT_INPUT_FOLDER,
        help='Specifies directory path for folder containing input data ' +
             'files (default: "{}")'.format(cfg.DEFAULT_INPUT_FOLDER)
    )
    parser.add_argument(
        '-o', '--output-path',
        metavar="OUTPUTDIR",
        type=str,
        dest='outputdir',
        default=cfg.DEFAULT_OUTPUT_FOLDER,
        help='Specifies directory path for root folder to put processed ' +
             'data (default: "{}")'.format(cfg.DEFAULT_OUTPUT_FOLDER)
    )
    parser.add_argument(
        '-d', '--data-snapshot-path',
        type=str,
        default='',
        dest='output_data_snapshot_file',
        help='Specifies path and filename for where to save data snapshot ' +
             'updates as json; if blank (default), no file will be saved'
    )
    args = parser.parse_args()

    # Parse the given paths
    inputdir, outputdir = load_paths(args.inputdir, args.outputdir)

    # Process the data
    snapshot_updates = process_data_files(
        inputdir,
        outputdir,
        data_snapshot_path=args.output_data_snapshot_file
    )
| [
"hillary.jeffrey@cfpb.gov"
] | hillary.jeffrey@cfpb.gov |
efceb7bfde0ca3da44a812a43f838b7ac79170bb | 79eb159b3ee36eb76bd921be24081708f44ac735 | /tests/test_codec.py | d9f73bcd0a34d29503f034d3199e6d5a2172f9d8 | [] | no_license | osh/PyAV | d7139f8faf7ee0973376db807e3b917863e9fb73 | 5fa85fd142ee8dabf01f4873e29678aeca153b4f | refs/heads/master | 2021-01-18T13:24:52.202662 | 2015-12-11T04:01:52 | 2015-12-11T04:01:52 | 47,802,016 | 1 | 0 | null | 2015-12-11T03:12:18 | 2015-12-11T03:12:18 | null | UTF-8 | Python | false | false | 620 | py | from .common import *
from av.codec import Codec
from av.video.format import VideoFormat
class TestCodecs(TestCase):
    """Sanity checks for av.codec.Codec lookup and metadata."""

    def test_codec_mpeg4(self):
        codec = Codec('mpeg4')
        # Identity and classification metadata
        self.assertEqual(codec.name, 'mpeg4')
        self.assertEqual(codec.long_name, 'MPEG-4 part 2')
        self.assertEqual(codec.type, 'video')
        self.assertEqual(codec.id, 13)
        self.assertTrue(codec.is_encoder)
        self.assertTrue(codec.is_decoder)
        # Supported pixel formats should be non-empty and include yuv420p
        formats = codec.video_formats
        self.assertTrue(formats)
        self.assertIsInstance(formats[0], VideoFormat)
        self.assertTrue(any(fmt.name == 'yuv420p' for fmt in formats))
| [
"github@mikeboers.com"
] | github@mikeboers.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.