| code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5) |
|---|---|---|---|
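Each row below is one record: the code cell holds the source as displayed, order_type marks the record as normal or flexible, original_example is a dict whose step-1 through step-5 fields reveal the code progressively (elided spans appear as the literal string `<mask token>`), and step_ids lists one id per step. The following is a minimal sketch of how such a record might be inspected; only the field names and the mask placeholder are taken from the rows shown here, and the record passed in is assumed to be one of these original_example dicts.

```python
# Minimal sketch: count how many "<mask token>" spans remain at each step of a record.
# Assumes only what the rows below show: keys "step-1".."step-5" and "step-ids" inside
# original_example, and the literal placeholder string "<mask token>".
MASK = "<mask token>"

def mask_counts(example):
    """Pair each step id with the number of masked spans left in that step."""
    counts = []
    for i, step_id in enumerate(example["step-ids"], start=1):
        counts.append((step_id, example["step-%d" % i].count(MASK)))
    return counts

# For the first record below this yields [(3, 2), (5, 2), (6, 1), (7, 0), (8, 0)].
```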
class Node:
    def __init__(self,data):
        self.data = data
        self.next = None

def Add(Head,data):
    Temp = Head
    while(Temp.next != None):
        Temp = Temp.next
    Temp.next = Node(data)
    # print(Temp.data)

def create(data):
    Head = Node(data)
    return Head

def printLL(Head):
    Temp = Head
    while(Temp != None):
        # input()
        print(Temp.data,end=" ")
        Temp = Temp.next
    print()

def Reverse(Head):
    Temp = Head
    TempNext = Head.next
    # curr = TempNext
    while(TempNext != None):
        NextSaved = TempNext.next
        TempNext.next = Temp

        Temp = TempNext
        TempNext = NextSaved

    Head.next = None
    Head = Temp
    return Head

if __name__ == '__main__':
    Head = create(5)
    Add(Head,6)
    Add(Head,7)
    Add(Head,8)
    Add(Head,9)
    Add(Head,10)
    printLL(Head)
    NewHead = Reverse(Head)
    printLL(NewHead)
|
normal
|
{
"blob_id": "ff137b51ea5b8c21e335a38a3d307a3302921245",
"index": 9993,
"step-1": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\n<mask token>\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\ndef Add(Head, data):\n Temp = Head\n while Temp.next != None:\n Temp = Temp.next\n Temp.next = Node(data)\n\n\ndef create(data):\n Head = Node(data)\n return Head\n\n\n<mask token>\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\ndef Add(Head, data):\n Temp = Head\n while Temp.next != None:\n Temp = Temp.next\n Temp.next = Node(data)\n\n\ndef create(data):\n Head = Node(data)\n return Head\n\n\ndef printLL(Head):\n Temp = Head\n while Temp != None:\n print(Temp.data, end=' ')\n Temp = Temp.next\n print()\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\ndef Add(Head, data):\n Temp = Head\n while Temp.next != None:\n Temp = Temp.next\n Temp.next = Node(data)\n\n\ndef create(data):\n Head = Node(data)\n return Head\n\n\ndef printLL(Head):\n Temp = Head\n while Temp != None:\n print(Temp.data, end=' ')\n Temp = Temp.next\n print()\n\n\ndef Reverse(Head):\n Temp = Head\n TempNext = Head.next\n while TempNext != None:\n NextSaved = TempNext.next\n TempNext.next = Temp\n Temp = TempNext\n TempNext = NextSaved\n Head.next = None\n Head = Temp\n return Head\n\n\nif __name__ == '__main__':\n Head = create(5)\n Add(Head, 6)\n Add(Head, 7)\n Add(Head, 8)\n Add(Head, 9)\n Add(Head, 10)\n printLL(Head)\n NewHead = Reverse(Head)\n printLL(NewHead)\n",
"step-5": "\nclass Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\ndef Add(Head,data):\n Temp = Head\n while(Temp.next != None):\n Temp = Temp.next\n Temp.next = Node(data)\n # print(Temp.data)\n\ndef create(data):\n Head = Node(data)\n return Head\n\ndef printLL(Head):\n Temp = Head\n while(Temp != None):\n # input()\n print(Temp.data,end=\" \")\n Temp = Temp.next\n print()\n\ndef Reverse(Head): \n Temp = Head\n TempNext = Head.next\n # curr = TempNext\n while(TempNext != None):\n NextSaved = TempNext.next\n TempNext.next = Temp\n \n Temp = TempNext\n TempNext = NextSaved\n \n Head.next = None\n Head = Temp\n return Head\n\nif __name__ == '__main__':\n Head = create(5)\n Add(Head,6)\n Add(Head,7)\n Add(Head,8)\n Add(Head,9)\n Add(Head,10)\n printLL(Head)\n NewHead = Reverse(Head)\n printLL(NewHead)\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
<|reserved_special_token_0|>
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
<|reserved_special_token_0|>
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
<|reserved_special_token_0|>
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
style.use('ggplot')
<|reserved_special_token_0|>
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
print('Possible options: ONGC.NS, ')
return
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
def Add_Features_x():
global df
df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100
df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
def Add_Features_y():
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
return
Set_Ticker()
Actual_Value()
Set_Date()
start_date += datetime.timedelta(weeks=-100)
Data_frame_Create()
Add_Features_x()
Forcast_Values()
Add_Features_y()
Setup_Validate_data()
Set_Model()
get_Accuracy()
Prediction()
print(stockTicker.partition('.')[0])
print('Accuracy: ' + str(accuracy * 100))
print('Next day value: ' + str(forecast_set[0]))
print(forecast_set)
print('3rd day value: ' + str(forecast_set[1]))
print('5th day value: ' + str(forecast_set[2]))
print('7th day value: ' + str(forecast_set[3]))
print('10th day value: ' + str(forecast_set[4]))
<|reserved_special_token_0|>
with open('mycsvfile.csv', 'wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
style.use('ggplot')
<|reserved_special_token_0|>
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
print('Possible options: ONGC.NS, ')
return
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
def Add_Features_x():
global df
df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100
df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
def Add_Features_y():
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
return
Set_Ticker()
Actual_Value()
Set_Date()
start_date += datetime.timedelta(weeks=-100)
Data_frame_Create()
Add_Features_x()
Forcast_Values()
Add_Features_y()
Setup_Validate_data()
Set_Model()
get_Accuracy()
Prediction()
print(stockTicker.partition('.')[0])
print('Accuracy: ' + str(accuracy * 100))
print('Next day value: ' + str(forecast_set[0]))
print(forecast_set)
print('3rd day value: ' + str(forecast_set[1]))
print('5th day value: ' + str(forecast_set[2]))
print('7th day value: ' + str(forecast_set[3]))
print('10th day value: ' + str(forecast_set[4]))
somedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay
=forecast_set[2])
with open('mycsvfile.csv', 'wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
<|reserved_special_token_1|>
import pandas as pd
import math, datetime
import time
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation, preprocessing, svm
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib import style
style.use('ggplot')
import datetime
from pandas_datareader import data
import csv
def Set_Ticker():
global stockTicker
stockTicker = 'ONGC.NS'
print('Possible options: ONGC.NS, ')
return
def Set_Date():
global end_date
global start_date
end_date = datetime.datetime(2017, 1, 30)
start_date = end_date
print(end_date)
return
def Actual_Value():
global df
print('The Actual Closing Value is Displayed below')
df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
ao = df['Close']
print(str(ao))
return
def Add_Features_x():
global df
df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100
df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100
df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
return
def Forcast_Values():
global forecast_out
global forecast_col
forecast_col = 'Close'
forecast_out = int(math.ceil(0.01 * len(df)))
return
def Add_Features_y():
df['label'] = df[forecast_col].shift(-forecast_out)
df.dropna(inplace=True)
return
def Setup_Validate_data():
global y
global X
global X_train, X_test, y_train, y_test
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,
y, test_size=0.2)
return
def Set_Model():
global clf
clf = LinearRegression()
clf.fit(X_train, y_train)
return
def get_Accuracy():
global accuracy
accuracy = clf.score(X_test, y_test)
return ()
def Prediction():
global X
X = X[:-forecast_out]
global X_lately
global forecast_set
X_lately = X[-forecast_out:]
forecast_set = clf.predict(X_lately)
def Data_frame_Create():
global df
df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
return
Set_Ticker()
Actual_Value()
Set_Date()
start_date += datetime.timedelta(weeks=-100)
Data_frame_Create()
Add_Features_x()
Forcast_Values()
Add_Features_y()
Setup_Validate_data()
Set_Model()
get_Accuracy()
Prediction()
print(stockTicker.partition('.')[0])
print('Accuracy: ' + str(accuracy * 100))
print('Next day value: ' + str(forecast_set[0]))
print(forecast_set)
print('3rd day value: ' + str(forecast_set[1]))
print('5th day value: ' + str(forecast_set[2]))
print('7th day value: ' + str(forecast_set[3]))
print('10th day value: ' + str(forecast_set[4]))
somedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay
=forecast_set[2])
with open('mycsvfile.csv', 'wb') as f:
w = csv.writer(f)
w.writerows(somedict.items())
<|reserved_special_token_1|>
#https://www.youtube.com/watch?v=CQ5kc_j4RjA

import pandas as pd
#import quandl
import math, datetime
import time
import numpy as np
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn import cross_validation, preprocessing, svm
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from matplotlib import style
style.use ('ggplot')
import datetime
from pandas_datareader import data
import csv


#Setting Companies
def Set_Ticker():
    global stockTicker
    stockTicker = 'ONGC.NS'
##    stockTicker = input("Enter the Ticker: ")
    print ("Possible options: ONGC.NS, ")
    return

def Set_Date():
    #Setting Date
    global end_date
    global start_date
##    end_date = input("Enter prediction date(YYYY-MM-DD):")
    end_date = datetime.datetime(2017,1,30)
    start_date = end_date
    print (end_date)
    return

def Actual_Value():
    #Actual Value
    global df
    print("The Actual Closing Value is Displayed below")
    df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')
    ao=df['Close']
    print (str(ao))
    return

def Add_Features_x():
    #Create Features - X
    global df
    df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100)
    df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100)
    df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]
    return

def Forcast_Values():
    #Forecast
    global forecast_out
    global forecast_col
    forecast_col = 'Close'
    forecast_out = int(math.ceil(0.01*len(df)))
    return

def Add_Features_y():
    #Label - y
    df['label'] = df[forecast_col].shift(-forecast_out)
    df.dropna(inplace=True)
    return

def Setup_Validate_data():
    #Set X and y
    global y
    global X
    global X_train, X_test, y_train, y_test
    X = np.array(df.drop(['label'],1))
    y = np.array(df['label'])
    #Split Training and Testing Data
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)
    return

def Set_Model():
    #Set Model for ML
    global clf
    clf = LinearRegression()
    clf.fit(X_train, y_train)
    return

def get_Accuracy():
    #Accuracy of Test Data
    global accuracy
    accuracy = clf.score(X_test, y_test)
    return()

def Prediction():
    #Predict Next Values
    global X
    X = X[:-forecast_out]
    global X_lately
    global forecast_set
    X_lately = X[-forecast_out:]
    forecast_set = clf.predict(X_lately)

def Data_frame_Create():
    #Creat a DataFrame
    global df
    df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)
##    df.plot(kind="box", subplots=True, layout=(1,6), sharex=False, sharey=False)
##    plt.show()
##    df.hist()
##    plt.show()
##    scatter_matrix(df)
##    plt.show()
    return


Set_Ticker()
Actual_Value()

#Setting Date
Set_Date()

#Gap of 1 month in time
#n = int(input("Enter the No. of Years in Months:"))
start_date += datetime.timedelta(weeks=-100)

#Creat a DataFrame
Data_frame_Create()

#Create Features - X
Add_Features_x()

#Forecast
Forcast_Values()

#Label - y
Add_Features_y()

#Split Training and Testing Data
Setup_Validate_data()

#Set Model for ML
Set_Model()

#Accuracy of Test Data
get_Accuracy()

#Predict Next Values
Prediction()

print (stockTicker.partition('.')[0])
##print ("Start Date:" + str(start_date))
print ("Accuracy: " + str(accuracy*100))
print ("Next day value: "+ str(forecast_set[0]))
print (forecast_set)
print ("3rd day value: "+ str(forecast_set[1]))
print ("5th day value: "+ str(forecast_set[2]))
print ("7th day value: "+ str(forecast_set[3]))
print ("10th day value: "+ str(forecast_set[4]))

##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]}
##print (dict)

somedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2])

with open('mycsvfile.csv','wb') as f:
    w = csv.writer(f)
    w.writerows(somedict.items())
|
flexible
|
{
"blob_id": "9c4676edbeef3748a4947f827fefa29e95674bfa",
"index": 121,
"step-1": "<mask token>\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\n<mask token>\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\n<mask token>\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\n<mask token>\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\n<mask token>\n",
"step-2": "<mask token>\nstyle.use('ggplot')\n<mask token>\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\n<mask token>\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n",
"step-3": "<mask token>\nstyle.use('ggplot')\n<mask token>\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\nsomedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay\n =forecast_set[2])\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n",
"step-4": "import pandas as pd\nimport math, datetime\nimport time\nimport numpy as np\nfrom pandas.tools.plotting import scatter_matrix\nimport matplotlib.pyplot as plt\nfrom sklearn import cross_validation, preprocessing, svm\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom matplotlib import style\nstyle.use('ggplot')\nimport datetime\nfrom pandas_datareader import data\nimport csv\n\n\ndef Set_Ticker():\n global stockTicker\n stockTicker = 'ONGC.NS'\n print('Possible options: ONGC.NS, ')\n return\n\n\ndef Set_Date():\n global end_date\n global start_date\n end_date = datetime.datetime(2017, 1, 30)\n start_date = end_date\n print(end_date)\n return\n\n\ndef Actual_Value():\n global df\n print('The Actual Closing Value is Displayed below')\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\n ao = df['Close']\n print(str(ao))\n return\n\n\ndef Add_Features_x():\n global df\n df['OC_Change'] = df['Close'] - df['Open'] / df['Open'] * 100\n df['HL_Change'] = df['High'] - df['Low'] / df['Low'] * 100\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\n return\n\n\ndef Forcast_Values():\n global forecast_out\n global forecast_col\n forecast_col = 'Close'\n forecast_out = int(math.ceil(0.01 * len(df)))\n return\n\n\ndef Add_Features_y():\n df['label'] = df[forecast_col].shift(-forecast_out)\n df.dropna(inplace=True)\n return\n\n\ndef Setup_Validate_data():\n global y\n global X\n global X_train, X_test, y_train, y_test\n X = np.array(df.drop(['label'], 1))\n y = np.array(df['label'])\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,\n y, test_size=0.2)\n return\n\n\ndef Set_Model():\n global clf\n clf = LinearRegression()\n clf.fit(X_train, y_train)\n return\n\n\ndef get_Accuracy():\n global accuracy\n accuracy = clf.score(X_test, y_test)\n return ()\n\n\ndef Prediction():\n global X\n X = X[:-forecast_out]\n global X_lately\n global forecast_set\n X_lately = X[-forecast_out:]\n forecast_set = clf.predict(X_lately)\n\n\ndef Data_frame_Create():\n global df\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\n return\n\n\nSet_Ticker()\nActual_Value()\nSet_Date()\nstart_date += datetime.timedelta(weeks=-100)\nData_frame_Create()\nAdd_Features_x()\nForcast_Values()\nAdd_Features_y()\nSetup_Validate_data()\nSet_Model()\nget_Accuracy()\nPrediction()\nprint(stockTicker.partition('.')[0])\nprint('Accuracy: ' + str(accuracy * 100))\nprint('Next day value: ' + str(forecast_set[0]))\nprint(forecast_set)\nprint('3rd day value: ' + str(forecast_set[1]))\nprint('5th day value: ' + str(forecast_set[2]))\nprint('7th day value: ' + str(forecast_set[3]))\nprint('10th day value: ' + str(forecast_set[4]))\nsomedict = dict(NextDay=forecast_set[0], ThirdDay=forecast_set[1], FifthDay\n =forecast_set[2])\nwith open('mycsvfile.csv', 'wb') as f:\n w = csv.writer(f)\n w.writerows(somedict.items())\n",
"step-5": "#https://www.youtube.com/watch?v=CQ5kc_j4RjA\r\n\r\nimport pandas as pd\r\n#import quandl\r\nimport math, datetime\r\nimport time\r\nimport numpy as np\r\nfrom pandas.tools.plotting import scatter_matrix\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import cross_validation, preprocessing, svm\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.svm import SVC\r\nfrom matplotlib import style\r\nstyle.use ('ggplot')\r\nimport datetime\r\nfrom pandas_datareader import data\r\nimport csv\r\n\r\n\r\n#Setting Companies\r\ndef Set_Ticker():\r\n global stockTicker\r\n stockTicker = 'ONGC.NS'\r\n## stockTicker = input(\"Enter the Ticker: \")\r\n print (\"Possible options: ONGC.NS, \")\r\n return \r\n\r\ndef Set_Date():\r\n #Setting Date\r\n global end_date\r\n global start_date\r\n## end_date = input(\"Enter prediction date(YYYY-MM-DD):\")\r\n end_date = datetime.datetime(2017,1,30)\r\n start_date = end_date\r\n print (end_date)\r\n return\r\n\r\n\r\ndef Actual_Value():\r\n #Actual Value\r\n global df\r\n print(\"The Actual Closing Value is Displayed below\")\r\n df = data.DataReader(stockTicker, 'yahoo', '2017-01-28', '2017-02-5')\r\n ao=df['Close']\r\n print (str(ao))\r\n return\r\n\r\n\r\ndef Add_Features_x():\r\n #Create Features - X\r\n global df\r\n df ['OC_Change'] = (df['Close']-df['Open']/df['Open']*100)\r\n df ['HL_Change'] = (df['High']-df['Low']/df['Low']*100)\r\n df = df[['Close', 'HL_Change', 'OC_Change', 'Volume']]\r\n return\r\n\r\ndef Forcast_Values():\r\n #Forecast\r\n global forecast_out\r\n global forecast_col\r\n forecast_col = 'Close'\r\n forecast_out = int(math.ceil(0.01*len(df)))\r\n return\r\n\r\ndef Add_Features_y():\r\n #Label - y\r\n df['label'] = df[forecast_col].shift(-forecast_out)\r\n df.dropna(inplace=True)\r\n return\r\n\r\ndef Setup_Validate_data():\r\n #Set X and y \r\n global y\r\n global X\r\n global X_train, X_test, y_train, y_test\r\n X = np.array(df.drop(['label'],1))\r\n y = np.array(df['label'])\r\n #Split Training and Testing Data\r\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)\r\n return\r\n\r\ndef Set_Model():\r\n #Set Model for ML\r\n global clf\r\n clf = LinearRegression()\r\n clf.fit(X_train, y_train)\r\n return\r\n\r\ndef get_Accuracy():\r\n #Accuracy of Test Data\r\n global accuracy\r\n accuracy = clf.score(X_test, y_test)\r\n return()\r\n\r\ndef Prediction():\r\n #Predict Next Values\r\n global X\r\n X = X[:-forecast_out]\r\n global X_lately\r\n global forecast_set\r\n X_lately = X[-forecast_out:]\r\n forecast_set = clf.predict(X_lately)\r\n\r\ndef Data_frame_Create():\r\n #Creat a DataFrame \r\n global df\r\n df = data.DataReader(stockTicker, 'yahoo', start_date, end_date)\r\n## df.plot(kind=\"box\", subplots=True, layout=(1,6), sharex=False, sharey=False)\r\n## plt.show()\r\n## df.hist()\r\n## plt.show()\r\n## scatter_matrix(df)\r\n## plt.show()\r\n return\r\n\r\n\r\nSet_Ticker()\r\nActual_Value()\r\n\r\n#Setting Date\r\nSet_Date()\r\n\r\n#Gap of 1 month in time\r\n#n = int(input(\"Enter the No. 
of Years in Months:\"))\r\nstart_date += datetime.timedelta(weeks=-100)\r\n\r\n#Creat a DataFrame\r\nData_frame_Create() \r\n\r\n#Create Features - X\r\nAdd_Features_x()\r\n\r\n#Forecast\r\nForcast_Values()\r\n\r\n#Label - y\r\nAdd_Features_y()\r\n\r\n#Split Training and Testing Data\r\nSetup_Validate_data()\r\n\r\n#Set Model for ML\r\nSet_Model()\r\n\r\n#Accuracy of Test Data\r\nget_Accuracy()\r\n\r\n#Predict Next Values\r\nPrediction()\r\n \r\nprint (stockTicker.partition('.')[0])\r\n##print (\"Start Date:\" + str(start_date))\r\nprint (\"Accuracy: \" + str(accuracy*100))\r\nprint (\"Next day value: \"+ str(forecast_set[0]))\r\nprint (forecast_set)\r\nprint (\"3rd day value: \"+ str(forecast_set[1]))\r\nprint (\"5th day value: \"+ str(forecast_set[2]))\r\nprint (\"7th day value: \"+ str(forecast_set[3]))\r\nprint (\"10th day value: \"+ str(forecast_set[4]))\r\n\r\n##dict = {'Next Day':forecast_set[0],'3rd Day':forecast_set[1],'5th Day':forecast_set[2]}\r\n##print (dict)\r\n\r\nsomedict = dict(NextDay=forecast_set[0],ThirdDay=forecast_set[1],FifthDay=forecast_set[2])\r\n\r\nwith open('mycsvfile.csv','wb') as f:\r\n w = csv.writer(f)\r\n w.writerows(somedict.items())\r\n",
"step-ids": [
5,
12,
13,
14,
15
]
}
|
[
5,
12,
13,
14,
15
] |
<|reserved_special_token_0|>
class NovelsSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NovelsSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
def detail_parse(self, response):
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = '\n'.join(novel).replace(' ', ' ')
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
yield item
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class NovelsSpider(scrapy.Spider):
name = 'novels'
allowed_domains = ['xbiquge.la']
start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
def detail_parse(self, response):
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = '\n'.join(novel).replace(' ', ' ')
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
yield item
<|reserved_special_token_1|>
import scrapy
import os
from topdb.items import BiqugeItem
class NovelsSpider(scrapy.Spider):
name = 'novels'
allowed_domains = ['xbiquge.la']
start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']
def parse(self, response):
path = '/Users/qx/Documents/小说/new/'
all = response.xpath(".//div[@class='novellist']")
for oneitem in all:
classname = oneitem.xpath('./h2/text()').extract_first()
if classname == '奇幻小说、玄幻小说大全列表':
classname = 'xuanhuan'
if classname == '历史小说、军事小说、穿越小说大全列表':
classname = 'chuanyue'
if classname == '武侠小说、仙侠小说、修真小说大全列表':
classname = 'xiuzhen'
if classname == '言情小说、都市小说大全列表':
classname = 'dushi'
if classname == '异灵小说、科幻小说大全列表':
classname = 'kehuan'
if classname == '游戏小说、竞技小说、网游小说大全列表':
classname = 'wangyou'
urls = oneitem.xpath('./ul/li/a/@href').extract()
names = oneitem.xpath('./ul/li/a/text()').extract()
for i in range(len(urls)):
url = urls[i]
name = names[i]
yield scrapy.Request(url, meta={'name': name, 'classname':
classname}, callback=self.url_parse)
def url_parse(self, response):
print('小说章节')
path = '/Users/qx/Documents/小说/new/'
name = response.meta['name']
classname = response.meta['classname']
author = response.xpath("//div[@id ='info']/p/text()").extract_first()
if author:
author = author.split(':', 1)[1]
print(name + '-' + author)
listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()"
).extract()
for i in range(len(listurls)):
url = 'http://www.xbiquge.la' + listurls[i]
chaptername = chapternames[i]
oldname = path + classname + '/' + name + '-作者:' + author
newname = path + classname + '/' + name
if os.path.exists(oldname):
os.rename(oldname, newname)
if not os.path.exists(newname):
os.makedirs(newname)
if not os.path.exists(newname + '/' + str(i) + '.txt'):
yield scrapy.Request(url, meta={'chaptername': chaptername,
'tag': classname, 'name': name, 'author': author,
'index': i}, callback=self.detail_parse)
def detail_parse(self, response):
tag = response.meta['tag']
name = response.meta['name']
author = response.meta['author']
chaptername = response.meta['chaptername']
index = response.meta['index']
item = BiqugeItem()
novel = response.xpath("//div[@id='content']/text()").extract()
item['novel'] = '\n'.join(novel).replace(' ', ' ')
item['name'] = name
item['tag'] = tag
item['author'] = author
item['chapter'] = chaptername
item['index'] = index
yield item
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import scrapy

import os
from topdb.items import BiqugeItem

class NovelsSpider(scrapy.Spider):
    name = 'novels'
    allowed_domains = ['xbiquge.la']
    start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']

    def parse(self, response):
        # Novel categories
        path = '/Users/qx/Documents/小说/new/'

        all=response.xpath(".//div[@class='novellist']")

        for oneitem in all:

            classname=oneitem.xpath('./h2/text()').extract_first()
            if classname=='奇幻小说、玄幻小说大全列表':
                classname='xuanhuan'
            if classname=='历史小说、军事小说、穿越小说大全列表':
                classname='chuanyue'
            if classname=='武侠小说、仙侠小说、修真小说大全列表':
                classname='xiuzhen'
            if classname=='言情小说、都市小说大全列表':
                classname='dushi'
            if classname=='异灵小说、科幻小说大全列表':
                classname='kehuan'
            if classname=='游戏小说、竞技小说、网游小说大全列表':
                classname='wangyou'

            urls=oneitem.xpath('./ul/li/a/@href').extract()

            names=oneitem.xpath('./ul/li/a/text()').extract()

            for i in range(len(urls)):
                url=urls[i]
                name=names[i]
                yield scrapy.Request(url, meta={'name': name, 'classname': classname}, callback=self.url_parse)

    def url_parse(self, response):
        # Novel chapter list
        print('小说章节')
        path = '/Users/qx/Documents/小说/new/'

        name = response.meta['name']
        classname = response.meta['classname']

        author = response.xpath("//div[@id ='info']/p/text()").extract_first()

        if author:
            author=author.split(':',1)[1]

        print(name+'-'+author)

        listurls = response.xpath("//div[@id ='list']/dl/dd/a/@href").extract()
        chapternames = response.xpath("//div[@id ='list']/dl/dd/a/text()").extract()

        for i in range(len(listurls)):
            url = "http://www.xbiquge.la" + listurls[i]
            chaptername=chapternames[i]

            oldname=path+ classname+'/'+name+ '-作者:' + author
            newname=path+ classname+'/'+name

            if (os.path.exists(oldname)):
                os.rename(oldname,newname)

            if (not os.path.exists(newname)):
                os.makedirs(newname)

            if(not os.path.exists(newname+'/'+ str(i) + ".txt")):
                yield scrapy.Request(url, meta={'chaptername':chaptername,'tag':classname,'name':name,'author':author,'index':i}, callback=self.detail_parse)

    def detail_parse(self, response):
        # Full chapter content
        tag = response.meta['tag']
        name = response.meta['name']
        author = response.meta['author']
        chaptername = response.meta['chaptername']
        index = response.meta['index']

        item = BiqugeItem()

        novel = response.xpath("//div[@id='content']/text()").extract()
        item['novel'] = "\n".join(novel).replace(" ", " ")
        item['name'] = name
        item['tag'] = tag
        item['author'] = author
        item['chapter'] = chaptername
        item['index'] = index

        # print(item['classname'])
        # print(item['name'])
        # print(item['title'])
        # print('\n')
        yield item

    # This crawls the entire site, category by category, but the key issue is that crawling is too slow; Scrapy is asynchronous, and multithreading still needs to be looked into so the speed can be improved.
|
flexible
|
{
"blob_id": "af668751074df6f182c7121821587270734ea5af",
"index": 1075,
"step-1": "<mask token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NovelsSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"step-3": "<mask token>\n\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"step-4": "import scrapy\nimport os\nfrom topdb.items import BiqugeItem\n\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n path = '/Users/qx/Documents/小说/new/'\n all = response.xpath(\".//div[@class='novellist']\")\n for oneitem in all:\n classname = oneitem.xpath('./h2/text()').extract_first()\n if classname == '奇幻小说、玄幻小说大全列表':\n classname = 'xuanhuan'\n if classname == '历史小说、军事小说、穿越小说大全列表':\n classname = 'chuanyue'\n if classname == '武侠小说、仙侠小说、修真小说大全列表':\n classname = 'xiuzhen'\n if classname == '言情小说、都市小说大全列表':\n classname = 'dushi'\n if classname == '异灵小说、科幻小说大全列表':\n classname = 'kehuan'\n if classname == '游戏小说、竞技小说、网游小说大全列表':\n classname = 'wangyou'\n urls = oneitem.xpath('./ul/li/a/@href').extract()\n names = oneitem.xpath('./ul/li/a/text()').extract()\n for i in range(len(urls)):\n url = urls[i]\n name = names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname':\n classname}, callback=self.url_parse)\n\n def url_parse(self, response):\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n name = response.meta['name']\n classname = response.meta['classname']\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n if author:\n author = author.split(':', 1)[1]\n print(name + '-' + author)\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\"\n ).extract()\n for i in range(len(listurls)):\n url = 'http://www.xbiquge.la' + listurls[i]\n chaptername = chapternames[i]\n oldname = path + classname + '/' + name + '-作者:' + author\n newname = path + classname + '/' + name\n if os.path.exists(oldname):\n os.rename(oldname, newname)\n if not os.path.exists(newname):\n os.makedirs(newname)\n if not os.path.exists(newname + '/' + str(i) + '.txt'):\n yield scrapy.Request(url, meta={'chaptername': chaptername,\n 'tag': classname, 'name': name, 'author': author,\n 'index': i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n item = BiqugeItem()\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = '\\n'.join(novel).replace(' ', ' ')\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\n\nimport os\nfrom topdb.items import BiqugeItem\n\nclass NovelsSpider(scrapy.Spider):\n name = 'novels'\n allowed_domains = ['xbiquge.la']\n start_urls = ['http://www.xbiquge.la/xiaoshuodaquan/']\n\n def parse(self, response):\n # 小说分类\n path = '/Users/qx/Documents/小说/new/'\n\n all=response.xpath(\".//div[@class='novellist']\")\n\n for oneitem in all:\n\n classname=oneitem.xpath('./h2/text()').extract_first()\n if classname=='奇幻小说、玄幻小说大全列表':\n classname='xuanhuan'\n if classname=='历史小说、军事小说、穿越小说大全列表':\n classname='chuanyue'\n if classname=='武侠小说、仙侠小说、修真小说大全列表':\n classname='xiuzhen'\n if classname=='言情小说、都市小说大全列表':\n classname='dushi'\n if classname=='异灵小说、科幻小说大全列表':\n classname='kehuan'\n if classname=='游戏小说、竞技小说、网游小说大全列表':\n classname='wangyou'\n\n urls=oneitem.xpath('./ul/li/a/@href').extract()\n\n names=oneitem.xpath('./ul/li/a/text()').extract()\n\n for i in range(len(urls)):\n url=urls[i]\n name=names[i]\n yield scrapy.Request(url, meta={'name': name, 'classname': classname}, callback=self.url_parse)\n\n\n def url_parse(self, response):\n # 小说章节列表\n print('小说章节')\n path = '/Users/qx/Documents/小说/new/'\n\n name = response.meta['name']\n classname = response.meta['classname']\n\n author = response.xpath(\"//div[@id ='info']/p/text()\").extract_first()\n\n if author:\n author=author.split(':',1)[1]\n\n print(name+'-'+author)\n\n listurls = response.xpath(\"//div[@id ='list']/dl/dd/a/@href\").extract()\n chapternames = response.xpath(\"//div[@id ='list']/dl/dd/a/text()\").extract()\n\n for i in range(len(listurls)):\n url = \"http://www.xbiquge.la\" + listurls[i]\n chaptername=chapternames[i]\n\n oldname=path+ classname+'/'+name+ '-作者:' + author\n newname=path+ classname+'/'+name\n\n if (os.path.exists(oldname)):\n os.rename(oldname,newname)\n\n if (not os.path.exists(newname)):\n os.makedirs(newname)\n\n if(not os.path.exists(newname+'/'+ str(i) + \".txt\")):\n yield scrapy.Request(url, meta={'chaptername':chaptername,'tag':classname,'name':name,'author':author,'index':i}, callback=self.detail_parse)\n\n def detail_parse(self, response):\n # 章节详细内容\n\n tag = response.meta['tag']\n name = response.meta['name']\n author = response.meta['author']\n chaptername = response.meta['chaptername']\n index = response.meta['index']\n\n item = BiqugeItem()\n\n novel = response.xpath(\"//div[@id='content']/text()\").extract()\n item['novel'] = \"\\n\".join(novel).replace(\" \", \" \")\n item['name'] = name\n item['tag'] = tag\n item['author'] = author\n item['chapter'] = chaptername\n item['index'] = index\n\n # print(item['classname'])\n # print(item['name'])\n # print(item['title'])\n # print('\\n')\n yield item\n\n # 这里是爬取整个网站且按照分类进行爬取 但是重点是 爬取太慢scrapy 是异步操作 还需要了解一下多线程的问题 这样速度能更快些\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 3.0.5 on 2020-05-02 18:58

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('weatherData', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='city',
            name='username',
            field=models.CharField(default='test@gmail.com', max_length=100),
        ),
    ]
|
normal
|
{
"blob_id": "6b6b734c136f3c4ed5b2789ab384bab9a9ea7b58",
"index": 9368,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('weatherData', '0001_initial')]\n operations = [migrations.AddField(model_name='city', name='username',\n field=models.CharField(default='test@gmail.com', max_length=100))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('weatherData', '0001_initial')]\n operations = [migrations.AddField(model_name='city', name='username',\n field=models.CharField(default='test@gmail.com', max_length=100))]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-05-02 18:58\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('weatherData', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='city',\n name='username',\n field=models.CharField(default='test@gmail.com', max_length=100),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>


def downgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(1024))
    op.alter_column('auto_result', 'skip', type_=ty.String(65535))
    op.alter_column('auto_result', 'failure', type_=ty.String(65535))
    op.alter_column('auto_result', 'comment', type_=ty.String(65535))
    op.alter_column('manual_result', 'comment', type_=ty.String(65535))
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def upgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(65535))
    op.alter_column('auto_result', 'skip', type_=ty.Text())
    op.alter_column('auto_result', 'failure', type_=ty.Text())
    op.alter_column('auto_result', 'comment', type_=ty.Text())
    op.alter_column('manual_result', 'comment', type_=ty.Text())


def downgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(1024))
    op.alter_column('auto_result', 'skip', type_=ty.String(65535))
    op.alter_column('auto_result', 'failure', type_=ty.String(65535))
    op.alter_column('auto_result', 'comment', type_=ty.String(65535))
    op.alter_column('manual_result', 'comment', type_=ty.String(65535))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'
<|reserved_special_token_0|>


def upgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(65535))
    op.alter_column('auto_result', 'skip', type_=ty.Text())
    op.alter_column('auto_result', 'failure', type_=ty.Text())
    op.alter_column('auto_result', 'comment', type_=ty.Text())
    op.alter_column('manual_result', 'comment', type_=ty.Text())


def downgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(1024))
    op.alter_column('auto_result', 'skip', type_=ty.String(65535))
    op.alter_column('auto_result', 'failure', type_=ty.String(65535))
    op.alter_column('auto_result', 'comment', type_=ty.String(65535))
    op.alter_column('manual_result', 'comment', type_=ty.String(65535))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.types as ty


def upgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(65535))
    op.alter_column('auto_result', 'skip', type_=ty.Text())
    op.alter_column('auto_result', 'failure', type_=ty.Text())
    op.alter_column('auto_result', 'comment', type_=ty.Text())
    op.alter_column('manual_result', 'comment', type_=ty.Text())


def downgrade():
    op.alter_column('run', 'polarion_id', type_=ty.String(1024))
    op.alter_column('auto_result', 'skip', type_=ty.String(65535))
    op.alter_column('auto_result', 'failure', type_=ty.String(65535))
    op.alter_column('auto_result', 'comment', type_=ty.String(65535))
    op.alter_column('manual_result', 'comment', type_=ty.String(65535))
<|reserved_special_token_1|>
"""empty message

Revision ID: 6374505f9e6e
Revises: 9dc91bb7d2ba
Create Date: 2016-11-14 10:55:08.418923

"""

# revision identifiers, used by Alembic.
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'

from alembic import op
import sqlalchemy as sa
import sqlalchemy.types as ty


def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('run', 'polarion_id', type_=ty.String(65535))
    op.alter_column('auto_result', 'skip', type_=ty.Text())
    op.alter_column('auto_result', 'failure', type_=ty.Text())
    op.alter_column('auto_result', 'comment', type_=ty.Text())
    op.alter_column('manual_result', 'comment', type_=ty.Text())
    ### end Alembic commands ###


def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('run', 'polarion_id', type_=ty.String(1024))
    op.alter_column('auto_result', 'skip', type_=ty.String(65535))
    op.alter_column('auto_result', 'failure', type_=ty.String(65535))
    op.alter_column('auto_result', 'comment', type_=ty.String(65535))
    op.alter_column('manual_result', 'comment', type_=ty.String(65535))
    ### end Alembic commands ###
|
flexible
|
{
"blob_id": "7badb7c9f1e00dfc379468b7bd73a3f09bffe6de",
"index": 1191,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-3": "<mask token>\nrevision = '6374505f9e6e'\ndown_revision = '9dc91bb7d2ba'\n<mask token>\n\n\ndef upgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-4": "<mask token>\nrevision = '6374505f9e6e'\ndown_revision = '9dc91bb7d2ba'\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy.types as ty\n\n\ndef upgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n\n\ndef downgrade():\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 6374505f9e6e\nRevises: 9dc91bb7d2ba\nCreate Date: 2016-11-14 10:55:08.418923\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6374505f9e6e'\ndown_revision = '9dc91bb7d2ba'\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy.types as ty\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('run', 'polarion_id', type_=ty.String(65535))\n op.alter_column('auto_result', 'skip', type_=ty.Text())\n op.alter_column('auto_result', 'failure', type_=ty.Text())\n op.alter_column('auto_result', 'comment', type_=ty.Text())\n op.alter_column('manual_result', 'comment', type_=ty.Text())\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('run', 'polarion_id', type_=ty.String(1024))\n op.alter_column('auto_result', 'skip', type_=ty.String(65535))\n op.alter_column('auto_result', 'failure', type_=ty.String(65535))\n op.alter_column('auto_result', 'comment', type_=ty.String(65535))\n op.alter_column('manual_result', 'comment', type_=ty.String(65535))\n ### end Alembic commands ###\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
<|reserved_special_token_0|>
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
<|reserved_special_token_0|>
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
<|reserved_special_token_0|>
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
<|reserved_special_token_0|>
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Expression)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
<|reserved_special_token_0|>
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Expression)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
@addToClass(BinaryExpression)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def addToClass(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def print_intended(to_print, intend):
print(intend * '| ' + to_print)
class TreePrinter:
@addToClass(Node)
def printTree(self, indent=0):
raise Exception('printTree not defined in class ' + self.__class__.
__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Expression)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended('get_element', indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
@addToClass(BinaryExpression)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
<|reserved_special_token_1|>
from .ast import *
# noinspection PyPep8Naming
def addToClass(cls):
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def print_intended(to_print, intend):
print(intend * "| " + to_print)
# noinspection PyPep8Naming,PyUnresolvedReferences
class TreePrinter:
# General
@addToClass(Node)
def printTree(self, indent=0):
raise Exception("printTree not defined in class " + self.__class__.__name__)
@addToClass(Instruction)
def printTree(self, indent=0):
print_intended(self.type, indent)
@addToClass(Expression)
def printTree(self, indent=0):
print_intended(self.type, indent)
# Instructions
@addToClass(Block)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.instructions is not None:
self.instructions.printTree(indent + 1)
@addToClass(Assignment)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(For)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.variable.printTree(indent + 1)
self.range.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(While)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
self.instruction.printTree(indent + 1)
@addToClass(If)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.condition.printTree(indent + 1)
print_intended('then', indent)
self.if_block.printTree(indent + 1)
if self.else_block is not None:
print_intended('else', indent)
self.else_block.printTree(indent + 1)
@addToClass(Print)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.args.printTree(indent + 1)
@addToClass(Return)
def printTree(self, indent=0):
print_intended(self.type, indent)
if self.args is not None:
self.args.printTree(indent + 1)
@addToClass(ArrayElement)
def printTree(self, indent=0):
print_intended("get_element", indent)
self.array.printTree(indent + 1)
self.ids.printTree(indent + 1)
# Expressions
@addToClass(Value)
def printTree(self, indent=0):
print_intended(str(self.value), indent)
@addToClass(Array)
def printTree(self, indent=0):
if self.list is not None:
print_intended('array', indent)
self.list.printTree(indent + 1)
else:
print_intended('empty_array', indent)
@addToClass(BinaryExpression)
def printTree(self, indent=0):
print_intended(self.operator, indent)
self.left.printTree(indent + 1)
self.right.printTree(indent + 1)
@addToClass(MatrixFunction)
def printTree(self, indent=0):
print_intended(self.function, indent)
self.parameter.printTree(indent + 1)
@addToClass(UnaryMinus)
def printTree(self, indent=0):
print_intended('-', indent)
self.value.printTree(indent + 1)
@addToClass(Transpose)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.value.printTree(indent + 1)
# Other
@addToClass(Program)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.instructions_opt.printTree(indent + 1)
@addToClass(Identifier)
def printTree(self, indent=0):
print_intended(self.name, indent)
@addToClass(Range)
def printTree(self, indent=0):
print_intended(self.type, indent)
self.start_value.printTree(indent + 1)
self.end_value.printTree(indent + 1)
@addToClass(List)
def printTree(self, indent=0):
for element in self.elements:
element.printTree(indent)
|
flexible
|
{
"blob_id": "1084478226777b9259274e053984ac34d461198d",
"index": 42,
"step-1": "<mask token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n <mask token>\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n <mask token>\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n <mask token>\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n <mask token>\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n",
"step-2": "<mask token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n <mask token>\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n",
"step-3": "<mask token>\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n",
"step-4": "<mask token>\n\n\ndef addToClass(cls):\n\n def decorator(func):\n setattr(cls, func.__name__, func)\n return func\n return decorator\n\n\ndef print_intended(to_print, intend):\n print(intend * '| ' + to_print)\n\n\nclass TreePrinter:\n\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception('printTree not defined in class ' + self.__class__.\n __name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended('get_element', indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in self.elements:\n element.printTree(indent)\n",
"step-5": "from .ast import *\n\n\n# noinspection PyPep8Naming\ndef addToClass(cls):\n def decorator(func):\n setattr(cls, func.__name__, func)\n return func\n\n return decorator\n\n\ndef print_intended(to_print, intend):\n print(intend * \"| \" + to_print)\n\n\n# noinspection PyPep8Naming,PyUnresolvedReferences\nclass TreePrinter:\n\n # General\n @addToClass(Node)\n def printTree(self, indent=0):\n raise Exception(\"printTree not defined in class \" + self.__class__.__name__)\n\n @addToClass(Instruction)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n @addToClass(Expression)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n\n # Instructions\n @addToClass(Block)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.instructions is not None:\n self.instructions.printTree(indent + 1)\n\n @addToClass(Assignment)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(For)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.variable.printTree(indent + 1)\n self.range.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(While)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n self.instruction.printTree(indent + 1)\n\n @addToClass(If)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.condition.printTree(indent + 1)\n print_intended('then', indent)\n self.if_block.printTree(indent + 1)\n if self.else_block is not None:\n print_intended('else', indent)\n self.else_block.printTree(indent + 1)\n\n @addToClass(Print)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.args.printTree(indent + 1)\n\n @addToClass(Return)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n if self.args is not None:\n self.args.printTree(indent + 1)\n\n @addToClass(ArrayElement)\n def printTree(self, indent=0):\n print_intended(\"get_element\", indent)\n self.array.printTree(indent + 1)\n self.ids.printTree(indent + 1)\n\n # Expressions\n @addToClass(Value)\n def printTree(self, indent=0):\n print_intended(str(self.value), indent)\n\n @addToClass(Array)\n def printTree(self, indent=0):\n if self.list is not None:\n print_intended('array', indent)\n self.list.printTree(indent + 1)\n else:\n print_intended('empty_array', indent)\n\n @addToClass(BinaryExpression)\n def printTree(self, indent=0):\n print_intended(self.operator, indent)\n self.left.printTree(indent + 1)\n self.right.printTree(indent + 1)\n\n @addToClass(MatrixFunction)\n def printTree(self, indent=0):\n print_intended(self.function, indent)\n self.parameter.printTree(indent + 1)\n\n @addToClass(UnaryMinus)\n def printTree(self, indent=0):\n print_intended('-', indent)\n self.value.printTree(indent + 1)\n\n @addToClass(Transpose)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.value.printTree(indent + 1)\n\n # Other\n @addToClass(Program)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.instructions_opt.printTree(indent + 1)\n\n @addToClass(Identifier)\n def printTree(self, indent=0):\n print_intended(self.name, indent)\n\n @addToClass(Range)\n def printTree(self, indent=0):\n print_intended(self.type, indent)\n self.start_value.printTree(indent + 1)\n self.end_value.printTree(indent + 1)\n\n @addToClass(List)\n def printTree(self, indent=0):\n for element in 
self.elements:\n element.printTree(indent)\n",
"step-ids": [
18,
21,
22,
24,
26
]
}
|
[
18,
21,
22,
24,
26
] |
STATUS_CHOICES = (
(-1, 'Eliminado'),
(0, 'Inactivo'),
(1, 'Activo'),
)
USERTYPES_CHOICES = ()
#-- Activation Request Values
ACTIVATION_CHOICES = (
(1, 'Activacion'),
(2, 'Solicitud Password'),
(3, 'Invitacion'),
)
#-- Activation Status Values
ACTIVATIONSTATUS_CHOICES = (
(-1, 'Expirado'),
(0, 'Enviado'),
(1, 'Activado'),
)
|
normal
|
{
"blob_id": "200552b638d6b1a6879b455837677b82689e0069",
"index": 5479,
"step-1": "<mask token>\n",
"step-2": "STATUS_CHOICES = (-1, 'Eliminado'), (0, 'Inactivo'), (1, 'Activo')\nUSERTYPES_CHOICES = ()\nACTIVATION_CHOICES = (1, 'Activacion'), (2, 'Solicitud Password'), (3,\n 'Invitacion')\nACTIVATIONSTATUS_CHOICES = (-1, 'Expirado'), (0, 'Enviado'), (1, 'Activado')\n",
"step-3": "\n\nSTATUS_CHOICES = (\n (-1, 'Eliminado'),\n (0, 'Inactivo'),\n (1, 'Activo'),\n)\n\nUSERTYPES_CHOICES = ()\n\n#-- Activation Request Values\nACTIVATION_CHOICES = (\n (1, 'Activacion'),\n (2, 'Solicitud Password'),\n (3, 'Invitacion'),\n)\n\n#-- Activation Status Values\nACTIVATIONSTATUS_CHOICES = (\n (-1, 'Expirado'),\n (0, 'Enviado'),\n (1, 'Activado'),\n)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import random
import argparse
from vapory import *
from data import colors, object_types
class Torus(POVRayElement):
""""""
def render_scene(filename, object_type, color, location, rotation):
assert (object_type in object_types)
assert (color in colors)
color = colors[color]
size = 2
radius = size/2
attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7), 'rotate', (0, rotation, 0)
if object_type == 'box':
location.insert(1, size/2)
obj = Box([x - size/2 for x in location], [x + size/2 for x in location], *attributes)
if object_type == 'sphere':
location.insert(1, radius)
obj = Sphere(location, radius, *attributes)
if object_type == 'torus':
location.insert(1, radius/2)
obj = Torus(radius, radius/2, 'translate', location, *attributes)
if object_type == 'ellipsoid':
location.insert(1, radius)
obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)
if object_type == 'cylinder':
location.insert(1, 0)
location2 = list(location)
location2[1] = size*2
obj = Cylinder(location, location2, radius, *attributes)
camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])
light = LightSource([0, 10, 0], 'color', [1, 1, 1])
chessboard = Plane([0, 1, 0], 0, 'hollow',
Texture(Pigment('checker',
'color', [.47, .6, .74],
'color', [.34, 0.48, 0.6]),
'scale', 4), Finish('ambient', 0.5))
scene = Scene(camera, objects=[light, obj, chessboard])
scene.render(filename, width=128, height=128, antialiasing=1.0)
parser = argparse.ArgumentParser()
parser.add_argument('--n_samples', type=int, default=100)
parser.add_argument('--seed', type=int, default=2018)
args = parser.parse_args()
random.seed(args.seed)
os.makedirs('assets', exist_ok=True)
print("Rendering scenes...")
for color in colors:
for object_type in object_types:
for i in range(args.n_samples):
filename = 'assets/%s-%s-%d' % (color, object_type, i)
if os.path.exists(filename):
print("%s exists, skipping" % filename)
continue
location = [random.uniform(-3, 3), random.uniform(-3, 3)]
rotation = random.uniform(0, 360)
render_scene(filename, object_type, color, location, rotation)
print("Finished")
|
normal
|
{
"blob_id": "f8972067fa88e7e74e05cdcc7bdec184116dec4a",
"index": 7771,
"step-1": "<mask token>\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\n<mask token>\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\n<mask token>\nrandom.seed(args.seed)\nos.makedirs('assets', exist_ok=True)\nprint('Rendering scenes...')\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print('%s exists, skipping' % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\nprint('Finished')\n",
"step-3": "<mask token>\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\nargs = parser.parse_args()\nrandom.seed(args.seed)\nos.makedirs('assets', exist_ok=True)\nprint('Rendering scenes...')\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print('%s exists, skipping' % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\nprint('Finished')\n",
"step-4": "import os\nimport random\nimport argparse\nfrom vapory import *\nfrom data import colors, object_types\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert object_type in object_types\n assert color in colors\n color = colors[color]\n size = 2\n radius = size / 2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7\n ), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size / 2)\n obj = Box([(x - size / 2) for x in location], [(x + size / 2) for x in\n location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius / 2)\n obj = Torus(radius, radius / 2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size * 2\n obj = Cylinder(location, location2, radius, *attributes)\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n chessboard = Plane([0, 1, 0], 0, 'hollow', Texture(Pigment('checker',\n 'color', [0.47, 0.6, 0.74], 'color', [0.34, 0.48, 0.6]), 'scale', 4\n ), Finish('ambient', 0.5))\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\nargs = parser.parse_args()\nrandom.seed(args.seed)\nos.makedirs('assets', exist_ok=True)\nprint('Rendering scenes...')\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print('%s exists, skipping' % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\nprint('Finished')\n",
"step-5": "import os\nimport random\nimport argparse\n\nfrom vapory import *\n\nfrom data import colors, object_types\n\n\nclass Torus(POVRayElement):\n \"\"\"\"\"\"\n\n\ndef render_scene(filename, object_type, color, location, rotation):\n assert (object_type in object_types)\n assert (color in colors)\n\n color = colors[color]\n size = 2\n radius = size/2\n attributes = Texture(Pigment('color', color)), Finish('ambient', 0.7), 'rotate', (0, rotation, 0)\n if object_type == 'box':\n location.insert(1, size/2)\n obj = Box([x - size/2 for x in location], [x + size/2 for x in location], *attributes)\n if object_type == 'sphere':\n location.insert(1, radius)\n obj = Sphere(location, radius, *attributes)\n if object_type == 'torus':\n location.insert(1, radius/2)\n obj = Torus(radius, radius/2, 'translate', location, *attributes)\n if object_type == 'ellipsoid':\n location.insert(1, radius)\n obj = Sphere(location, radius, 'scale', (0.75, 0.45, 1.5), *attributes)\n if object_type == 'cylinder':\n location.insert(1, 0)\n location2 = list(location)\n location2[1] = size*2\n obj = Cylinder(location, location2, radius, *attributes)\n\n camera = Camera('location', [0, 8, 7], 'look_at', [0, 0, 0])\n light = LightSource([0, 10, 0], 'color', [1, 1, 1])\n\n chessboard = Plane([0, 1, 0], 0, 'hollow',\n Texture(Pigment('checker',\n 'color', [.47, .6, .74],\n 'color', [.34, 0.48, 0.6]),\n 'scale', 4), Finish('ambient', 0.5))\n\n scene = Scene(camera, objects=[light, obj, chessboard])\n scene.render(filename, width=128, height=128, antialiasing=1.0)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_samples', type=int, default=100)\nparser.add_argument('--seed', type=int, default=2018)\nargs = parser.parse_args()\n\nrandom.seed(args.seed)\n\nos.makedirs('assets', exist_ok=True)\n\nprint(\"Rendering scenes...\")\nfor color in colors:\n for object_type in object_types:\n for i in range(args.n_samples):\n filename = 'assets/%s-%s-%d' % (color, object_type, i)\n if os.path.exists(filename):\n print(\"%s exists, skipping\" % filename)\n continue\n location = [random.uniform(-3, 3), random.uniform(-3, 3)]\n rotation = random.uniform(0, 360)\n render_scene(filename, object_type, color, location, rotation)\n\nprint(\"Finished\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def load_setting(setting: str):
with open(SETTINGS_FILE) as f:
return json.load(f)[setting]
@lru_cache()
def get_bot_name():
return load_setting('bot_name')
@lru_cache()
def get_app_id():
return load_setting('app_id')
@lru_cache()
def get_app_hash():
return load_setting('app_hash')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_setting(setting: str):
with open(SETTINGS_FILE) as f:
return json.load(f)[setting]
@lru_cache()
def get_bot_name():
return load_setting('bot_name')
@lru_cache()
def get_app_id():
return load_setting('app_id')
@lru_cache()
def get_app_hash():
return load_setting('app_hash')
async def initialize_client():
app = Client('testing', get_app_id(), get_app_hash())
async with app:
with open(CONN_FILE, 'w+') as f:
f.write(json.dumps({'connection_string': await app.
export_session_string()}))
print('Connection string was saved to conn.json')
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(initialize_client())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SETTINGS_FILE = '/src/settings.json'
CONN_FILE = '/src/conn.json'
def load_setting(setting: str):
with open(SETTINGS_FILE) as f:
return json.load(f)[setting]
@lru_cache()
def get_bot_name():
return load_setting('bot_name')
@lru_cache()
def get_app_id():
return load_setting('app_id')
@lru_cache()
def get_app_hash():
return load_setting('app_hash')
async def initialize_client():
app = Client('testing', get_app_id(), get_app_hash())
async with app:
with open(CONN_FILE, 'w+') as f:
f.write(json.dumps({'connection_string': await app.
export_session_string()}))
print('Connection string was saved to conn.json')
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(initialize_client())
<|reserved_special_token_1|>
import asyncio
import json
from functools import lru_cache
from pyrogram import Client
SETTINGS_FILE = '/src/settings.json'
CONN_FILE = '/src/conn.json'
def load_setting(setting: str):
with open(SETTINGS_FILE) as f:
return json.load(f)[setting]
@lru_cache()
def get_bot_name():
return load_setting('bot_name')
@lru_cache()
def get_app_id():
return load_setting('app_id')
@lru_cache()
def get_app_hash():
return load_setting('app_hash')
async def initialize_client():
app = Client('testing', get_app_id(), get_app_hash())
async with app:
with open(CONN_FILE, 'w+') as f:
f.write(json.dumps({'connection_string': await app.
export_session_string()}))
print('Connection string was saved to conn.json')
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(initialize_client())
<|reserved_special_token_1|>
import asyncio
import json
from functools import lru_cache
from pyrogram import Client
SETTINGS_FILE = "/src/settings.json"
CONN_FILE = "/src/conn.json"
def load_setting(setting: str):
with open(SETTINGS_FILE) as f:
return json.load(f)[setting]
@lru_cache()
def get_bot_name():
return load_setting("bot_name")
@lru_cache()
def get_app_id():
return load_setting("app_id")
@lru_cache()
def get_app_hash():
return load_setting("app_hash")
async def initialize_client():
app = Client("testing", get_app_id(), get_app_hash())
async with app:
with open(CONN_FILE, "w+") as f:
f.write(json.dumps({"connection_string": await app.export_session_string()}))
print("Connection string was saved to conn.json")
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(initialize_client())
|
flexible
|
{
"blob_id": "14e1af3d60efef842c72bf9b55143d0e14f3a7b8",
"index": 5897,
"step-1": "<mask token>\n\n\ndef load_setting(setting: str):\n with open(SETTINGS_FILE) as f:\n return json.load(f)[setting]\n\n\n@lru_cache()\ndef get_bot_name():\n return load_setting('bot_name')\n\n\n@lru_cache()\ndef get_app_id():\n return load_setting('app_id')\n\n\n@lru_cache()\ndef get_app_hash():\n return load_setting('app_hash')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_setting(setting: str):\n with open(SETTINGS_FILE) as f:\n return json.load(f)[setting]\n\n\n@lru_cache()\ndef get_bot_name():\n return load_setting('bot_name')\n\n\n@lru_cache()\ndef get_app_id():\n return load_setting('app_id')\n\n\n@lru_cache()\ndef get_app_hash():\n return load_setting('app_hash')\n\n\nasync def initialize_client():\n app = Client('testing', get_app_id(), get_app_hash())\n async with app:\n with open(CONN_FILE, 'w+') as f:\n f.write(json.dumps({'connection_string': await app.\n export_session_string()}))\n print('Connection string was saved to conn.json')\n\n\nif __name__ == '__main__':\n asyncio.get_event_loop().run_until_complete(initialize_client())\n",
"step-3": "<mask token>\nSETTINGS_FILE = '/src/settings.json'\nCONN_FILE = '/src/conn.json'\n\n\ndef load_setting(setting: str):\n with open(SETTINGS_FILE) as f:\n return json.load(f)[setting]\n\n\n@lru_cache()\ndef get_bot_name():\n return load_setting('bot_name')\n\n\n@lru_cache()\ndef get_app_id():\n return load_setting('app_id')\n\n\n@lru_cache()\ndef get_app_hash():\n return load_setting('app_hash')\n\n\nasync def initialize_client():\n app = Client('testing', get_app_id(), get_app_hash())\n async with app:\n with open(CONN_FILE, 'w+') as f:\n f.write(json.dumps({'connection_string': await app.\n export_session_string()}))\n print('Connection string was saved to conn.json')\n\n\nif __name__ == '__main__':\n asyncio.get_event_loop().run_until_complete(initialize_client())\n",
"step-4": "import asyncio\nimport json\nfrom functools import lru_cache\nfrom pyrogram import Client\nSETTINGS_FILE = '/src/settings.json'\nCONN_FILE = '/src/conn.json'\n\n\ndef load_setting(setting: str):\n with open(SETTINGS_FILE) as f:\n return json.load(f)[setting]\n\n\n@lru_cache()\ndef get_bot_name():\n return load_setting('bot_name')\n\n\n@lru_cache()\ndef get_app_id():\n return load_setting('app_id')\n\n\n@lru_cache()\ndef get_app_hash():\n return load_setting('app_hash')\n\n\nasync def initialize_client():\n app = Client('testing', get_app_id(), get_app_hash())\n async with app:\n with open(CONN_FILE, 'w+') as f:\n f.write(json.dumps({'connection_string': await app.\n export_session_string()}))\n print('Connection string was saved to conn.json')\n\n\nif __name__ == '__main__':\n asyncio.get_event_loop().run_until_complete(initialize_client())\n",
"step-5": "import asyncio\nimport json\nfrom functools import lru_cache\n\nfrom pyrogram import Client\n\n\nSETTINGS_FILE = \"/src/settings.json\"\nCONN_FILE = \"/src/conn.json\"\n\ndef load_setting(setting: str):\n with open(SETTINGS_FILE) as f:\n return json.load(f)[setting]\n\n\n@lru_cache()\ndef get_bot_name():\n return load_setting(\"bot_name\")\n\n\n@lru_cache()\ndef get_app_id():\n return load_setting(\"app_id\")\n\n\n@lru_cache()\ndef get_app_hash():\n return load_setting(\"app_hash\")\n\n\nasync def initialize_client():\n app = Client(\"testing\", get_app_id(), get_app_hash())\n async with app:\n with open(CONN_FILE, \"w+\") as f:\n f.write(json.dumps({\"connection_string\": await app.export_session_string()}))\n print(\"Connection string was saved to conn.json\")\n\nif __name__ == \"__main__\":\n asyncio.get_event_loop().run_until_complete(initialize_client())\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Broker:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def liquidation(self, pos, price, date, commission):
"""
clean the last position
"""
o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=
None, is_fill=False)
setattr(o, 'trading_price', price[0])
setattr(o, 'trading_date', date)
order_execute.append(o)
self.work(price=price, date=date, commission=commission)
def get_log(self):
log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':
buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,
'SellPrice': sell_price, 'SellUnits': sell_unit,
'CashReceiving': amnt_receiving}
log = pd.DataFrame(log_dict)
for i in list(log_dict.values()):
i.clear()
return log
class Execute:
def __init__(self, equity):
self.__equity = equity
def trading(self, price, date, commission):
c = price[3]
for t in order_execute:
if not t.is_filled:
position_list.append(t.units)
if t.is_short and add_position_long_order and t.is_parents:
self.split_add_pos_order(t, add_position_long_order,
commission)
elif t.is_long and add_position_short_order and t.is_parents:
self.split_add_pos_order(t, add_position_short_order,
commission)
else:
self.fill(t, commission)
if position() == 0 and t in order_execute:
del order_execute[:order_execute.index(t) + 1]
def fill(self, t, commission):
adj_price = util.adjust_price(trade=t, commission=commission)
if t.is_long:
assert self.__equity >= adj_price * t.units, 'Your money is empty'
buy_price.append(t.trading_price)
buy_date.append(t.trading_date)
buy_unit.append(t.units)
amnt_paying.append(adj_price * t.units)
self.__equity -= t.units * adj_price
setattr(t, 'is_filled', True)
elif t.is_short:
sell_price.append(t.trading_price)
sell_date.append(t.trading_date)
sell_unit.append(t.units)
amnt_receiving.append(abs(t.units) * adj_price)
self.__equity += abs(t.units) * adj_price
setattr(t, 'is_filled', True)
def split_add_pos_order(self, trade_order, add_position_order: list,
commission):
"""
split the order which include overweight order into a list of single order and fill them
e.g. a sell order [with 6 units has an parent order and an overweight order] becomes
[an parent order with -4 units , an order with -2 units]
"""
temp_order_list = []
origin_trader_order_sign = np.sign(trade_order.units)
if trade_order.is_short:
parents_unit = trade_order.units + sum(abs(_o.units) for _o in
add_position_order)
else:
parents_unit = trade_order.units - sum(abs(_o.units) for _o in
add_position_order)
trade_order.units = parents_unit
if trade_order.units != 0:
temp_order_list.append(trade_order)
for _t in add_position_order:
if np.sign(_t.units) == origin_trader_order_sign:
temp_order_list.append(_t)
else:
ct = deepcopy(_t)
ct.units = -_t.units
ct.trading_date = trade_order.trading_date
ct.trading_prices = trade_order.trading_price
temp_order_list.append(ct)
for temp_o in temp_order_list:
self.fill(temp_o, commission)
add_position_order.clear()
@property
def equity(self):
return self.__equity
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Broker:
def __init__(self, equity):
self.execute = Execute(equity)
<|reserved_special_token_0|>
def check_order(self, ohlc, date, commission):
"""
check the order and set the information to order by different condition
"""
op = ohlc[0]
for o in order_queue:
if position() != 0 and position() + o.units != 0 and len(
order_queue) == 1:
o.is_parents = False
if o.limit_price:
trading_price = o.limit_price
else:
trading_price = op
setattr(o, 'trading_price', trading_price)
setattr(o, 'trading_date', date)
if o.is_long:
if 1 > o.units > 0:
size = int(self.execute.equity * o.units / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 - o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 + o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_long_order.append(o)
elif o.is_short:
if -1 < o.units < 0:
size = int(self.execute.equity * o.units / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 + o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 - o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_short_order.append(o)
order_execute.append(o)
self.work(ohlc, date=date, commission=commission)
order_queue.clear()
self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def liquidation(self, pos, price, date, commission):
"""
clean the last position
"""
o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=
None, is_fill=False)
setattr(o, 'trading_price', price[0])
setattr(o, 'trading_date', date)
order_execute.append(o)
self.work(price=price, date=date, commission=commission)
def get_log(self):
log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':
buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,
'SellPrice': sell_price, 'SellUnits': sell_unit,
'CashReceiving': amnt_receiving}
log = pd.DataFrame(log_dict)
for i in list(log_dict.values()):
i.clear()
return log
class Execute:
def __init__(self, equity):
self.__equity = equity
def trading(self, price, date, commission):
c = price[3]
for t in order_execute:
if not t.is_filled:
position_list.append(t.units)
if t.is_short and add_position_long_order and t.is_parents:
self.split_add_pos_order(t, add_position_long_order,
commission)
elif t.is_long and add_position_short_order and t.is_parents:
self.split_add_pos_order(t, add_position_short_order,
commission)
else:
self.fill(t, commission)
if position() == 0 and t in order_execute:
del order_execute[:order_execute.index(t) + 1]
def fill(self, t, commission):
adj_price = util.adjust_price(trade=t, commission=commission)
if t.is_long:
assert self.__equity >= adj_price * t.units, 'Your money is empty'
buy_price.append(t.trading_price)
buy_date.append(t.trading_date)
buy_unit.append(t.units)
amnt_paying.append(adj_price * t.units)
self.__equity -= t.units * adj_price
setattr(t, 'is_filled', True)
elif t.is_short:
sell_price.append(t.trading_price)
sell_date.append(t.trading_date)
sell_unit.append(t.units)
amnt_receiving.append(abs(t.units) * adj_price)
self.__equity += abs(t.units) * adj_price
setattr(t, 'is_filled', True)
def split_add_pos_order(self, trade_order, add_position_order: list,
commission):
"""
split the order which include overweight order into a list of single order and fill them
e.g. a sell order [with 6 units has an parent order and an overweight order] becomes
[an parent order with -4 units , an order with -2 units]
"""
temp_order_list = []
origin_trader_order_sign = np.sign(trade_order.units)
if trade_order.is_short:
parents_unit = trade_order.units + sum(abs(_o.units) for _o in
add_position_order)
else:
parents_unit = trade_order.units - sum(abs(_o.units) for _o in
add_position_order)
trade_order.units = parents_unit
if trade_order.units != 0:
temp_order_list.append(trade_order)
for _t in add_position_order:
if np.sign(_t.units) == origin_trader_order_sign:
temp_order_list.append(_t)
else:
ct = deepcopy(_t)
ct.units = -_t.units
ct.trading_date = trade_order.trading_date
ct.trading_prices = trade_order.trading_price
temp_order_list.append(ct)
for temp_o in temp_order_list:
self.fill(temp_o, commission)
add_position_order.clear()
@property
def equity(self):
return self.__equity
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Broker:
def __init__(self, equity):
self.execute = Execute(equity)
<|reserved_special_token_0|>
def check_order(self, ohlc, date, commission):
"""
check the order and set the information to order by different condition
"""
op = ohlc[0]
for o in order_queue:
if position() != 0 and position() + o.units != 0 and len(
order_queue) == 1:
o.is_parents = False
if o.limit_price:
trading_price = o.limit_price
else:
trading_price = op
setattr(o, 'trading_price', trading_price)
setattr(o, 'trading_date', date)
if o.is_long:
if 1 > o.units > 0:
size = int(self.execute.equity * o.units / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 - o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 + o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_long_order.append(o)
elif o.is_short:
if -1 < o.units < 0:
size = int(self.execute.equity * o.units / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 + o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 - o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_short_order.append(o)
order_execute.append(o)
self.work(ohlc, date=date, commission=commission)
order_queue.clear()
self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)
def check_if_sl_or_sp(self, ohlc, date, commission):
for t in order_execute:
origin_o = deepcopy(t).is_parents
if util.touch_stop_loss(order=t, price=ohlc[3], date=date):
t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,
trading_date=date, _is_fill=False, _is_parent=False,
stop_loss=None)
elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):
t.replace(_unit=-t.units, _trading_price=t.
stop_profit_prices, trading_date=date, _is_fill=False,
_is_parent=False, stop_loss=None)
if not origin_o:
order_execute.remove(t)
self.work(ohlc, date=date, commission=commission)
<|reserved_special_token_0|>
def liquidation(self, pos, price, date, commission):
"""
clean the last position
"""
o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=
None, is_fill=False)
setattr(o, 'trading_price', price[0])
setattr(o, 'trading_date', date)
order_execute.append(o)
self.work(price=price, date=date, commission=commission)
def get_log(self):
log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':
buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,
'SellPrice': sell_price, 'SellUnits': sell_unit,
'CashReceiving': amnt_receiving}
log = pd.DataFrame(log_dict)
for i in list(log_dict.values()):
i.clear()
return log
class Execute:
def __init__(self, equity):
self.__equity = equity
def trading(self, price, date, commission):
c = price[3]
for t in order_execute:
if not t.is_filled:
position_list.append(t.units)
if t.is_short and add_position_long_order and t.is_parents:
self.split_add_pos_order(t, add_position_long_order,
commission)
elif t.is_long and add_position_short_order and t.is_parents:
self.split_add_pos_order(t, add_position_short_order,
commission)
else:
self.fill(t, commission)
if position() == 0 and t in order_execute:
del order_execute[:order_execute.index(t) + 1]
def fill(self, t, commission):
adj_price = util.adjust_price(trade=t, commission=commission)
if t.is_long:
assert self.__equity >= adj_price * t.units, 'Your money is empty'
buy_price.append(t.trading_price)
buy_date.append(t.trading_date)
buy_unit.append(t.units)
amnt_paying.append(adj_price * t.units)
self.__equity -= t.units * adj_price
setattr(t, 'is_filled', True)
elif t.is_short:
sell_price.append(t.trading_price)
sell_date.append(t.trading_date)
sell_unit.append(t.units)
amnt_receiving.append(abs(t.units) * adj_price)
self.__equity += abs(t.units) * adj_price
setattr(t, 'is_filled', True)
def split_add_pos_order(self, trade_order, add_position_order: list,
commission):
"""
split the order which include overweight order into a list of single order and fill them
e.g. a sell order [with 6 units has an parent order and an overweight order] becomes
[an parent order with -4 units , an order with -2 units]
"""
temp_order_list = []
origin_trader_order_sign = np.sign(trade_order.units)
if trade_order.is_short:
parents_unit = trade_order.units + sum(abs(_o.units) for _o in
add_position_order)
else:
parents_unit = trade_order.units - sum(abs(_o.units) for _o in
add_position_order)
trade_order.units = parents_unit
if trade_order.units != 0:
temp_order_list.append(trade_order)
for _t in add_position_order:
if np.sign(_t.units) == origin_trader_order_sign:
temp_order_list.append(_t)
else:
ct = deepcopy(_t)
ct.units = -_t.units
ct.trading_date = trade_order.trading_date
ct.trading_prices = trade_order.trading_price
temp_order_list.append(ct)
for temp_o in temp_order_list:
self.fill(temp_o, commission)
add_position_order.clear()
@property
def equity(self):
return self.__equity
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from accessor import *
from order import Order
from copy import deepcopy
import pandas as pd
import numpy as np
import util
class Broker:
def __init__(self, equity):
self.execute = Execute(equity)
def make_order(self, unit, limit_price, stop_loss, stop_profit):
order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))
def check_order(self, ohlc, date, commission):
"""
check the order and set the information to order by different condition
"""
op = ohlc[0]
for o in order_queue:
if position() != 0 and position() + o.units != 0 and len(
order_queue) == 1:
o.is_parents = False
if o.limit_price:
trading_price = o.limit_price
else:
trading_price = op
setattr(o, 'trading_price', trading_price)
setattr(o, 'trading_date', date)
if o.is_long:
if 1 > o.units > 0:
size = int(self.execute.equity * o.units / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 - o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 + o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_long_order.append(o)
elif o.is_short:
if -1 < o.units < 0:
size = int(self.execute.equity * o.units / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 + o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 - o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_short_order.append(o)
order_execute.append(o)
self.work(ohlc, date=date, commission=commission)
order_queue.clear()
self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)
def check_if_sl_or_sp(self, ohlc, date, commission):
for t in order_execute:
origin_o = deepcopy(t).is_parents
if util.touch_stop_loss(order=t, price=ohlc[3], date=date):
t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,
trading_date=date, _is_fill=False, _is_parent=False,
stop_loss=None)
elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):
t.replace(_unit=-t.units, _trading_price=t.
stop_profit_prices, trading_date=date, _is_fill=False,
_is_parent=False, stop_loss=None)
if not origin_o:
order_execute.remove(t)
self.work(ohlc, date=date, commission=commission)
def work(self, price, date, commission):
self.execute.trading(price, date, commission)
def liquidation(self, pos, price, date, commission):
"""
clean the last position
"""
o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=
None, is_fill=False)
setattr(o, 'trading_price', price[0])
setattr(o, 'trading_date', date)
order_execute.append(o)
self.work(price=price, date=date, commission=commission)
def get_log(self):
log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':
buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,
'SellPrice': sell_price, 'SellUnits': sell_unit,
'CashReceiving': amnt_receiving}
log = pd.DataFrame(log_dict)
for i in list(log_dict.values()):
i.clear()
return log
class Execute:
def __init__(self, equity):
self.__equity = equity
def trading(self, price, date, commission):
c = price[3]
for t in order_execute:
if not t.is_filled:
position_list.append(t.units)
if t.is_short and add_position_long_order and t.is_parents:
self.split_add_pos_order(t, add_position_long_order,
commission)
elif t.is_long and add_position_short_order and t.is_parents:
self.split_add_pos_order(t, add_position_short_order,
commission)
else:
self.fill(t, commission)
if position() == 0 and t in order_execute:
del order_execute[:order_execute.index(t) + 1]
def fill(self, t, commission):
adj_price = util.adjust_price(trade=t, commission=commission)
if t.is_long:
assert self.__equity >= adj_price * t.units, 'Your money is empty'
buy_price.append(t.trading_price)
buy_date.append(t.trading_date)
buy_unit.append(t.units)
amnt_paying.append(adj_price * t.units)
self.__equity -= t.units * adj_price
setattr(t, 'is_filled', True)
elif t.is_short:
sell_price.append(t.trading_price)
sell_date.append(t.trading_date)
sell_unit.append(t.units)
amnt_receiving.append(abs(t.units) * adj_price)
self.__equity += abs(t.units) * adj_price
setattr(t, 'is_filled', True)
def split_add_pos_order(self, trade_order, add_position_order: list,
commission):
"""
split the order which include overweight order into a list of single order and fill them
e.g. a sell order [with 6 units has an parent order and an overweight order] becomes
[an parent order with -4 units , an order with -2 units]
"""
temp_order_list = []
origin_trader_order_sign = np.sign(trade_order.units)
if trade_order.is_short:
parents_unit = trade_order.units + sum(abs(_o.units) for _o in
add_position_order)
else:
parents_unit = trade_order.units - sum(abs(_o.units) for _o in
add_position_order)
trade_order.units = parents_unit
if trade_order.units != 0:
temp_order_list.append(trade_order)
for _t in add_position_order:
if np.sign(_t.units) == origin_trader_order_sign:
temp_order_list.append(_t)
else:
ct = deepcopy(_t)
ct.units = -_t.units
ct.trading_date = trade_order.trading_date
ct.trading_prices = trade_order.trading_price
temp_order_list.append(ct)
for temp_o in temp_order_list:
self.fill(temp_o, commission)
add_position_order.clear()
@property
def equity(self):
return self.__equity
def position():
return sum(size for size in position_list)
<|reserved_special_token_1|>
from accessor import *
from order import Order
from copy import deepcopy
import pandas as pd
import numpy as np
import util
class Broker:
def __init__(self, equity):
self.execute = Execute(equity) # Execute
def make_order(self, unit, limit_price, stop_loss, stop_profit):
order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))
def check_order(self, ohlc, date, commission):
"""
check the order and set the information to order by different condition
"""
op = ohlc[0]
for o in order_queue:
if position() != 0 and position() + o.units != 0 and len(order_queue) == 1:
o.is_parents = False
if o.limit_price:
trading_price = o.limit_price
else:
trading_price = op
setattr(o, 'trading_price', trading_price)
setattr(o, 'trading_date', date)
if o.is_long:
if 1 > o.units > 0:
size = int((self.execute.equity * o.units) / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 - o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 + o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_long_order.append(o)
elif o.is_short:
if -1 < o.units < 0:
size = int((self.execute.equity * o.units) / trading_price)
setattr(o, 'units', size)
if o.stop_loss:
stop_loss_price = o.trading_price * (1 + o.stop_loss)
setattr(o, 'stop_loss_prices', stop_loss_price)
if o.stop_profit:
stop_profit_price = o.trading_price * (1 - o.stop_profit)
setattr(o, 'stop_profit_prices', stop_profit_price)
if not o.is_parents:
add_position_short_order.append(o)
order_execute.append(o)
self.work(ohlc, date=date, commission=commission)
order_queue.clear()
self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)
def check_if_sl_or_sp(self, ohlc, date, commission):
for t in order_execute:
origin_o = deepcopy(t).is_parents
if util.touch_stop_loss(order=t, price=ohlc[3], date=date) :
t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices, trading_date=date, _is_fill=False,
_is_parent=False, stop_loss=None)
elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):
t.replace(_unit=-t.units, _trading_price=t.stop_profit_prices, trading_date=date, _is_fill=False,
_is_parent=False, stop_loss=None)
if not origin_o:
order_execute.remove(t)
self.work(ohlc, date=date, commission=commission)
def work(self, price, date, commission):
self.execute.trading(price, date, commission)
def liquidation(self, pos, price, date, commission):
"""
clean the last position
"""
o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=None, is_fill=False)
setattr(o, 'trading_price', price[0])
setattr(o, 'trading_date', date)
order_execute.append(o)
self.work(price=price, date=date, commission=commission)
def get_log(self):
log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits': buy_unit, 'CashPaying': amnt_paying,
'SellDate': sell_date, 'SellPrice': sell_price, 'SellUnits': sell_unit,
'CashReceiving': amnt_receiving}
log = pd.DataFrame(log_dict)
for i in list(log_dict.values()):
i.clear()
return log
class Execute:
def __init__(self, equity):
self.__equity = equity
def trading(self, price, date, commission):
c = price[3]
for t in order_execute:
if not t.is_filled:
position_list.append(t.units)
if t.is_short and add_position_long_order and t.is_parents:
self.split_add_pos_order(t, add_position_long_order, commission)
elif t.is_long and add_position_short_order and t.is_parents:
self.split_add_pos_order(t, add_position_short_order, commission)
else:
self.fill(t, commission)
# if self._touch_stop_loss(order=t, price=c):
# origin_o = deepcopy(t).is_parents
# t.replace(units=-t.units, trading_prices=t.stop_loss_price, trading_date=date, is_filled=False,
# is_parent=False, stop_loss=None)
# if not origin_o:
# order_execute.remove(t)
if position() == 0 and t in order_execute: del order_execute[: order_execute.index(t) + 1]
def fill(self, t, commission):
adj_price = util.adjust_price(trade=t, commission=commission)
if t.is_long:
assert self.__equity >= adj_price * t.units, 'Your money is empty'
buy_price.append(t.trading_price)
buy_date.append(t.trading_date)
buy_unit.append(t.units)
amnt_paying.append(adj_price * t.units)
self.__equity -= t.units * adj_price
setattr(t, 'is_filled', True)
elif t.is_short:
sell_price.append(t.trading_price)
sell_date.append(t.trading_date)
sell_unit.append(t.units)
amnt_receiving.append(abs(t.units) * adj_price)
self.__equity += abs(t.units) * adj_price
setattr(t, 'is_filled', True)
def split_add_pos_order(self, trade_order, add_position_order: list, commission):
"""
split the order which include overweight order into a list of single order and fill them
e.g. a sell order [with 6 units has an parent order and an overweight order] becomes
[an parent order with -4 units , an order with -2 units]
"""
temp_order_list = []
origin_trader_order_sign = np.sign(trade_order.units)
if trade_order.is_short:
parents_unit = trade_order.units + sum(abs(_o.units) for _o in add_position_order)
else:
parents_unit = trade_order.units - sum(abs(_o.units) for _o in add_position_order)
trade_order.units = parents_unit
if trade_order.units != 0:
temp_order_list.append(trade_order)
for _t in add_position_order:
if np.sign(_t.units) == origin_trader_order_sign:
temp_order_list.append(_t)
else:
ct = deepcopy(_t)
ct.units = -_t.units
ct.trading_date = trade_order.trading_date
ct.trading_prices = trade_order.trading_price
temp_order_list.append(ct)
for temp_o in temp_order_list:
self.fill(temp_o, commission)
add_position_order.clear()
@property
def equity(self):
return self.__equity
def position():
return sum(size for size in position_list)
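# Usage sketch (illustrative only; the unit/price/commission values below are
# hypothetical, and the module-level lists imported from accessor must exist
# for the calls to run):
#
#   broker = Broker(equity=100_000)
#   broker.make_order(unit=0.5, limit_price=None, stop_loss=0.02, stop_profit=0.05)
#   broker.check_order(ohlc=(100.0, 102.0, 99.0, 101.0), date='2020-01-02', commission=0.001425)
#   print(broker.get_log())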
|
flexible
|
{
"blob_id": "ca0aedcfb997299240870649823fb872e0d9f99a",
"index": 6023,
"step-1": "<mask token>\n\n\nclass Broker:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n <mask token>\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n <mask token>\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += 
abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n <mask token>\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n\n def check_if_sl_or_sp(self, ohlc, date, commission):\n for t in order_execute:\n origin_o = deepcopy(t).is_parents\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,\n trading_date=date, _is_fill=False, _is_parent=False,\n stop_loss=None)\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.\n stop_profit_prices, trading_date=date, _is_fill=False,\n _is_parent=False, stop_loss=None)\n if not origin_o:\n order_execute.remove(t)\n self.work(ohlc, date=date, commission=commission)\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del 
order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n",
"step-4": "from accessor import *\nfrom order import Order\nfrom copy import deepcopy\nimport pandas as pd\nimport numpy as np\nimport util\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n\n def make_order(self, unit, limit_price, stop_loss, stop_profit):\n order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n\n def check_if_sl_or_sp(self, ohlc, date, commission):\n for t in order_execute:\n origin_o = deepcopy(t).is_parents\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,\n trading_date=date, _is_fill=False, _is_parent=False,\n stop_loss=None)\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.\n stop_profit_prices, trading_date=date, _is_fill=False,\n _is_parent=False, stop_loss=None)\n if not origin_o:\n order_execute.remove(t)\n self.work(ohlc, date=date, commission=commission)\n\n def work(self, price, date, commission):\n self.execute.trading(price, date, commission)\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n 
self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\ndef position():\n return sum(size for size in position_list)\n",
"step-5": "from accessor import *\r\nfrom order import Order\r\nfrom copy import deepcopy\r\nimport pandas as pd\r\nimport numpy as np\r\nimport util\r\n\r\n\r\nclass Broker:\r\n def __init__(self, equity):\r\n\r\n self.execute = Execute(equity) # Execute\r\n\r\n def make_order(self, unit, limit_price, stop_loss, stop_profit):\r\n\r\n order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))\r\n\r\n def check_order(self, ohlc, date, commission):\r\n \"\"\"\r\n check the order and set the information to order by different condition\r\n \"\"\"\r\n\r\n op = ohlc[0]\r\n\r\n for o in order_queue:\r\n if position() != 0 and position() + o.units != 0 and len(order_queue) == 1:\r\n o.is_parents = False\r\n\r\n if o.limit_price:\r\n trading_price = o.limit_price\r\n\r\n else:\r\n trading_price = op\r\n\r\n setattr(o, 'trading_price', trading_price)\r\n setattr(o, 'trading_date', date)\r\n\r\n if o.is_long:\r\n if 1 > o.units > 0:\r\n\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_long_order.append(o)\r\n\r\n elif o.is_short:\r\n\r\n if -1 < o.units < 0:\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_short_order.append(o)\r\n\r\n order_execute.append(o)\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n order_queue.clear()\r\n\r\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\r\n\r\n def check_if_sl_or_sp(self, ohlc, date, commission):\r\n for t in order_execute:\r\n origin_o = deepcopy(t).is_parents\r\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date) :\r\n\r\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices, trading_date=date, _is_fill=False,\r\n _is_parent=False, stop_loss=None)\r\n\r\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\r\n t.replace(_unit=-t.units, _trading_price=t.stop_profit_prices, trading_date=date, _is_fill=False,\r\n _is_parent=False, stop_loss=None)\r\n\r\n if not origin_o:\r\n order_execute.remove(t)\r\n\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n def work(self, price, date, commission):\r\n\r\n self.execute.trading(price, date, commission)\r\n\r\n def liquidation(self, pos, price, date, commission):\r\n \"\"\"\r\n clean the last position\r\n \"\"\"\r\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=None, is_fill=False)\r\n setattr(o, 'trading_price', price[0])\r\n setattr(o, 'trading_date', date)\r\n order_execute.append(o)\r\n\r\n self.work(price=price, date=date, commission=commission)\r\n\r\n def get_log(self):\r\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits': buy_unit, 'CashPaying': amnt_paying,\r\n 'SellDate': sell_date, 'SellPrice': sell_price, 'SellUnits': sell_unit,\r\n 'CashReceiving': amnt_receiving}\r\n\r\n log = pd.DataFrame(log_dict)\r\n\r\n for i in list(log_dict.values()):\r\n 
i.clear()\r\n\r\n return log\r\n\r\n\r\nclass Execute:\r\n def __init__(self, equity):\r\n self.__equity = equity\r\n\r\n def trading(self, price, date, commission):\r\n\r\n c = price[3]\r\n\r\n for t in order_execute:\r\n if not t.is_filled:\r\n position_list.append(t.units)\r\n\r\n if t.is_short and add_position_long_order and t.is_parents:\r\n self.split_add_pos_order(t, add_position_long_order, commission)\r\n elif t.is_long and add_position_short_order and t.is_parents:\r\n self.split_add_pos_order(t, add_position_short_order, commission)\r\n\r\n else:\r\n self.fill(t, commission)\r\n\r\n # if self._touch_stop_loss(order=t, price=c):\r\n # origin_o = deepcopy(t).is_parents\r\n # t.replace(units=-t.units, trading_prices=t.stop_loss_price, trading_date=date, is_filled=False,\r\n # is_parent=False, stop_loss=None)\r\n # if not origin_o:\r\n # order_execute.remove(t)\r\n\r\n if position() == 0 and t in order_execute: del order_execute[: order_execute.index(t) + 1]\r\n\r\n def fill(self, t, commission):\r\n adj_price = util.adjust_price(trade=t, commission=commission)\r\n\r\n if t.is_long:\r\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\r\n\r\n buy_price.append(t.trading_price)\r\n buy_date.append(t.trading_date)\r\n buy_unit.append(t.units)\r\n amnt_paying.append(adj_price * t.units)\r\n\r\n self.__equity -= t.units * adj_price\r\n setattr(t, 'is_filled', True)\r\n\r\n elif t.is_short:\r\n\r\n sell_price.append(t.trading_price)\r\n sell_date.append(t.trading_date)\r\n sell_unit.append(t.units)\r\n amnt_receiving.append(abs(t.units) * adj_price)\r\n\r\n self.__equity += abs(t.units) * adj_price\r\n setattr(t, 'is_filled', True)\r\n\r\n\r\n def split_add_pos_order(self, trade_order, add_position_order: list, commission):\r\n \"\"\"\r\n split the order which include overweight order into a list of single order and fill them\r\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\r\n [an parent order with -4 units , an order with -2 units]\r\n \"\"\"\r\n temp_order_list = []\r\n origin_trader_order_sign = np.sign(trade_order.units)\r\n if trade_order.is_short:\r\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in add_position_order)\r\n else:\r\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in add_position_order)\r\n trade_order.units = parents_unit\r\n if trade_order.units != 0:\r\n temp_order_list.append(trade_order)\r\n for _t in add_position_order:\r\n if np.sign(_t.units) == origin_trader_order_sign:\r\n temp_order_list.append(_t)\r\n\r\n else:\r\n ct = deepcopy(_t)\r\n\r\n ct.units = -_t.units\r\n ct.trading_date = trade_order.trading_date\r\n ct.trading_prices = trade_order.trading_price\r\n\r\n temp_order_list.append(ct)\r\n for temp_o in temp_order_list:\r\n self.fill(temp_o, commission)\r\n\r\n add_position_order.clear()\r\n\r\n @property\r\n def equity(self):\r\n return self.__equity\r\n\r\n\r\ndef position():\r\n return sum(size for size in position_list)\r\n",
"step-ids": [
9,
11,
12,
16,
17
]
}
|
[
9,
11,
12,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RC165(IReg):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RC165(IReg):
def __init__(self):
self._header = ['REG', 'COD_PART', 'VEIC_ID', 'COD_AUT', 'NR_PASSE',
'HORA', 'TEMPER', 'QTD_VOL', 'PESO_BRT', 'PESO_LIQ', 'NOM_MOT',
'CPF', 'UF_ID']
self._hierarchy = '3'
<|reserved_special_token_1|>
from ..IReg import IReg
class RC165(IReg):
def __init__(self):
self._header = ['REG', 'COD_PART', 'VEIC_ID', 'COD_AUT', 'NR_PASSE',
'HORA', 'TEMPER', 'QTD_VOL', 'PESO_BRT', 'PESO_LIQ', 'NOM_MOT',
'CPF', 'UF_ID']
self._hierarchy = '3'
<|reserved_special_token_1|>
from ..IReg import IReg
class RC165(IReg):
def __init__(self):
self._header = ['REG',
'COD_PART',
'VEIC_ID',
'COD_AUT',
'NR_PASSE',
'HORA',
'TEMPER',
'QTD_VOL',
'PESO_BRT',
'PESO_LIQ',
'NOM_MOT',
'CPF',
'UF_ID']
self._hierarchy = "3"
|
flexible
|
{
"blob_id": "bf73e2109f11b2214fae060bc343b01091765c2a",
"index": 2325,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass RC165(IReg):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RC165(IReg):\n\n def __init__(self):\n self._header = ['REG', 'COD_PART', 'VEIC_ID', 'COD_AUT', 'NR_PASSE',\n 'HORA', 'TEMPER', 'QTD_VOL', 'PESO_BRT', 'PESO_LIQ', 'NOM_MOT',\n 'CPF', 'UF_ID']\n self._hierarchy = '3'\n",
"step-4": "from ..IReg import IReg\n\n\nclass RC165(IReg):\n\n def __init__(self):\n self._header = ['REG', 'COD_PART', 'VEIC_ID', 'COD_AUT', 'NR_PASSE',\n 'HORA', 'TEMPER', 'QTD_VOL', 'PESO_BRT', 'PESO_LIQ', 'NOM_MOT',\n 'CPF', 'UF_ID']\n self._hierarchy = '3'\n",
"step-5": "from ..IReg import IReg\n\n\nclass RC165(IReg):\n\n def __init__(self):\n self._header = ['REG',\n 'COD_PART',\n 'VEIC_ID',\n 'COD_AUT',\n 'NR_PASSE',\n 'HORA',\n 'TEMPER',\n 'QTD_VOL',\n 'PESO_BRT',\n 'PESO_LIQ',\n 'NOM_MOT',\n 'CPF',\n 'UF_ID']\n\n self._hierarchy = \"3\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# block-comments.py
'''
Block comments generally apply to some (or all) code that follows them, and are
indented to the same level as that code. Each line of a block comment starts
with a # and a single space (unless it is indented text inside the comment).
Paragraphs inside a block comment are separated by a line containing a single #.
'''
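
# An example of the convention described above (illustrative code):

# Normalize the raw input before it is compared.
# Surrounding whitespace is not significant here.
#
# The comparison itself is case-insensitive.
raw = "  Hello  "
normalized = raw.strip().lower()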
|
normal
|
{
"blob_id": "83bac8176caafc5551089c4bef5c1f38e1e8d4da",
"index": 5952,
"step-1": "<mask token>\n",
"step-2": "# block-comments.py\n'''\nBlock comments generally apply to some (or all) code that follows them, and are\nindented to the same level as that code. Each line of a block comment starts\nwith a # and a single space (unless it is indented text inside the comment).\n\nParagraphs inside a block comment are separated by a line containing a single #.\n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
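# Reads five integers, then prints the sum of the cheapest of the first three
# and the cheapest of the last two, minus 50.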
arr = []
for i in range(5):
arr.append(int(input()))
print(min(arr[0], arr[1], arr[2]) + min(arr[3], arr[4]) - 50)
|
normal
|
{
"blob_id": "8745855d86dcdabe55f8d1622b66b3613dbfe3e1",
"index": 4015,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(5):\n arr.append(int(input()))\nprint(min(arr[0], arr[1], arr[2]) + min(arr[3], arr[4]) - 50)\n",
"step-3": "arr = []\nfor i in range(5):\n arr.append(int(input()))\nprint(min(arr[0], arr[1], arr[2]) + min(arr[3], arr[4]) - 50)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='testspace-python', version='', packages=find_packages(include=[
'testspace', 'testspace.*']), url='', license='MIT license', author=
'Jeffrey Schultz', author_email='jeffs@s2technologies.com', description
='Module for interacting with Testspace Server', install_requires=[
'requests'])
<|reserved_special_token_1|>
from setuptools import setup, find_packages
setup(name='testspace-python', version='', packages=find_packages(include=[
'testspace', 'testspace.*']), url='', license='MIT license', author=
'Jeffrey Schultz', author_email='jeffs@s2technologies.com', description
='Module for interacting with Testspace Server', install_requires=[
'requests'])
<|reserved_special_token_1|>
from setuptools import setup, find_packages
setup(
name='testspace-python',
version='',
packages=find_packages(include=['testspace', 'testspace.*']),
url='',
license="MIT license",
author="Jeffrey Schultz",
author_email='jeffs@s2technologies.com',
description="Module for interacting with Testspace Server",
install_requires=[
'requests',
]
)
|
flexible
|
{
"blob_id": "7bc2a02d85c3b1a2b7ed61dc7567d1097b63d658",
"index": 3559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='testspace-python', version='', packages=find_packages(include=[\n 'testspace', 'testspace.*']), url='', license='MIT license', author=\n 'Jeffrey Schultz', author_email='jeffs@s2technologies.com', description\n ='Module for interacting with Testspace Server', install_requires=[\n 'requests'])\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='testspace-python', version='', packages=find_packages(include=[\n 'testspace', 'testspace.*']), url='', license='MIT license', author=\n 'Jeffrey Schultz', author_email='jeffs@s2technologies.com', description\n ='Module for interacting with Testspace Server', install_requires=[\n 'requests'])\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(\n name='testspace-python',\n version='',\n packages=find_packages(include=['testspace', 'testspace.*']),\n url='',\n license=\"MIT license\",\n author=\"Jeffrey Schultz\",\n author_email='jeffs@s2technologies.com',\n description=\"Module for interacting with Testspace Server\",\n install_requires=[\n 'requests',\n ]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import re # regex module
from ftplib import FTP, error_perm
from itertools import groupby
from typing import List, Tuple, Dict
import requests # HTTP requests module
from util import retry_multi, GLOBAL_TIMEOUT # from util.py
class ReleaseFile:
"""! Class representing a Released file on Nebula
`name`: str
Mod (or build) name,
`url`: str
Primary host URL,
`group`: str
Mod group string,
`subgroup`: str
Mod subgroup string,
`mirrors`: List[str]
List of URL's of FTP mirrors
"""
def __init__(self, name, url, group, subgroup=None, mirrors=None):
if mirrors is None:
mirrors = []
self.mirrors = mirrors
self.subgroup = subgroup
self.group = group
self.url = url
self.name = name
self.base_url = "/".join(url.split('/')[0:-1]) + "/"
self.filename = url.split('/')[-1]
# A list of tuples of (filename, hash)
self.content_hashes = None
self.hash = None
self.size = 0
def __repr__(self):
return repr((self.name))
class SourceFile:
"""! Class represeting a source file
`name`: str
File name,
`url`: str
FTP URL,
`group`
<unknown>
@details More details
"""
def __init__(self, name, url, group):
self.group = group
self.url = url
self.name = name
class FileGroup:
"""! Represents a file group
`name`: str
Name of this group
`files`: List[ReleaseFile]
List of files within this group
`mainFile`: str
If this FileGroup has a subgroup, `mainFile` is the head of that group
`subFiles`: List[ReleaseFile]
Files within a subgroup
"""
def __init__(self, name, files: List[ReleaseFile]):
self.files = files
self.name = name
if len(files) == 1:
self.mainFile = files[0]
self.subFiles = {}
else:
self.mainFile = None
subFiles = []
for file in files:
# We only have subcategories for Windows where SSE2 is the main group
if file.subgroup == "SSE2":
self.mainFile = file
else:
subFiles.append(file)
self.subFiles = dict(((x[0], next(x[1])) for x in groupby(subFiles, lambda f: f.subgroup)))
def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:
"""! Brief Gets the binary and source files from the Github Release server
@param[in] `tag_name` Git tag of the current release
    @param[in] `config` config metadata set in main.py
@returns `List[ReleaseFile]` List of release files
@returns `Dict[str, SourceFile]` Dictionary of source files
@details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not
actually downloaded here, just their metadata is gathered and organized in their respective container for later
use.
"""
@retry_multi(5) # retry at most 5 times
def execute_request(path):
"""!
@brief Performs a GET request with the given path. To be used with Github's REST API.
@returns If successful, returns a .JSON object
"""
headers = {
"Accept": "application/vnd.github.v3+json"
}
url = "https://api.github.com" + path
# GET https://api.github.com/<path> Accept: "application/vnd.github.v3+json"
response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)
response.raise_for_status() # Raise a RequestException if we failed, and trigger retry
return response.json()
build_group_regex = re.compile("fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*") # regex for matching binary .zip's and .7z's
source_file_regex = re.compile("fs2_open_.*-source-([^.]*)?.*") # regex for matching source .zip's and .7z's
# Get the github release metadata of the given tag name
response = execute_request(
"/repos/{}/releases/tags/{}".format(config["github"]["repo"], tag_name))
# Extract the binary and source files from the response["asset"] metadata
binary_files = []
source_files = {}
for asset in response["assets"]:
url = asset["browser_download_url"]
name = asset["name"]
group_match = build_group_regex.match(name)
if group_match is not None:
platform = group_match.group(1)
# x64 is the Visual Studio name but for consistency we need Win64
if platform == "x64":
platform = "Win64"
binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))
else:
group_match = source_file_regex.match(name)
if group_match is None:
continue
group = group_match.group(1)
source_files[group] = SourceFile(name, url, group)
binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)
return binary_files, source_files
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :
"""!
@brief Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes
@param [in] `build_type` Unknown str
@param [in] `tag_name` Github tag name of the release
@param [in] `config` config metadata set in main.py
"""
tag_regex = re.compile("nightly_(.*)")
build_group_regex = re.compile("nightly_.*-builds-([^.]+).*")
files = []
try:
with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp:
# extract version
version_str = tag_regex.match(tag_name).group(1)
# extract filepath w/ version
# then list all ftp hits with that path
path_template = config["ftp"]["path"]
path = path_template.format(type=build_type, version=version_str)
file_entries = list(ftp.mlsd(path, ["type"]))
# get all ftp hits of type file
for entry in file_entries:
if entry[1]["type"] == "file":
files.append(entry[0])
except error_perm:
print("Received permanent FTP error!")
return []
out_data = []
for file in files:
# from the file list, extract only nightly files
file_match = build_group_regex.match(file)
if file_match is None:
print("Ignoring non nightly file '{}'".format(file))
continue
group_match = file_match.group(1)
primary_url = None
mirrors = []
# x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post
if "x64" in group_match:
group_match = group_match.replace("x64", "Win64")
# construct the download URL list for all mirrors. The first listed ftp location is taken as the Primary
for mirror in config["ftp"]["mirrors"]:
download_url = mirror.format(type=build_type, version=version_str, file=file)
if primary_url is None:
primary_url = download_url
else:
mirrors.append(download_url)
# Form the List[ReleaseFile] list with the download URL links
out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))
return out_data
|
normal
|
{
"blob_id": "612b1851ba5a07a277982ed5be334392182c66ef",
"index": 4064,
"step-1": "<mask token>\n\n\nclass ReleaseFile:\n <mask token>\n <mask token>\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n self.base_url = '/'.join(url.split('/')[0:-1]) + '/'\n self.filename = url.split('/')[-1]\n self.content_hashes = None\n self.hash = None\n self.size = 0\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n self.base_url = '/'.join(url.split('/')[0:-1]) + '/'\n self.filename = url.split('/')[-1]\n self.content_hashes = None\n self.hash = None\n self.size = 0\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\ndef get_release_files(tag_name, config) ->Tuple[List[ReleaseFile], Dict[str,\n SourceFile]]:\n \"\"\"! Brief Gets the binary and source files from the Github Release server\n\n @param[in] `tag_name` Git tag of the current release\n @param[in] `config` confi metadata set in main.py\n\n @returns `List[ReleaseFile]` List of release files\n @returns `Dict[str, SourceFile]` Dictionary of source files\n\n @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not\n actually downloaded here, just their metadata is gathered and organized in their respective container for later\n use.\n \"\"\"\n\n @retry_multi(5)\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {'Accept': 'application/vnd.github.v3+json'}\n url = 'https://api.github.com' + path\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n response.raise_for_status()\n return response.json()\n build_group_regex = re.compile('fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*')\n source_file_regex = re.compile('fs2_open_.*-source-([^.]*)?.*')\n response = execute_request('/repos/{}/releases/tags/{}'.format(config[\n 'github']['repo'], tag_name))\n binary_files = []\n source_files = {}\n for asset in response['assets']:\n url = asset['browser_download_url']\n name = asset['name']\n group_match = build_group_regex.match(name)\n if group_match is not None:\n platform = group_match.group(1)\n if platform == 'x64':\n platform = 'Win64'\n binary_files.append(ReleaseFile(name, url, platform,\n group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n if group_match is None:\n continue\n group = group_match.group(1)\n source_files[group] = SourceFile(name, url, group)\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n return binary_files, source_files\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n self.base_url = '/'.join(url.split('/')[0:-1]) + '/'\n self.filename = url.split('/')[-1]\n self.content_hashes = None\n self.hash = None\n self.size = 0\n\n def __repr__(self):\n return repr(self.name)\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n if file.subgroup == 'SSE2':\n self.mainFile = file\n else:\n subFiles.append(file)\n self.subFiles = dict((x[0], next(x[1])) for x in groupby(\n subFiles, lambda f: f.subgroup))\n\n\ndef get_release_files(tag_name, config) ->Tuple[List[ReleaseFile], Dict[str,\n SourceFile]]:\n \"\"\"! Brief Gets the binary and source files from the Github Release server\n\n @param[in] `tag_name` Git tag of the current release\n @param[in] `config` confi metadata set in main.py\n\n @returns `List[ReleaseFile]` List of release files\n @returns `Dict[str, SourceFile]` Dictionary of source files\n\n @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not\n actually downloaded here, just their metadata is gathered and organized in their respective container for later\n use.\n \"\"\"\n\n @retry_multi(5)\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {'Accept': 'application/vnd.github.v3+json'}\n url = 'https://api.github.com' + path\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n response.raise_for_status()\n return response.json()\n build_group_regex = re.compile('fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*')\n source_file_regex = re.compile('fs2_open_.*-source-([^.]*)?.*')\n response = execute_request('/repos/{}/releases/tags/{}'.format(config[\n 'github']['repo'], tag_name))\n binary_files = []\n source_files = {}\n for asset in response['assets']:\n url = asset['browser_download_url']\n name = asset['name']\n group_match = build_group_regex.match(name)\n if group_match is not None:\n platform = group_match.group(1)\n if platform == 'x64':\n platform = 'Win64'\n binary_files.append(ReleaseFile(name, url, platform,\n group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n if group_match is None:\n continue\n group = group_match.group(1)\n source_files[group] = SourceFile(name, url, group)\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n return binary_files, source_files\n\n\ndef get_ftp_files(build_type, tag_name, config) ->List[ReleaseFile]:\n \"\"\"!\n @brief Gets file metadata for nightlies hosted on FTP, as determined by config[\"ftp\"] attributes\n \n @param [in] `build_type` Unknown str\n @param [in] `tag_name` Github tag name of the release\n @param [in] `config` config metadata set in main.py\n \"\"\"\n tag_regex = re.compile('nightly_(.*)')\n build_group_regex = re.compile('nightly_.*-builds-([^.]+).*')\n files = []\n try:\n with FTP(config['ftp']['host'], config['ftp']['user'], config['ftp'\n ]['pass']) as ftp:\n version_str = tag_regex.match(tag_name).group(1)\n path_template = config['ftp']['path']\n path = path_template.format(type=build_type, version=version_str)\n file_entries = list(ftp.mlsd(path, ['type']))\n for entry in file_entries:\n if entry[1]['type'] == 'file':\n files.append(entry[0])\n except error_perm:\n print('Received permanent FTP error!')\n return []\n out_data = []\n for file in files:\n file_match = build_group_regex.match(file)\n if file_match is None:\n print(\"Ignoring non nightly file '{}'\".format(file))\n continue\n group_match = file_match.group(1)\n primary_url = None\n mirrors = []\n if 'x64' in group_match:\n group_match = group_match.replace('x64', 'Win64')\n for mirror in config['ftp']['mirrors']:\n download_url = mirror.format(type=build_type, version=\n version_str, file=file)\n if primary_url is None:\n primary_url = download_url\n else:\n mirrors.append(download_url)\n out_data.append(ReleaseFile(file, primary_url, group_match, None,\n mirrors))\n return out_data\n",
"step-5": "import re # regex module\nfrom ftplib import FTP, error_perm\nfrom itertools import groupby\nfrom typing import List, Tuple, Dict\n\nimport requests # HTTP requests module\n\nfrom util import retry_multi, GLOBAL_TIMEOUT\t# from util.py\n\n\nclass ReleaseFile:\n \"\"\"! Class representing a Released file on Nebula\n\n `name`: str\n Mod (or build) name,\n `url`: str\n Primary host URL,\n `group`: str\n Mod group string,\n `subgroup`: str\n Mod subgroup string,\n `mirrors`: List[str]\n List of URL's of FTP mirrors\n \"\"\"\n def __init__(self, name, url, group, subgroup=None, mirrors=None):\n if mirrors is None:\n mirrors = []\n self.mirrors = mirrors\n self.subgroup = subgroup\n self.group = group\n self.url = url\n self.name = name\n\n self.base_url = \"/\".join(url.split('/')[0:-1]) + \"/\"\n self.filename = url.split('/')[-1]\n\n # A list of tuples of (filename, hash)\n self.content_hashes = None\n\n self.hash = None\n self.size = 0\n \n def __repr__(self):\n return repr((self.name))\n\n\nclass SourceFile:\n \"\"\"! Class represeting a source file\n\n `name`: str\n File name,\n `url`: str\n FTP URL,\n `group`\n <unknown>\n \n @details More details\n \"\"\"\n def __init__(self, name, url, group):\n self.group = group\n self.url = url\n self.name = name\n\n\nclass FileGroup:\n \"\"\"! Represents a file group\n\n `name`: str\n Name of this group\n `files`: List[ReleaseFile]\n List of files within this group\n `mainFile`: str\n If this FileGroup has a subgroup, `mainFile` is the head of that group\n `subFiles`: List[ReleaseFile]\n Files within a subgroup\n \"\"\"\n\n def __init__(self, name, files: List[ReleaseFile]):\n self.files = files\n self.name = name\n\n if len(files) == 1:\n self.mainFile = files[0]\n self.subFiles = {}\n else:\n self.mainFile = None\n subFiles = []\n for file in files:\n # We only have subcategories for Windows where SSE2 is the main group\n if file.subgroup == \"SSE2\":\n self.mainFile = file\n else:\n subFiles.append(file)\n\n self.subFiles = dict(((x[0], next(x[1])) for x in groupby(subFiles, lambda f: f.subgroup)))\n\n\ndef get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:\n \"\"\"! Brief Gets the binary and source files from the Github Release server\n\n @param[in] `tag_name` Git tag of the current release\n @param[in] `config` confi metadata set in main.py\n\n @returns `List[ReleaseFile]` List of release files\n @returns `Dict[str, SourceFile]` Dictionary of source files\n\n @details Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not\n actually downloaded here, just their metadata is gathered and organized in their respective container for later\n use.\n \"\"\"\n\n @retry_multi(5)\t# retry at most 5 times\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()\n\n build_group_regex = re.compile(\"fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*\") # regex for matching binary .zip's and .7z's\n source_file_regex = re.compile(\"fs2_open_.*-source-([^.]*)?.*\") # regex for matching source .zip's and .7z's\n\n # Get the github release metadata of the given tag name\n response = execute_request(\n \"/repos/{}/releases/tags/{}\".format(config[\"github\"][\"repo\"], tag_name))\n\n # Extract the binary and source files from the response[\"asset\"] metadata\n binary_files = []\n source_files = {}\n for asset in response[\"assets\"]:\n url = asset[\"browser_download_url\"]\n name = asset[\"name\"]\n\n group_match = build_group_regex.match(name)\n\n if group_match is not None:\n platform = group_match.group(1)\n # x64 is the Visual Studio name but for consistency we need Win64\n if platform == \"x64\":\n platform = \"Win64\"\n\n binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n\n if group_match is None:\n continue\n\n group = group_match.group(1)\n\n source_files[group] = SourceFile(name, url, group)\n\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n\n return binary_files, source_files\n\n\ndef get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :\n \"\"\"!\n @brief Gets file metadata for nightlies hosted on FTP, as determined by config[\"ftp\"] attributes\n \n @param [in] `build_type` Unknown str\n @param [in] `tag_name` Github tag name of the release\n @param [in] `config` config metadata set in main.py\n \"\"\"\n\n tag_regex = re.compile(\"nightly_(.*)\")\n build_group_regex = re.compile(\"nightly_.*-builds-([^.]+).*\")\n\n files = []\n try:\n with FTP(config[\"ftp\"][\"host\"], config[\"ftp\"][\"user\"], config[\"ftp\"][\"pass\"]) as ftp:\n # extract version\n version_str = tag_regex.match(tag_name).group(1)\n\n # extract filepath w/ version\n # then list all ftp hits with that path\n path_template = config[\"ftp\"][\"path\"]\n path = path_template.format(type=build_type, version=version_str)\n file_entries = list(ftp.mlsd(path, [\"type\"]))\n\n # get all ftp hits of type file\n for entry in file_entries:\n if entry[1][\"type\"] == \"file\":\n files.append(entry[0])\n except error_perm:\n print(\"Received permanent FTP error!\")\n return []\n\n out_data = []\n for file in files:\n # from the file list, extract only nightly files\n file_match = build_group_regex.match(file)\n if file_match is None:\n print(\"Ignoring non nightly file '{}'\".format(file))\n continue\n\n group_match = file_match.group(1)\n primary_url = None\n mirrors = []\n\n # x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post\n if \"x64\" in group_match:\n group_match = group_match.replace(\"x64\", \"Win64\")\n\n # construct the download URL list for all mirrors. 
The first listed ftp location is taken as the Primary\n for mirror in config[\"ftp\"][\"mirrors\"]:\n download_url = mirror.format(type=build_type, version=version_str, file=file)\n if primary_url is None:\n primary_url = download_url\n else:\n mirrors.append(download_url)\n\n # Form the List[ReleaseFile] list with the download URL links\n out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))\n\n return out_data",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('blog', '0004_auto_20200825_1318')]
operations = [migrations.RenameField(model_name='cv', old_name=
'additionalskills_text', new_name='additional_skills_text'),
migrations.RenameField(model_name='cv', old_name=
'additionalskills_title', new_name='additional_skills_title'),
migrations.RenameField(model_name='cv', old_name=
'workexperience_date', new_name='work_experience_date'), migrations
.RenameField(model_name='cv', old_name='workexperience_header',
new_name='work_experience_header'), migrations.RenameField(
model_name='cv', old_name='workexperience_text', new_name=
'work_experience_text'), migrations.RenameField(model_name='cv',
old_name='workexperience_title', new_name='work_experience_title')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('blog', '0004_auto_20200825_1318')]
operations = [migrations.RenameField(model_name='cv', old_name=
'additionalskills_text', new_name='additional_skills_text'),
migrations.RenameField(model_name='cv', old_name=
'additionalskills_title', new_name='additional_skills_title'),
migrations.RenameField(model_name='cv', old_name=
'workexperience_date', new_name='work_experience_date'), migrations
.RenameField(model_name='cv', old_name='workexperience_header',
new_name='work_experience_header'), migrations.RenameField(
model_name='cv', old_name='workexperience_text', new_name=
'work_experience_text'), migrations.RenameField(model_name='cv',
old_name='workexperience_title', new_name='work_experience_title')]
<|reserved_special_token_1|>
# Generated by Django 2.2.14 on 2020-08-25 17:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20200825_1318'),
]
operations = [
migrations.RenameField(
model_name='cv',
old_name='additionalskills_text',
new_name='additional_skills_text',
),
migrations.RenameField(
model_name='cv',
old_name='additionalskills_title',
new_name='additional_skills_title',
),
migrations.RenameField(
model_name='cv',
old_name='workexperience_date',
new_name='work_experience_date',
),
migrations.RenameField(
model_name='cv',
old_name='workexperience_header',
new_name='work_experience_header',
),
migrations.RenameField(
model_name='cv',
old_name='workexperience_text',
new_name='work_experience_text',
),
migrations.RenameField(
model_name='cv',
old_name='workexperience_title',
new_name='work_experience_title',
),
]
|
flexible
|
{
"blob_id": "e296a5bea5465c2b84e37c7d83922adb01feab70",
"index": 9828,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0004_auto_20200825_1318')]\n operations = [migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_text', new_name='additional_skills_text'),\n migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_title', new_name='additional_skills_title'),\n migrations.RenameField(model_name='cv', old_name=\n 'workexperience_date', new_name='work_experience_date'), migrations\n .RenameField(model_name='cv', old_name='workexperience_header',\n new_name='work_experience_header'), migrations.RenameField(\n model_name='cv', old_name='workexperience_text', new_name=\n 'work_experience_text'), migrations.RenameField(model_name='cv',\n old_name='workexperience_title', new_name='work_experience_title')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0004_auto_20200825_1318')]\n operations = [migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_text', new_name='additional_skills_text'),\n migrations.RenameField(model_name='cv', old_name=\n 'additionalskills_title', new_name='additional_skills_title'),\n migrations.RenameField(model_name='cv', old_name=\n 'workexperience_date', new_name='work_experience_date'), migrations\n .RenameField(model_name='cv', old_name='workexperience_header',\n new_name='work_experience_header'), migrations.RenameField(\n model_name='cv', old_name='workexperience_text', new_name=\n 'work_experience_text'), migrations.RenameField(model_name='cv',\n old_name='workexperience_title', new_name='work_experience_title')]\n",
"step-5": "# Generated by Django 2.2.14 on 2020-08-25 17:00\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0004_auto_20200825_1318'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='cv',\n old_name='additionalskills_text',\n new_name='additional_skills_text',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='additionalskills_title',\n new_name='additional_skills_title',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_date',\n new_name='work_experience_date',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_header',\n new_name='work_experience_header',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_text',\n new_name='work_experience_text',\n ),\n migrations.RenameField(\n model_name='cv',\n old_name='workexperience_title',\n new_name='work_experience_title',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
class Madlib:
'''
This class generates the madlib from word lists.
'''
def get_madlib(self):
madlib = """
Once there was a {0}. It {1} at the {2}.
Then because of its {3} it {4}. Wow! You sure are {5}!
Thanks! I {6} you very much.
"""
nouns = ['cheesecakes', 'bicycle', 'park', 'computer']
verbs = ['watched tv', 'voted', 'fell over']
adjectives = ['smelly', 'slimy', 'soft', 'loud']
output = madlib.format(
random.choice(nouns),
random.choice(verbs),
random.choice(nouns),
random.choice(nouns),
random.choice(verbs),
random.choice(adjectives),
random.choice(adjectives)
)
return output
|
normal
|
{
"blob_id": "2b23237e697cb4ca8f1013d7be343c70fba9541d",
"index": 6342,
"step-1": "<mask token>\n\n\nclass Madlib:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Madlib:\n <mask token>\n\n def get_madlib(self):\n madlib = \"\"\"\n Once there was a {0}. It {1} at the {2}.\n Then because of its {3} it {4}. Wow! You sure are {5}!\n Thanks! I {6} you very much.\n \"\"\"\n nouns = ['cheesecakes', 'bicycle', 'park', 'computer']\n verbs = ['watched tv', 'voted', 'fell over']\n adjectives = ['smelly', 'slimy', 'soft', 'loud']\n output = madlib.format(random.choice(nouns), random.choice(verbs),\n random.choice(nouns), random.choice(nouns), random.choice(verbs\n ), random.choice(adjectives), random.choice(adjectives))\n return output\n",
"step-3": "<mask token>\n\n\nclass Madlib:\n \"\"\"\n This class generates the madlib from word lists.\n \"\"\"\n\n def get_madlib(self):\n madlib = \"\"\"\n Once there was a {0}. It {1} at the {2}.\n Then because of its {3} it {4}. Wow! You sure are {5}!\n Thanks! I {6} you very much.\n \"\"\"\n nouns = ['cheesecakes', 'bicycle', 'park', 'computer']\n verbs = ['watched tv', 'voted', 'fell over']\n adjectives = ['smelly', 'slimy', 'soft', 'loud']\n output = madlib.format(random.choice(nouns), random.choice(verbs),\n random.choice(nouns), random.choice(nouns), random.choice(verbs\n ), random.choice(adjectives), random.choice(adjectives))\n return output\n",
"step-4": "import random\n\n\nclass Madlib:\n \"\"\"\n This class generates the madlib from word lists.\n \"\"\"\n\n def get_madlib(self):\n madlib = \"\"\"\n Once there was a {0}. It {1} at the {2}.\n Then because of its {3} it {4}. Wow! You sure are {5}!\n Thanks! I {6} you very much.\n \"\"\"\n nouns = ['cheesecakes', 'bicycle', 'park', 'computer']\n verbs = ['watched tv', 'voted', 'fell over']\n adjectives = ['smelly', 'slimy', 'soft', 'loud']\n output = madlib.format(random.choice(nouns), random.choice(verbs),\n random.choice(nouns), random.choice(nouns), random.choice(verbs\n ), random.choice(adjectives), random.choice(adjectives))\n return output\n",
"step-5": "import random\n\n\nclass Madlib:\n '''\n This class generates the madlib from word lists.\n '''\n def get_madlib(self):\n madlib = \"\"\"\n Once there was a {0}. It {1} at the {2}.\n Then because of its {3} it {4}. Wow! You sure are {5}!\n Thanks! I {6} you very much.\n \"\"\"\n nouns = ['cheesecakes', 'bicycle', 'park', 'computer']\n verbs = ['watched tv', 'voted', 'fell over']\n adjectives = ['smelly', 'slimy', 'soft', 'loud']\n\n output = madlib.format(\n random.choice(nouns),\n random.choice(verbs),\n random.choice(nouns),\n random.choice(nouns),\n random.choice(verbs),\n random.choice(adjectives),\n random.choice(adjectives)\n )\n return output\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 2.2.2 on 2019-07-09 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0015_auto_20190709_1543'),
]
operations = [
migrations.CreateModel(
name='ExampleModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model_pic', models.ImageField(null=True, upload_to='image/')),
],
),
migrations.RemoveField(
model_name='post',
name='photo',
),
]
|
normal
|
{
"blob_id": "d6e06a78c9a5d8184e5adf9b99cc6030c3434558",
"index": 8464,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0015_auto_20190709_1543')]\n operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('model_pic', models.ImageField(null=\n True, upload_to='image/'))]), migrations.RemoveField(model_name=\n 'post', name='photo')]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0015_auto_20190709_1543')]\n operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('model_pic', models.ImageField(null=\n True, upload_to='image/'))]), migrations.RemoveField(model_name=\n 'post', name='photo')]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-07-09 20:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0015_auto_20190709_1543'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ExampleModel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('model_pic', models.ImageField(null=True, upload_to='image/')),\n ],\n ),\n migrations.RemoveField(\n model_name='post',\n name='photo',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for f in pyfiles:
contents = open(f).read()
for m in modulenames:
v1 = 'import ' + m
v2 = 'from ' + m
if v1 or v2 in contents:
contents = contents.replace(v1, 'import .' + m)
contents = contents.replace(v2, 'from .' + m)
with open('new_' + f, 'w') as outf:
outf.write(contents)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pyfiles = glob.glob('*.py')
modulenames = [f.split('.')[0] for f in pyfiles]
for f in pyfiles:
contents = open(f).read()
for m in modulenames:
v1 = 'import ' + m
v2 = 'from ' + m
if v1 or v2 in contents:
contents = contents.replace(v1, 'import .' + m)
contents = contents.replace(v2, 'from .' + m)
with open('new_' + f, 'w') as outf:
outf.write(contents)
<|reserved_special_token_1|>
import glob
pyfiles = glob.glob('*.py')
modulenames = [f.split('.')[0] for f in pyfiles]
for f in pyfiles:
contents = open(f).read()
for m in modulenames:
v1 = 'import ' + m
v2 = 'from ' + m
if v1 or v2 in contents:
contents = contents.replace(v1, 'import .' + m)
contents = contents.replace(v2, 'from .' + m)
with open('new_' + f, 'w') as outf:
outf.write(contents)
<|reserved_special_token_1|>
import glob
pyfiles = glob.glob('*.py')
modulenames = [f.split('.')[0] for f in pyfiles]
# print(modulenames)
for f in pyfiles:
contents = open(f).read()
for m in modulenames:
v1 = "import " + m
v2 = "from " + m
if v1 or v2 in contents:
contents = contents.replace(v1, "import ."+m)
contents = contents.replace(v2, "from ."+m)
with open('new_'+f, 'w') as outf:
outf.write(contents)
|
flexible
|
{
"blob_id": "d6a73365aa32c74798b6887ff46c0ed2323ed1a6",
"index": 2324,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = 'import ' + m\n v2 = 'from ' + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, 'import .' + m)\n contents = contents.replace(v2, 'from .' + m)\n with open('new_' + f, 'w') as outf:\n outf.write(contents)\n",
"step-3": "<mask token>\npyfiles = glob.glob('*.py')\nmodulenames = [f.split('.')[0] for f in pyfiles]\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = 'import ' + m\n v2 = 'from ' + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, 'import .' + m)\n contents = contents.replace(v2, 'from .' + m)\n with open('new_' + f, 'w') as outf:\n outf.write(contents)\n",
"step-4": "import glob\npyfiles = glob.glob('*.py')\nmodulenames = [f.split('.')[0] for f in pyfiles]\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = 'import ' + m\n v2 = 'from ' + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, 'import .' + m)\n contents = contents.replace(v2, 'from .' + m)\n with open('new_' + f, 'w') as outf:\n outf.write(contents)\n",
"step-5": "import glob\n\npyfiles = glob.glob('*.py')\n\nmodulenames = [f.split('.')[0] for f in pyfiles]\n\n# print(modulenames)\n\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = \"import \" + m\n v2 = \"from \" + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, \"import .\"+m)\n contents = contents.replace(v2, \"from .\"+m)\n with open('new_'+f, 'w') as outf:\n outf.write(contents)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DojoBookAppConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DojoBookAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'dojo_book_app'
<|reserved_special_token_1|>
from django.apps import AppConfig
class DojoBookAppConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'dojo_book_app'
|
flexible
|
{
"blob_id": "314f6cc97f53fa5bd8bf0ec0e1e305ca6384f1a2",
"index": 1559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DojoBookAppConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DojoBookAppConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'dojo_book_app'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass DojoBookAppConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'dojo_book_app'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def chargeable_deposit(amount, months, charge=0):
percent = get_percent(amount, months)
if not percent:
print('Нет подходящего тарифа')
total = amount
for month in range(months):
profit = total * percent / 100 / 12
total += profit
if month != 0 and month != months - 1:
total += charge + charge * percent / 100 / 12
print(round(total, 2))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def chargeable_deposit(amount, months, charge=0):
percent = get_percent(amount, months)
if not percent:
print('Нет подходящего тарифа')
total = amount
for month in range(months):
profit = total * percent / 100 / 12
total += profit
if month != 0 and month != months - 1:
total += charge + charge * percent / 100 / 12
print(round(total, 2))
chargeable_deposit(10000, 24, 100)
<|reserved_special_token_1|>
from task_1_4 import get_percent
def chargeable_deposit(amount, months, charge=0):
percent = get_percent(amount, months)
if not percent:
print('Нет подходящего тарифа')
total = amount
for month in range(months):
profit = total * percent / 100 / 12
total += profit
if month != 0 and month != months - 1:
total += charge + charge * percent / 100 / 12
print(round(total, 2))
chargeable_deposit(10000, 24, 100)
<|reserved_special_token_1|>
# 5. Improve the "Bank deposit" program. The third argument passed to the function must be
# a fixed monthly amount added to the deposit. Inside the main function, implement a
# nested function that calculates the interest earned on the added amounts.
# Assume the client deposits the money on the last day of each month, except the first and
# the last. For example, with a 6-month term the deposit is topped up during 4 months.
# The nested function returns the total of the additional contributions (with interest),
# and the main function returns the total value of the deposit at the end of the period.
from task_1_4 import get_percent
def chargeable_deposit(amount, months, charge=0):
percent = get_percent(amount, months)
if not percent:
print('Нет подходящего тарифа')
total = amount
for month in range(months):
profit = total * percent / 100 / 12
total += profit
if month != 0 and month != months - 1:
total += charge + charge * percent / 100 / 12
print(round(total, 2))
chargeable_deposit(10000, 24, 100)
|
flexible
|
{
"blob_id": "bf9e83591f737caec3060b72d86d56faec9bb23b",
"index": 8079,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef chargeable_deposit(amount, months, charge=0):\n percent = get_percent(amount, months)\n if not percent:\n print('Нет подходящего тарифа')\n total = amount\n for month in range(months):\n profit = total * percent / 100 / 12\n total += profit\n if month != 0 and month != months - 1:\n total += charge + charge * percent / 100 / 12\n print(round(total, 2))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef chargeable_deposit(amount, months, charge=0):\n percent = get_percent(amount, months)\n if not percent:\n print('Нет подходящего тарифа')\n total = amount\n for month in range(months):\n profit = total * percent / 100 / 12\n total += profit\n if month != 0 and month != months - 1:\n total += charge + charge * percent / 100 / 12\n print(round(total, 2))\n\n\nchargeable_deposit(10000, 24, 100)\n",
"step-4": "from task_1_4 import get_percent\n\n\ndef chargeable_deposit(amount, months, charge=0):\n percent = get_percent(amount, months)\n if not percent:\n print('Нет подходящего тарифа')\n total = amount\n for month in range(months):\n profit = total * percent / 100 / 12\n total += profit\n if month != 0 and month != months - 1:\n total += charge + charge * percent / 100 / 12\n print(round(total, 2))\n\n\nchargeable_deposit(10000, 24, 100)\n",
"step-5": "# 5. Усовершенствовать программу «Банковский депозит». Третьим аргументом в функцию должна\r\n# передаваться фиксированная ежемесячная сумма пополнения вклада. Необходимо в главной\r\n# функции реализовать вложенную функцию подсчета процентов для пополняемой суммы.\r\n# Примем, что клиент вносит средства в последний день каждого месяца, кроме первого и\r\n# последнего. Например, при сроке вклада в 6 месяцев пополнение происходит в течение 4\r\n# месяцев. Вложенная функция возвращает сумму дополнительно внесенных средств (с\r\n# процентами), а главная функция — общую сумму по вкладу на конец периода.\r\n\r\nfrom task_1_4 import get_percent\r\n\r\n\r\ndef chargeable_deposit(amount, months, charge=0):\r\n percent = get_percent(amount, months)\r\n if not percent:\r\n print('Нет подходящего тарифа')\r\n\r\n total = amount\r\n for month in range(months):\r\n profit = total * percent / 100 / 12\r\n total += profit\r\n if month != 0 and month != months - 1:\r\n total += charge + charge * percent / 100 / 12\r\n\r\n print(round(total, 2))\r\n\r\n\r\nchargeable_deposit(10000, 24, 100)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
import os
import base64
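# Use tshark to decode the capture's nonstandard ports (57000, 44322, 44818) as HTTP and dump each packet's http.file_data payload into the file "request"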
os.popen("tshark -r log.pcap -d 'tcp.port==57000,http' -d 'tcp.port==44322,http' -d 'tcp.port==44818,http' -Y 'data-text-lines' -Tfields -e http.file_data > request")
def evals(text):
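    # De-obfuscate the payload: patterns such as 5['__doc__'][12] or ''['__doc__'][3] index into builtin docstrings,
    # so rewrite them into valid attribute lookups (int.__doc__, tuple.__doc__, ...) and eval() each one to recover a character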
template = "{}\['__doc__'\]\[\d+\]"
keys = map(str, range(10))
keys += ['\[\]','\(\)',"''"]
rule = '|'.join(template.format(_) for _ in keys)
regex = re.compile(rule + "|'[\w|\d]'")
for i in regex.findall(text):
r = i.replace("['__doc__']", ".__doc__")
r = re.sub('^\d', 'int', r)
r = re.sub('^\(\)', 'tuple', r)
text = text.replace(i, eval(r))
text = text.replace('\n', '\\n')
return text.replace('~','')
def extract(text):
regex = re.compile(r'-s (\d+) -l \d+ ([\w\.]+)\).*\[(\d+)\].*\((\w|\d|\\n)\)')
return regex.findall(text)[0]
requ = open('request').readlines()[:]
result = dict()
for x in requ:
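    # Each captured request is a base64 blob: strip literal \n markers, decode it, keep the value after '=', and de-obfuscate it with evals()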
clean = x.strip('\n')
clean = re.sub(r'\\n', '', clean)
clean = base64.b64decode(clean)
clean = evals(clean.split('=')[1])
if 'index' in clean:
index, name, pos, char = extract(clean)
key = result.get(name, dict())
index = int(index)
pos = int(pos)
if not key:
result[name] = key
lastIndexed = result[name].get(index, dict())
if not lastIndexed:
result[name][index] = lastIndexed
lastOccurence = result[name][index].get(pos, [''])
if not lastOccurence[0]:
result[name][index][pos] = lastOccurence
lastOccurence[0] = (index, pos, char)
for k,v in result.iteritems():
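    # Rebuild each exfiltrated file: walk its chunks in (index, position) order, concatenate the recovered hex characters, then hex-decode and write the bytes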
print '[+] Saving', k
temp = ''
for kk in sorted(v):
vv = result[k][kk]
for kkk in sorted(vv):
vvv = result[k][kk][kkk]
char = vvv[0][-1]
if char != '\\n':
temp += vvv[0][-1]
with open(k, 'wb') as f:
content = temp.decode('hex')
f.write(content)
|
normal
|
{
"blob_id": "c26bdc3f47aa9ac0cda0334e97bdaf3f9d56eb6c",
"index": 437,
"step-1": "import re\nimport os\nimport base64\n\nos.popen(\"tshark -r log.pcap -d 'tcp.port==57000,http' -d 'tcp.port==44322,http' -d 'tcp.port==44818,http' -Y 'data-text-lines' -Tfields -e http.file_data > request\")\n\ndef evals(text):\n template = \"{}\\['__doc__'\\]\\[\\d+\\]\"\n keys = map(str, range(10))\n keys += ['\\[\\]','\\(\\)',\"''\"]\n \n rule = '|'.join(template.format(_) for _ in keys)\n regex = re.compile(rule + \"|'[\\w|\\d]'\")\n\n for i in regex.findall(text):\n r = i.replace(\"['__doc__']\", \".__doc__\")\n r = re.sub('^\\d', 'int', r)\n r = re.sub('^\\(\\)', 'tuple', r)\n text = text.replace(i, eval(r))\n \n text = text.replace('\\n', '\\\\n')\n return text.replace('~','')\n\ndef extract(text):\n regex = re.compile(r'-s (\\d+) -l \\d+ ([\\w\\.]+)\\).*\\[(\\d+)\\].*\\((\\w|\\d|\\\\n)\\)')\n return regex.findall(text)[0]\n\nrequ = open('request').readlines()[:]\nresult = dict()\n\nfor x in requ:\n clean = x.strip('\\n')\n clean = re.sub(r'\\\\n', '', clean)\n clean = base64.b64decode(clean)\n clean = evals(clean.split('=')[1])\n\n if 'index' in clean:\n index, name, pos, char = extract(clean)\n key = result.get(name, dict())\n index = int(index)\n pos = int(pos)\n\n if not key:\n result[name] = key\n\n lastIndexed = result[name].get(index, dict())\n if not lastIndexed:\n result[name][index] = lastIndexed\n\n lastOccurence = result[name][index].get(pos, [''])\n if not lastOccurence[0]:\n result[name][index][pos] = lastOccurence\n \n lastOccurence[0] = (index, pos, char)\n\nfor k,v in result.iteritems():\n print '[+] Saving', k\n\n temp = ''\n for kk in sorted(v):\n vv = result[k][kk]\n\n for kkk in sorted(vv):\n vvv = result[k][kk][kkk]\n\n char = vvv[0][-1]\n if char != '\\\\n':\n temp += vvv[0][-1]\n\n with open(k, 'wb') as f:\n content = temp.decode('hex')\n f.write(content)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from PIL import Image, ImageStat
import os
import shutil
# full white photo - 255.0
# full black photo - 0.0
class ImageSelection:
def __init__(self, path):
self.path = path
def brightness_check(self, image):
        '''scores the image on a 0-100 scale: the formula returns 0 for a fully white image and 100 for a fully black one'''
with Image.open(image).convert("L") as img:
z = ImageStat.Stat(img)
stat = 100*(255-z.mean[0])/255
return int(stat)
def averange_threshold(self, dictionary, img_list):
        '''counts the threshold as the average brightness score of all images'''
sum = 0
for value in dictionary.values():
sum += value
return int(sum/len(img_list))
def image_analysis(self):
        '''execution of the class: creates two folders (bright, dark) in path;
        each image's brightness score is appended to the name of its copy'''
img_list = os.listdir(self.path)
img_list = [os.path.join(self.path,elem) for elem in img_list]
extend_set = {".png", ".jpeg", ".jpg"}
dictionary = {os.path.basename(img): ImageSelection.brightness_check(self, img) for ext in extend_set for img in img_list if ext in img}
threshold = ImageSelection.averange_threshold(self, dictionary, img_list)
for key, value in dictionary.items():
if value < threshold:
os.makedirs(os.path.join(self.path, "bright"), exist_ok=True)
shutil.copy(os.path.join(self.path,key), os.path.join(self.path, "bright", key[:key.index(".")] + "_" + str(value) + key[key.index("."):]))
else:
os.makedirs(os.path.join(self.path, "dark"), exist_ok=True)
shutil.copy(os.path.join(self.path,key), os.path.join(self.path, "dark", key[:key.index(".")] + "_" + str(value) + key[key.index("."):]))
path = r"D:\Programy\z.programowanie\learning\to be sorted"
a = ImageSelection(path)
a.image_analysis()
|
normal
|
{
"blob_id": "897075810912e8360aa5cdedda3f12ce7c868263",
"index": 4547,
"step-1": "<mask token>\n\n\nclass ImageSelection:\n\n def __init__(self, path):\n self.path = path\n\n def brightness_check(self, image):\n \"\"\"count function to set value of brightness, 0 - full black, 100 - full bright\"\"\"\n with Image.open(image).convert('L') as img:\n z = ImageStat.Stat(img)\n stat = 100 * (255 - z.mean[0]) / 255\n return int(stat)\n\n def averange_threshold(self, dictionary, img_list):\n \"\"\"counts thrshold which is RMS of all images value\"\"\"\n sum = 0\n for value in dictionary.values():\n sum += value\n return int(sum / len(img_list))\n\n def image_analysis(self):\n \"\"\"execution of class, creates two folders (bright, dark) in path\n to images name is added theior value of brightness\"\"\"\n img_list = os.listdir(self.path)\n img_list = [os.path.join(self.path, elem) for elem in img_list]\n extend_set = {'.png', '.jpeg', '.jpg'}\n dictionary = {os.path.basename(img): ImageSelection.\n brightness_check(self, img) for ext in extend_set for img in\n img_list if ext in img}\n threshold = ImageSelection.averange_threshold(self, dictionary,\n img_list)\n for key, value in dictionary.items():\n if value < threshold:\n os.makedirs(os.path.join(self.path, 'bright'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'bright', key[:key.index('.')] + '_' + str(value\n ) + key[key.index('.'):]))\n else:\n os.makedirs(os.path.join(self.path, 'dark'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'dark', key[:key.index('.')] + '_' + str(value) +\n key[key.index('.'):]))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageSelection:\n\n def __init__(self, path):\n self.path = path\n\n def brightness_check(self, image):\n \"\"\"count function to set value of brightness, 0 - full black, 100 - full bright\"\"\"\n with Image.open(image).convert('L') as img:\n z = ImageStat.Stat(img)\n stat = 100 * (255 - z.mean[0]) / 255\n return int(stat)\n\n def averange_threshold(self, dictionary, img_list):\n \"\"\"counts thrshold which is RMS of all images value\"\"\"\n sum = 0\n for value in dictionary.values():\n sum += value\n return int(sum / len(img_list))\n\n def image_analysis(self):\n \"\"\"execution of class, creates two folders (bright, dark) in path\n to images name is added theior value of brightness\"\"\"\n img_list = os.listdir(self.path)\n img_list = [os.path.join(self.path, elem) for elem in img_list]\n extend_set = {'.png', '.jpeg', '.jpg'}\n dictionary = {os.path.basename(img): ImageSelection.\n brightness_check(self, img) for ext in extend_set for img in\n img_list if ext in img}\n threshold = ImageSelection.averange_threshold(self, dictionary,\n img_list)\n for key, value in dictionary.items():\n if value < threshold:\n os.makedirs(os.path.join(self.path, 'bright'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'bright', key[:key.index('.')] + '_' + str(value\n ) + key[key.index('.'):]))\n else:\n os.makedirs(os.path.join(self.path, 'dark'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'dark', key[:key.index('.')] + '_' + str(value) +\n key[key.index('.'):]))\n\n\n<mask token>\na.image_analysis()\n",
"step-3": "<mask token>\n\n\nclass ImageSelection:\n\n def __init__(self, path):\n self.path = path\n\n def brightness_check(self, image):\n \"\"\"count function to set value of brightness, 0 - full black, 100 - full bright\"\"\"\n with Image.open(image).convert('L') as img:\n z = ImageStat.Stat(img)\n stat = 100 * (255 - z.mean[0]) / 255\n return int(stat)\n\n def averange_threshold(self, dictionary, img_list):\n \"\"\"counts thrshold which is RMS of all images value\"\"\"\n sum = 0\n for value in dictionary.values():\n sum += value\n return int(sum / len(img_list))\n\n def image_analysis(self):\n \"\"\"execution of class, creates two folders (bright, dark) in path\n to images name is added theior value of brightness\"\"\"\n img_list = os.listdir(self.path)\n img_list = [os.path.join(self.path, elem) for elem in img_list]\n extend_set = {'.png', '.jpeg', '.jpg'}\n dictionary = {os.path.basename(img): ImageSelection.\n brightness_check(self, img) for ext in extend_set for img in\n img_list if ext in img}\n threshold = ImageSelection.averange_threshold(self, dictionary,\n img_list)\n for key, value in dictionary.items():\n if value < threshold:\n os.makedirs(os.path.join(self.path, 'bright'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'bright', key[:key.index('.')] + '_' + str(value\n ) + key[key.index('.'):]))\n else:\n os.makedirs(os.path.join(self.path, 'dark'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'dark', key[:key.index('.')] + '_' + str(value) +\n key[key.index('.'):]))\n\n\npath = 'D:\\\\Programy\\\\z.programowanie\\\\learning\\\\to be sorted'\na = ImageSelection(path)\na.image_analysis()\n",
"step-4": "from PIL import Image, ImageStat\nimport os\nimport shutil\n\n\nclass ImageSelection:\n\n def __init__(self, path):\n self.path = path\n\n def brightness_check(self, image):\n \"\"\"count function to set value of brightness, 0 - full black, 100 - full bright\"\"\"\n with Image.open(image).convert('L') as img:\n z = ImageStat.Stat(img)\n stat = 100 * (255 - z.mean[0]) / 255\n return int(stat)\n\n def averange_threshold(self, dictionary, img_list):\n \"\"\"counts thrshold which is RMS of all images value\"\"\"\n sum = 0\n for value in dictionary.values():\n sum += value\n return int(sum / len(img_list))\n\n def image_analysis(self):\n \"\"\"execution of class, creates two folders (bright, dark) in path\n to images name is added theior value of brightness\"\"\"\n img_list = os.listdir(self.path)\n img_list = [os.path.join(self.path, elem) for elem in img_list]\n extend_set = {'.png', '.jpeg', '.jpg'}\n dictionary = {os.path.basename(img): ImageSelection.\n brightness_check(self, img) for ext in extend_set for img in\n img_list if ext in img}\n threshold = ImageSelection.averange_threshold(self, dictionary,\n img_list)\n for key, value in dictionary.items():\n if value < threshold:\n os.makedirs(os.path.join(self.path, 'bright'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'bright', key[:key.index('.')] + '_' + str(value\n ) + key[key.index('.'):]))\n else:\n os.makedirs(os.path.join(self.path, 'dark'), exist_ok=True)\n shutil.copy(os.path.join(self.path, key), os.path.join(self\n .path, 'dark', key[:key.index('.')] + '_' + str(value) +\n key[key.index('.'):]))\n\n\npath = 'D:\\\\Programy\\\\z.programowanie\\\\learning\\\\to be sorted'\na = ImageSelection(path)\na.image_analysis()\n",
"step-5": "from PIL import Image, ImageStat\r\nimport os\r\nimport shutil\r\n\r\n# full white photo - 255.0\r\n# full black photo - 0.0\r\n\r\n\r\nclass ImageSelection:\r\n def __init__(self, path):\r\n self.path = path\r\n\r\n def brightness_check(self, image):\r\n '''count function to set value of brightness, 0 - full black, 100 - full bright'''\r\n with Image.open(image).convert(\"L\") as img:\r\n z = ImageStat.Stat(img)\r\n stat = 100*(255-z.mean[0])/255\r\n return int(stat)\r\n\r\n def averange_threshold(self, dictionary, img_list):\r\n '''counts thrshold which is RMS of all images value'''\r\n sum = 0\r\n for value in dictionary.values():\r\n sum += value\r\n return int(sum/len(img_list))\r\n\r\n def image_analysis(self):\r\n '''execution of class, creates two folders (bright, dark) in path\r\n to images name is added theior value of brightness'''\r\n img_list = os.listdir(self.path)\r\n img_list = [os.path.join(self.path,elem) for elem in img_list]\r\n extend_set = {\".png\", \".jpeg\", \".jpg\"}\r\n dictionary = {os.path.basename(img): ImageSelection.brightness_check(self, img) for ext in extend_set for img in img_list if ext in img}\r\n threshold = ImageSelection.averange_threshold(self, dictionary, img_list)\r\n for key, value in dictionary.items():\r\n if value < threshold:\r\n os.makedirs(os.path.join(self.path, \"bright\"), exist_ok=True)\r\n shutil.copy(os.path.join(self.path,key), os.path.join(self.path, \"bright\", key[:key.index(\".\")] + \"_\" + str(value) + key[key.index(\".\"):]))\r\n else:\r\n os.makedirs(os.path.join(self.path, \"dark\"), exist_ok=True)\r\n shutil.copy(os.path.join(self.path,key), os.path.join(self.path, \"dark\", key[:key.index(\".\")] + \"_\" + str(value) + key[key.index(\".\"):]))\r\n\r\n\r\npath = r\"D:\\Programy\\z.programowanie\\learning\\to be sorted\"\r\na = ImageSelection(path)\r\na.image_analysis()\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from crud.serializers import TodoListSerializer
from crud.models import TodoList
# Create your views here.
class TodoListViewSet(viewsets.ModelViewSet):
queryset = TodoList.objects.all()
serializer_class = TodoListSerializer
def delete(self, request, pk=None):
instance = TodoList.objects.get(id = pk)
instance.delete()
|
normal
|
{
"blob_id": "2d4680b63cdd05e89673c4bd6babda7ac6ebb588",
"index": 8895,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TodoListViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n def delete(self, request, pk=None):\n instance = TodoList.objects.get(id=pk)\n instance.delete()\n",
"step-3": "<mask token>\n\n\nclass TodoListViewSet(viewsets.ModelViewSet):\n queryset = TodoList.objects.all()\n serializer_class = TodoListSerializer\n\n def delete(self, request, pk=None):\n instance = TodoList.objects.get(id=pk)\n instance.delete()\n",
"step-4": "from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\nfrom crud.serializers import TodoListSerializer\nfrom crud.models import TodoList\n\n\nclass TodoListViewSet(viewsets.ModelViewSet):\n queryset = TodoList.objects.all()\n serializer_class = TodoListSerializer\n\n def delete(self, request, pk=None):\n instance = TodoList.objects.get(id=pk)\n instance.delete()\n",
"step-5": "from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom crud.serializers import TodoListSerializer\nfrom crud.models import TodoList\n\n# Create your views here.\n\nclass TodoListViewSet(viewsets.ModelViewSet):\n queryset = TodoList.objects.all()\n serializer_class = TodoListSerializer\n\n def delete(self, request, pk=None):\n instance = TodoList.objects.get(id = pk)\n instance.delete() ",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from selenium import webdriver
import time
import xlwt
from JD_PhoneNo import get_phone_no
book = xlwt.Workbook(encoding="utf-8")
sheet1=book.add_sheet("Sheet 1")
browser = webdriver.Firefox()
browser.get("https://www.zomato.com/bhopal/dinner")
z_hotel_list = []
z_address_list = []
z_phone_list = []
z_rating_list = []
z_costoftwo = []
z_votes = []
z_hours = []
def traverse(a,b):
temp = []
for i in range(a,b,1):
a = str(i)
button = browser.find_element_by_link_text(a)
button.click()
name_list = browser.find_elements_by_class_name("result-title.hover_feedback.zred.bold.ln24.fontsize0")
add_list = browser.find_elements_by_class_name("col-m-16.search-result-address.grey-text.nowrap.ln22")
phone_list = browser.find_elements_by_class_name("item.res-snippet-ph-info")
for i in range(1,18):
if(i==4 or i==10 ):
continue
else:
try:
z_costoftwo.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[3]/div[2]/span[2]").text)
except Exception as e:
z_costoftwo.append("NILL")
try:
z_hours.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[3]/div[3]/div[1]").text)
except Exception as e1:
z_hours.append("NILL")
try:
z_votes.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span").text)
except Exception as e1:
z_votes.append("NEW")
try:
z_rating_list.append(browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div["+str(i)+"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]").text)
except Exception as e:
z_rating_list.append("NILL")
for names in name_list:
z_hotel_list.append(names.text)
temp.append(names.text)
for addname in add_list:
z_address_list.append(addname.text)
for phonename in phone_list:
z_phone_list.append(phonename.get_attribute("data-phone-no-str"))
if(int(a)<6):
clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]")
clk.click()
else:
clk = browser.find_element_by_xpath("/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]")
clk.click()
traverse(1,6)
traverse(6,11)
traverse(11,16)
traverse(16,21)
traverse(21,26)
# traverse(26,31)
# traverse(31,36)
# traverse(36,41)
# traverse(41,46)
# traverse(46,51)
# traverse(51,56)
# for i in range(1,5,10):
# traverse(i,i+5)
# traverse(i+5,i+10)
for i in range(0,len(z_hotel_list),1):
sheet1.write(i,0,z_hotel_list[i])
for i in range(0, len(z_phone_list), 1):
sheet1.write(i,1,z_phone_list[i])
for i in range(0, len(z_address_list), 1):
sheet1.write(i, 2, z_address_list[i])
for i in range(0,len(z_rating_list)):
sheet1.write(i,3,z_rating_list[i])
for i in range(0, len(z_costoftwo)):
sheet1.write(i, 4, z_costoftwo[i])
for i in range(0, len(z_hours)):
sheet1.write(i, 5, z_hours[i])
for i in range(0, len(z_votes)):
sheet1.write(i, 6, z_votes[i])
print("Writing to excel Finished")
book.save("ZomatoBhopal(data).xls")
|
normal
|
{
"blob_id": "96425986305171a9d23231f60b35dcbcbbd12d2d",
"index": 7995,
"step-1": "<mask token>\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\n<mask token>\n",
"step-2": "<mask token>\nbrowser.get('https://www.zomato.com/bhopal/dinner')\n<mask token>\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n",
"step-3": "<mask token>\nbook = xlwt.Workbook(encoding='utf-8')\nsheet1 = book.add_sheet('Sheet 1')\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.zomato.com/bhopal/dinner')\nz_hotel_list = []\nz_address_list = []\nz_phone_list = []\nz_rating_list = []\nz_costoftwo = []\nz_votes = []\nz_hours = []\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n",
"step-4": "from selenium import webdriver\nimport time\nimport xlwt\nfrom JD_PhoneNo import get_phone_no\nbook = xlwt.Workbook(encoding='utf-8')\nsheet1 = book.add_sheet('Sheet 1')\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.zomato.com/bhopal/dinner')\nz_hotel_list = []\nz_address_list = []\nz_phone_list = []\nz_rating_list = []\nz_costoftwo = []\nz_votes = []\nz_hours = []\n\n\ndef traverse(a, b):\n temp = []\n for i in range(a, b, 1):\n a = str(i)\n button = browser.find_element_by_link_text(a)\n button.click()\n name_list = browser.find_elements_by_class_name(\n 'result-title.hover_feedback.zred.bold.ln24.fontsize0')\n add_list = browser.find_elements_by_class_name(\n 'col-m-16.search-result-address.grey-text.nowrap.ln22')\n phone_list = browser.find_elements_by_class_name(\n 'item.res-snippet-ph-info')\n for i in range(1, 18):\n if i == 4 or i == 10:\n continue\n else:\n try:\n z_costoftwo.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[2]/span[2]').text)\n except Exception as e:\n z_costoftwo.append('NILL')\n try:\n z_hours.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[3]/div[3]/div[1]').text)\n except Exception as e1:\n z_hours.append('NILL')\n try:\n z_votes.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span'\n ).text)\n except Exception as e1:\n z_votes.append('NEW')\n try:\n z_rating_list.append(browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div['\n + str(i) +\n ']/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]'\n ).text)\n except Exception as e:\n z_rating_list.append('NILL')\n for names in name_list:\n z_hotel_list.append(names.text)\n temp.append(names.text)\n for addname in add_list:\n z_address_list.append(addname.text)\n for phonename in phone_list:\n z_phone_list.append(phonename.get_attribute('data-phone-no-str'))\n if int(a) < 6:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]'\n )\n clk.click()\n else:\n clk = browser.find_element_by_xpath(\n '/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]'\n )\n clk.click()\n\n\ntraverse(1, 6)\ntraverse(6, 11)\ntraverse(11, 16)\ntraverse(16, 21)\ntraverse(21, 26)\nfor i in range(0, len(z_hotel_list), 1):\n sheet1.write(i, 0, z_hotel_list[i])\nfor i in range(0, len(z_phone_list), 1):\n sheet1.write(i, 1, z_phone_list[i])\nfor i in range(0, len(z_address_list), 1):\n sheet1.write(i, 2, z_address_list[i])\nfor i in range(0, len(z_rating_list)):\n sheet1.write(i, 3, z_rating_list[i])\nfor i in range(0, len(z_costoftwo)):\n sheet1.write(i, 4, z_costoftwo[i])\nfor i in range(0, len(z_hours)):\n sheet1.write(i, 5, z_hours[i])\nfor i in range(0, len(z_votes)):\n sheet1.write(i, 6, z_votes[i])\nprint('Writing to excel Finished')\nbook.save('ZomatoBhopal(data).xls')\n",
"step-5": "from selenium import webdriver\r\nimport time\r\nimport xlwt\r\nfrom JD_PhoneNo import get_phone_no\r\nbook = xlwt.Workbook(encoding=\"utf-8\")\r\nsheet1=book.add_sheet(\"Sheet 1\")\r\nbrowser = webdriver.Firefox()\r\nbrowser.get(\"https://www.zomato.com/bhopal/dinner\")\r\nz_hotel_list = []\r\nz_address_list = []\r\nz_phone_list = []\r\nz_rating_list = []\r\nz_costoftwo = []\r\nz_votes = []\r\nz_hours = []\r\n\r\ndef traverse(a,b):\r\n temp = []\r\n for i in range(a,b,1):\r\n a = str(i)\r\n button = browser.find_element_by_link_text(a)\r\n button.click()\r\n name_list = browser.find_elements_by_class_name(\"result-title.hover_feedback.zred.bold.ln24.fontsize0\")\r\n add_list = browser.find_elements_by_class_name(\"col-m-16.search-result-address.grey-text.nowrap.ln22\")\r\n phone_list = browser.find_elements_by_class_name(\"item.res-snippet-ph-info\")\r\n for i in range(1,18):\r\n if(i==4 or i==10 ):\r\n continue\r\n else:\r\n try:\r\n z_costoftwo.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[3]/div[2]/span[2]\").text)\r\n except Exception as e:\r\n z_costoftwo.append(\"NILL\")\r\n try:\r\n z_hours.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[3]/div[3]/div[1]\").text)\r\n except Exception as e1:\r\n z_hours.append(\"NILL\")\r\n try:\r\n z_votes.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/span\").text)\r\n except Exception as e1:\r\n z_votes.append(\"NEW\")\r\n try:\r\n z_rating_list.append(browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[1]/div[3]/div[\"+str(i)+\"]/div[1]/div/article/div[1]/div/div[2]/div[1]/div[2]/div[1]\").text)\r\n except Exception as e:\r\n z_rating_list.append(\"NILL\")\r\n for names in name_list:\r\n z_hotel_list.append(names.text)\r\n temp.append(names.text)\r\n for addname in add_list:\r\n z_address_list.append(addname.text)\r\n for phonename in phone_list:\r\n z_phone_list.append(phonename.get_attribute(\"data-phone-no-str\"))\r\n if(int(a)<6):\r\n clk = browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[7]\")\r\n clk.click()\r\n else:\r\n clk = browser.find_element_by_xpath(\"/html/body/section/div/div[2]/div[3]/div[2]/div/div[6]/div/div[1]/section/div[2]/div[1]/div[2]/div/div/a[8]\")\r\n clk.click()\r\ntraverse(1,6)\r\ntraverse(6,11)\r\ntraverse(11,16)\r\ntraverse(16,21)\r\ntraverse(21,26)\r\n# traverse(26,31)\r\n# traverse(31,36)\r\n# traverse(36,41)\r\n# traverse(41,46)\r\n# traverse(46,51)\r\n# traverse(51,56)\r\n# for i in range(1,5,10):\r\n# traverse(i,i+5)\r\n# traverse(i+5,i+10)\r\nfor i in range(0,len(z_hotel_list),1):\r\n sheet1.write(i,0,z_hotel_list[i])\r\nfor i in range(0, len(z_phone_list), 1):\r\n sheet1.write(i,1,z_phone_list[i])\r\nfor i in range(0, len(z_address_list), 1):\r\n sheet1.write(i, 2, z_address_list[i])\r\nfor i in range(0,len(z_rating_list)):\r\n sheet1.write(i,3,z_rating_list[i])\r\nfor i in range(0, len(z_costoftwo)):\r\n sheet1.write(i, 4, z_costoftwo[i])\r\nfor i in range(0, len(z_hours)):\r\n sheet1.write(i, 5, z_hours[i])\r\nfor i in range(0, len(z_votes)):\r\n sheet1.write(i, 6, 
z_votes[i])\r\n\r\nprint(\"Writing to excel Finished\")\r\nbook.save(\"ZomatoBhopal(data).xls\")\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from . import parsing
from . import cleaning
from .config import NATURAL_EMB_DIM
<|reserved_special_token_1|>
"""Contains functionality for tokenizing, parsing, embedding language."""
from . import parsing
from . import cleaning
from .config import NATURAL_EMB_DIM
|
flexible
|
{
"blob_id": "8a6c9fa67c02d69444c9c3a2e6811b982c49eb4e",
"index": 5585,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom . import parsing\nfrom . import cleaning\nfrom .config import NATURAL_EMB_DIM\n",
"step-3": "\"\"\"Contains functionality for tokenizing, parsing, embedding language.\"\"\"\n\nfrom . import parsing\nfrom . import cleaning\nfrom .config import NATURAL_EMB_DIM\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(num_str)
print(num_str1)
print(num_str.isdecimal())
print(num_str1.isdecimal())
print(num_str.isdigit())
print(num_str1.isdigit())
print(num_str.isnumeric())
print(num_str1.isnumeric())
print(num_str2.isnumeric())
<|reserved_special_token_1|>
num_str = '1'
num_str1 = '²'
num_str2 = '一千零一'
print(num_str)
print(num_str1)
print(num_str.isdecimal())
print(num_str1.isdecimal())
print(num_str.isdigit())
print(num_str1.isdigit())
print(num_str.isnumeric())
print(num_str1.isnumeric())
print(num_str2.isnumeric())
<|reserved_special_token_1|>
num_str = "1"
num_str1 = "\u00b2"
num_str2 = "一千零一"
# 判断字符串是否只包含数字
# 1.三种方法都不能判断小数
# 2.isdigit 和 isnumeric 比 isdecimal 强大一些,后者只能判断正常数字,前两者可以判断带有数字的符号,如平方
# isnumeric 还可以判断中文数字
print(num_str)
print(num_str1)
print(num_str.isdecimal())
print(num_str1.isdecimal())
print(num_str.isdigit())
print(num_str1.isdigit())
print(num_str.isnumeric())
print(num_str1.isnumeric())
print(num_str2.isnumeric())
|
flexible
|
{
"blob_id": "a7be2f43c6ec8d1576ed194a75762a36089cb052",
"index": 4195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(num_str)\nprint(num_str1)\nprint(num_str.isdecimal())\nprint(num_str1.isdecimal())\nprint(num_str.isdigit())\nprint(num_str1.isdigit())\nprint(num_str.isnumeric())\nprint(num_str1.isnumeric())\nprint(num_str2.isnumeric())\n",
"step-3": "num_str = '1'\nnum_str1 = '²'\nnum_str2 = '一千零一'\nprint(num_str)\nprint(num_str1)\nprint(num_str.isdecimal())\nprint(num_str1.isdecimal())\nprint(num_str.isdigit())\nprint(num_str1.isdigit())\nprint(num_str.isnumeric())\nprint(num_str1.isnumeric())\nprint(num_str2.isnumeric())\n",
"step-4": "num_str = \"1\"\nnum_str1 = \"\\u00b2\"\nnum_str2 = \"一千零一\"\n# 判断字符串是否只包含数字\n# 1.三种方法都不能判断小数\n# 2.isdigit 和 isnumeric 比 isdecimal 强大一些,后者只能判断正常数字,前两者可以判断带有数字的符号,如平方\n# isnumeric 还可以判断中文数字\nprint(num_str)\nprint(num_str1)\nprint(num_str.isdecimal())\nprint(num_str1.isdecimal())\nprint(num_str.isdigit())\nprint(num_str1.isdigit())\nprint(num_str.isnumeric())\nprint(num_str1.isnumeric())\nprint(num_str2.isnumeric())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class ListNode:
def __init__(self, val: int, next=None):
self.val = val
self.next = next
def reverseKGroup(head: ListNode, k: int) -> ListNode:
prev, cur, rs, successor = None, head, head, None
def reverseK(node: ListNode, count: int) -> ListNode:
nonlocal successor
nonlocal prev
if count + 1 == k:
successor = node.next
return node
first = reverseK(node.next, count + 1)
node.next.next = node
node.next = successor
if prev: prev.next = first
return first
index = 1
while cur:
if index % k == 0:
sub_head = reverseK(rs, 0)
prev = rs
if index == k: head = sub_head
rs, cur = successor, successor
else:
cur = cur.next
index += 1
return head
def print_list(head: ListNode):
node = head
while node:
print(str(node.val) + '-->')
node = node.next
print('---end--')
if __name__ == '__main__':
five = ListNode(5)
four = ListNode(4, five)
three = ListNode(3, four)
two = ListNode(2, three)
one = ListNode(1, two)
# print_list(one)
reversed_node = reverseKGroup(one, 5)
print_list(reversed_node)
|
normal
|
{
"blob_id": "67904f3a29b0288a24e702f9c3ee001ebc279748",
"index": 3542,
"step-1": "class ListNode:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class ListNode:\n\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\n<mask token>\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\n<mask token>\n",
"step-3": "class ListNode:\n\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\ndef reverseKGroup(head: ListNode, k: int) ->ListNode:\n prev, cur, rs, successor = None, head, head, None\n\n def reverseK(node: ListNode, count: int) ->ListNode:\n nonlocal successor\n nonlocal prev\n if count + 1 == k:\n successor = node.next\n return node\n first = reverseK(node.next, count + 1)\n node.next.next = node\n node.next = successor\n if prev:\n prev.next = first\n return first\n index = 1\n while cur:\n if index % k == 0:\n sub_head = reverseK(rs, 0)\n prev = rs\n if index == k:\n head = sub_head\n rs, cur = successor, successor\n else:\n cur = cur.next\n index += 1\n return head\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\n<mask token>\n",
"step-4": "class ListNode:\n\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\ndef reverseKGroup(head: ListNode, k: int) ->ListNode:\n prev, cur, rs, successor = None, head, head, None\n\n def reverseK(node: ListNode, count: int) ->ListNode:\n nonlocal successor\n nonlocal prev\n if count + 1 == k:\n successor = node.next\n return node\n first = reverseK(node.next, count + 1)\n node.next.next = node\n node.next = successor\n if prev:\n prev.next = first\n return first\n index = 1\n while cur:\n if index % k == 0:\n sub_head = reverseK(rs, 0)\n prev = rs\n if index == k:\n head = sub_head\n rs, cur = successor, successor\n else:\n cur = cur.next\n index += 1\n return head\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\nif __name__ == '__main__':\n five = ListNode(5)\n four = ListNode(4, five)\n three = ListNode(3, four)\n two = ListNode(2, three)\n one = ListNode(1, two)\n reversed_node = reverseKGroup(one, 5)\n print_list(reversed_node)\n",
"step-5": "class ListNode:\n def __init__(self, val: int, next=None):\n self.val = val\n self.next = next\n\n\ndef reverseKGroup(head: ListNode, k: int) -> ListNode:\n prev, cur, rs, successor = None, head, head, None\n\n def reverseK(node: ListNode, count: int) -> ListNode:\n nonlocal successor\n nonlocal prev\n\n if count + 1 == k:\n successor = node.next\n return node\n\n first = reverseK(node.next, count + 1)\n\n node.next.next = node\n node.next = successor\n if prev: prev.next = first\n return first\n\n index = 1\n while cur:\n if index % k == 0:\n sub_head = reverseK(rs, 0)\n prev = rs\n if index == k: head = sub_head\n rs, cur = successor, successor\n else:\n cur = cur.next\n index += 1\n return head\n\n\ndef print_list(head: ListNode):\n node = head\n while node:\n print(str(node.val) + '-->')\n node = node.next\n print('---end--')\n\n\nif __name__ == '__main__':\n five = ListNode(5)\n four = ListNode(4, five)\n three = ListNode(3, four)\n two = ListNode(2, three)\n one = ListNode(1, two)\n # print_list(one)\n reversed_node = reverseKGroup(one, 5)\n print_list(reversed_node)\n\n\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# OSINT By FajarTheGGman For Google Code-in 2019©
import urllib3 as url
class GCI:
def banner():
print("[---- OSINT By FajarTheGGman ----]\n")
def main():
user = str(input("[!] Input Name Victim ? "))
init = url.PoolManager()
a = init.request("GET", "https://facebook.com/" + user)
b = init.request("GET", "https://instagram.com/" + user)
c = init.request("GET", "https://twitter.com/" + user)
if a.status == 200:
print("[+] " + user + " => Found In Facebook")
else:
print("[-] " + user + " => NotFound in Facebook")
if b.status == 200:
print("[+] " + user + " => Found In Instagram")
else:
print("[-] " + user + " => NotFound in Instagram")
if b.status == 200:
print("[+] " + user + " => Found In Twitter")
else:
print("[-] " + user + " => NotFound in Twitter")
x = GCI
x.banner()
x.main()
|
normal
|
{
"blob_id": "6c8180d24110045348d9c2041c0cca26fa9ea2d2",
"index": 4318,
"step-1": "<mask token>\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n\n def main():\n user = str(input('[!] Input Name Victim ? '))\n init = url.PoolManager()\n a = init.request('GET', 'https://facebook.com/' + user)\n b = init.request('GET', 'https://instagram.com/' + user)\n c = init.request('GET', 'https://twitter.com/' + user)\n if a.status == 200:\n print('[+] ' + user + ' => Found In Facebook')\n else:\n print('[-] ' + user + ' => NotFound in Facebook')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Instagram')\n else:\n print('[-] ' + user + ' => NotFound in Instagram')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Twitter')\n else:\n print('[-] ' + user + ' => NotFound in Twitter')\n\n\n<mask token>\nx.banner()\nx.main()\n",
"step-3": "<mask token>\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n\n def main():\n user = str(input('[!] Input Name Victim ? '))\n init = url.PoolManager()\n a = init.request('GET', 'https://facebook.com/' + user)\n b = init.request('GET', 'https://instagram.com/' + user)\n c = init.request('GET', 'https://twitter.com/' + user)\n if a.status == 200:\n print('[+] ' + user + ' => Found In Facebook')\n else:\n print('[-] ' + user + ' => NotFound in Facebook')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Instagram')\n else:\n print('[-] ' + user + ' => NotFound in Instagram')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Twitter')\n else:\n print('[-] ' + user + ' => NotFound in Twitter')\n\n\nx = GCI\nx.banner()\nx.main()\n",
"step-4": "import urllib3 as url\n\n\nclass GCI:\n\n def banner():\n print('[---- OSINT By FajarTheGGman ----]\\n')\n\n def main():\n user = str(input('[!] Input Name Victim ? '))\n init = url.PoolManager()\n a = init.request('GET', 'https://facebook.com/' + user)\n b = init.request('GET', 'https://instagram.com/' + user)\n c = init.request('GET', 'https://twitter.com/' + user)\n if a.status == 200:\n print('[+] ' + user + ' => Found In Facebook')\n else:\n print('[-] ' + user + ' => NotFound in Facebook')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Instagram')\n else:\n print('[-] ' + user + ' => NotFound in Instagram')\n if b.status == 200:\n print('[+] ' + user + ' => Found In Twitter')\n else:\n print('[-] ' + user + ' => NotFound in Twitter')\n\n\nx = GCI\nx.banner()\nx.main()\n",
"step-5": "# OSINT By FajarTheGGman For Google Code-in 2019©\r\n\r\nimport urllib3 as url\r\n\r\nclass GCI:\r\n\tdef banner():\r\n\t\tprint(\"[---- OSINT By FajarTheGGman ----]\\n\")\r\n\r\n\tdef main():\r\n\t\tuser = str(input(\"[!] Input Name Victim ? \"))\r\n\t\tinit = url.PoolManager()\r\n\t\ta = init.request(\"GET\", \"https://facebook.com/\" + user)\r\n\t\tb = init.request(\"GET\", \"https://instagram.com/\" + user)\r\n\t\tc = init.request(\"GET\", \"https://twitter.com/\" + user)\r\n\t\tif a.status == 200:\r\n\t\t\tprint(\"[+] \" + user + \" => Found In Facebook\")\r\n\t\telse:\r\n\t\t\tprint(\"[-] \" + user + \" => NotFound in Facebook\")\r\n\r\n\t\tif b.status == 200:\r\n\t\t\tprint(\"[+] \" + user + \" => Found In Instagram\")\r\n\t\telse:\r\n\t\t\tprint(\"[-] \" + user + \" => NotFound in Instagram\")\r\n\r\n\t\tif b.status == 200:\r\n\t\t\tprint(\"[+] \" + user + \" => Found In Twitter\")\r\n\t\telse:\r\n\t\t\tprint(\"[-] \" + user + \" => NotFound in Twitter\")\r\n\r\nx = GCI\r\nx.banner()\r\nx.main()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import re
from typing import Any, Dict, List
import aiosqlite
from migri.elements import Query
from migri.interfaces import ConnectionBackend, TransactionBackend
class SQLiteConnection(ConnectionBackend):
_dialect = "sqlite"
@staticmethod
def _compile(query: Query) -> dict:
q = query.statement
v = []
if query.placeholders:
for p in query.placeholders:
# Append value
v.append(query.values[p.replace("$", "")])
# Substitute
q = re.sub(f"\\{p}", "?", q)
return {"query": q, "values": v}
async def connect(self):
self.db = await aiosqlite.connect(self.db_name)
self.db.row_factory = aiosqlite.Row
async def disconnect(self):
await self.db.close()
async def execute(self, query: Query):
q = self._compile(query)
await self.db.execute(q["query"], q["values"])
async def fetch(self, query: Query) -> Dict[str, Any]:
q = self._compile(query)
cursor = await self.db.execute(q["query"], q["values"])
res = await cursor.fetchone()
await cursor.close()
return dict(res)
async def fetch_all(self, query: Query) -> List[Dict[str, Any]]:
q = self._compile(query)
cursor = await self.db.execute(q["query"], q["values"])
res = await cursor.fetchall()
await cursor.close()
return [dict(r) for r in res]
def transaction(self) -> "TransactionBackend":
return SQLiteTransaction(self)
class SQLiteTransaction(TransactionBackend):
async def start(self):
# Nothing to do
return
async def commit(self):
await self._connection.database.commit()
async def rollback(self):
await self._connection.database.rollback()
|
normal
|
{
"blob_id": "191a57d3f13fcbe217ff6d0bd92dea163d5fb3cf",
"index": 4822,
"step-1": "<mask token>\n\n\nclass SQLiteConnection(ConnectionBackend):\n <mask token>\n <mask token>\n\n async def connect(self):\n self.db = await aiosqlite.connect(self.db_name)\n self.db.row_factory = aiosqlite.Row\n\n async def disconnect(self):\n await self.db.close()\n\n async def execute(self, query: Query):\n q = self._compile(query)\n await self.db.execute(q['query'], q['values'])\n\n async def fetch(self, query: Query) ->Dict[str, Any]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchone()\n await cursor.close()\n return dict(res)\n\n async def fetch_all(self, query: Query) ->List[Dict[str, Any]]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchall()\n await cursor.close()\n return [dict(r) for r in res]\n\n def transaction(self) ->'TransactionBackend':\n return SQLiteTransaction(self)\n\n\nclass SQLiteTransaction(TransactionBackend):\n\n async def start(self):\n return\n\n async def commit(self):\n await self._connection.database.commit()\n\n async def rollback(self):\n await self._connection.database.rollback()\n",
"step-2": "<mask token>\n\n\nclass SQLiteConnection(ConnectionBackend):\n <mask token>\n\n @staticmethod\n def _compile(query: Query) ->dict:\n q = query.statement\n v = []\n if query.placeholders:\n for p in query.placeholders:\n v.append(query.values[p.replace('$', '')])\n q = re.sub(f'\\\\{p}', '?', q)\n return {'query': q, 'values': v}\n\n async def connect(self):\n self.db = await aiosqlite.connect(self.db_name)\n self.db.row_factory = aiosqlite.Row\n\n async def disconnect(self):\n await self.db.close()\n\n async def execute(self, query: Query):\n q = self._compile(query)\n await self.db.execute(q['query'], q['values'])\n\n async def fetch(self, query: Query) ->Dict[str, Any]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchone()\n await cursor.close()\n return dict(res)\n\n async def fetch_all(self, query: Query) ->List[Dict[str, Any]]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchall()\n await cursor.close()\n return [dict(r) for r in res]\n\n def transaction(self) ->'TransactionBackend':\n return SQLiteTransaction(self)\n\n\nclass SQLiteTransaction(TransactionBackend):\n\n async def start(self):\n return\n\n async def commit(self):\n await self._connection.database.commit()\n\n async def rollback(self):\n await self._connection.database.rollback()\n",
"step-3": "<mask token>\n\n\nclass SQLiteConnection(ConnectionBackend):\n _dialect = 'sqlite'\n\n @staticmethod\n def _compile(query: Query) ->dict:\n q = query.statement\n v = []\n if query.placeholders:\n for p in query.placeholders:\n v.append(query.values[p.replace('$', '')])\n q = re.sub(f'\\\\{p}', '?', q)\n return {'query': q, 'values': v}\n\n async def connect(self):\n self.db = await aiosqlite.connect(self.db_name)\n self.db.row_factory = aiosqlite.Row\n\n async def disconnect(self):\n await self.db.close()\n\n async def execute(self, query: Query):\n q = self._compile(query)\n await self.db.execute(q['query'], q['values'])\n\n async def fetch(self, query: Query) ->Dict[str, Any]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchone()\n await cursor.close()\n return dict(res)\n\n async def fetch_all(self, query: Query) ->List[Dict[str, Any]]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchall()\n await cursor.close()\n return [dict(r) for r in res]\n\n def transaction(self) ->'TransactionBackend':\n return SQLiteTransaction(self)\n\n\nclass SQLiteTransaction(TransactionBackend):\n\n async def start(self):\n return\n\n async def commit(self):\n await self._connection.database.commit()\n\n async def rollback(self):\n await self._connection.database.rollback()\n",
"step-4": "import re\nfrom typing import Any, Dict, List\nimport aiosqlite\nfrom migri.elements import Query\nfrom migri.interfaces import ConnectionBackend, TransactionBackend\n\n\nclass SQLiteConnection(ConnectionBackend):\n _dialect = 'sqlite'\n\n @staticmethod\n def _compile(query: Query) ->dict:\n q = query.statement\n v = []\n if query.placeholders:\n for p in query.placeholders:\n v.append(query.values[p.replace('$', '')])\n q = re.sub(f'\\\\{p}', '?', q)\n return {'query': q, 'values': v}\n\n async def connect(self):\n self.db = await aiosqlite.connect(self.db_name)\n self.db.row_factory = aiosqlite.Row\n\n async def disconnect(self):\n await self.db.close()\n\n async def execute(self, query: Query):\n q = self._compile(query)\n await self.db.execute(q['query'], q['values'])\n\n async def fetch(self, query: Query) ->Dict[str, Any]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchone()\n await cursor.close()\n return dict(res)\n\n async def fetch_all(self, query: Query) ->List[Dict[str, Any]]:\n q = self._compile(query)\n cursor = await self.db.execute(q['query'], q['values'])\n res = await cursor.fetchall()\n await cursor.close()\n return [dict(r) for r in res]\n\n def transaction(self) ->'TransactionBackend':\n return SQLiteTransaction(self)\n\n\nclass SQLiteTransaction(TransactionBackend):\n\n async def start(self):\n return\n\n async def commit(self):\n await self._connection.database.commit()\n\n async def rollback(self):\n await self._connection.database.rollback()\n",
"step-5": "import re\nfrom typing import Any, Dict, List\n\nimport aiosqlite\n\nfrom migri.elements import Query\nfrom migri.interfaces import ConnectionBackend, TransactionBackend\n\n\nclass SQLiteConnection(ConnectionBackend):\n _dialect = \"sqlite\"\n\n @staticmethod\n def _compile(query: Query) -> dict:\n q = query.statement\n v = []\n\n if query.placeholders:\n for p in query.placeholders:\n # Append value\n v.append(query.values[p.replace(\"$\", \"\")])\n\n # Substitute\n q = re.sub(f\"\\\\{p}\", \"?\", q)\n\n return {\"query\": q, \"values\": v}\n\n async def connect(self):\n self.db = await aiosqlite.connect(self.db_name)\n self.db.row_factory = aiosqlite.Row\n\n async def disconnect(self):\n await self.db.close()\n\n async def execute(self, query: Query):\n q = self._compile(query)\n await self.db.execute(q[\"query\"], q[\"values\"])\n\n async def fetch(self, query: Query) -> Dict[str, Any]:\n q = self._compile(query)\n cursor = await self.db.execute(q[\"query\"], q[\"values\"])\n res = await cursor.fetchone()\n await cursor.close()\n\n return dict(res)\n\n async def fetch_all(self, query: Query) -> List[Dict[str, Any]]:\n q = self._compile(query)\n cursor = await self.db.execute(q[\"query\"], q[\"values\"])\n res = await cursor.fetchall()\n await cursor.close()\n\n return [dict(r) for r in res]\n\n def transaction(self) -> \"TransactionBackend\":\n return SQLiteTransaction(self)\n\n\nclass SQLiteTransaction(TransactionBackend):\n async def start(self):\n # Nothing to do\n return\n\n async def commit(self):\n await self._connection.database.commit()\n\n async def rollback(self):\n await self._connection.database.rollback()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def run206_01():
print('Regular dict:')
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
print('OrderedDict:')
d = OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run206_01():
print('Regular dict:')
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
print('OrderedDict:')
d = OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
def run206_02():
"""
相等性判断,需要考虑顺序
:return:
"""
print('Regular dict:')
d1 = {'a': 'A', 'b': 'B', 'c': 'C'}
d2 = {'c': 'C', 'b': 'B', 'a': 'A'}
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
print('OrderedDict:')
d1 = OrderedDict(d1)
d2 = OrderedDict(d2)
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run206_01():
print('Regular dict:')
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
print('OrderedDict:')
d = OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
def run206_02():
"""
相等性判断,需要考虑顺序
:return:
"""
print('Regular dict:')
d1 = {'a': 'A', 'b': 'B', 'c': 'C'}
d2 = {'c': 'C', 'b': 'B', 'a': 'A'}
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
print('OrderedDict:')
d1 = OrderedDict(d1)
d2 = OrderedDict(d2)
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
def run206_03():
"""
re ordering
:return:
"""
d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])
print('Before:')
for k, v in d.items():
print(k, v)
d.move_to_end('b')
print('\nmove_to_end():')
for k, v in d.items():
print(k, v)
d.move_to_end('b', last=False)
print('\nmove_to_end(last=False):')
for k, v in d.items():
print(k, v)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from collections import OrderedDict
def run206_01():
print('Regular dict:')
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
print('OrderedDict:')
d = OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
def run206_02():
"""
相等性判断,需要考虑顺序
:return:
"""
print('Regular dict:')
d1 = {'a': 'A', 'b': 'B', 'c': 'C'}
d2 = {'c': 'C', 'b': 'B', 'a': 'A'}
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
print('OrderedDict:')
d1 = OrderedDict(d1)
d2 = OrderedDict(d2)
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
def run206_03():
"""
re ordering
:return:
"""
d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])
print('Before:')
for k, v in d.items():
print(k, v)
d.move_to_end('b')
print('\nmove_to_end():')
for k, v in d.items():
print(k, v)
d.move_to_end('b', last=False)
print('\nmove_to_end(last=False):')
for k, v in d.items():
print(k, v)
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding: utf-8
"""
@description: 有序字典
(notice: python3.6 以后字典已经有序了)
@author: baoqiang
@time: 2019/11/28 1:34 下午
"""
from collections import OrderedDict
def run206_01():
print('Regular dict:')
# d = {'a':'A','b':'B','c':'C'}
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
print('OrderedDict:')
d = OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
for k, v in d.items():
print(k, v)
def run206_02():
"""
相等性判断,需要考虑顺序
:return:
"""
print('Regular dict:')
d1 = {'a': 'A', 'b': 'B', 'c': 'C'}
d2 = {'c': 'C', 'b': 'B', 'a': 'A'}
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
print('OrderedDict:')
d1 = OrderedDict(d1)
d2 = OrderedDict(d2)
print(d1 == d2)
for k, v in d1.items():
print(k, v)
for k, v in d2.items():
print(k, v)
def run206_03():
"""
re ordering
:return:
"""
d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])
print('Before:')
for k, v in d.items():
print(k, v)
d.move_to_end('b')
print('\nmove_to_end():')
for k, v in d.items():
print(k, v)
d.move_to_end('b', last=False)
print('\nmove_to_end(last=False):')
for k, v in d.items():
print(k, v)
|
flexible
|
{
"blob_id": "4a7d8db2bc3b753ea1a12120e1ad85f31d572dc7",
"index": 4237,
"step-1": "<mask token>\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\ndef run206_03():\n \"\"\"\n re ordering\n :return:\n \"\"\"\n d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])\n print('Before:')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b')\n print('\\nmove_to_end():')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b', last=False)\n print('\\nmove_to_end(last=False):')\n for k, v in d.items():\n print(k, v)\n",
"step-4": "<mask token>\nfrom collections import OrderedDict\n\n\ndef run206_01():\n print('Regular dict:')\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\ndef run206_03():\n \"\"\"\n re ordering\n :return:\n \"\"\"\n d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])\n print('Before:')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b')\n print('\\nmove_to_end():')\n for k, v in d.items():\n print(k, v)\n d.move_to_end('b', last=False)\n print('\\nmove_to_end(last=False):')\n for k, v in d.items():\n print(k, v)\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@description: 有序字典\n(notice: python3.6 以后字典已经有序了)\n\n@author: baoqiang\n@time: 2019/11/28 1:34 下午\n\"\"\"\n\nfrom collections import OrderedDict\n\n\ndef run206_01():\n print('Regular dict:')\n # d = {'a':'A','b':'B','c':'C'}\n d = {}\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n print('OrderedDict:')\n d = OrderedDict()\n d['a'] = 'A'\n d['b'] = 'B'\n d['c'] = 'C'\n for k, v in d.items():\n print(k, v)\n\n\ndef run206_02():\n \"\"\"\n 相等性判断,需要考虑顺序\n :return:\n \"\"\"\n print('Regular dict:')\n d1 = {'a': 'A', 'b': 'B', 'c': 'C'}\n d2 = {'c': 'C', 'b': 'B', 'a': 'A'}\n print(d1 == d2)\n\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n print('OrderedDict:')\n d1 = OrderedDict(d1)\n d2 = OrderedDict(d2)\n print(d1 == d2)\n\n for k, v in d1.items():\n print(k, v)\n for k, v in d2.items():\n print(k, v)\n\n\ndef run206_03():\n \"\"\"\n re ordering\n :return:\n \"\"\"\n d = OrderedDict([('a', 'A'), ('b', 'B'), ('c', 'C')])\n\n print('Before:')\n for k, v in d.items():\n print(k, v)\n\n d.move_to_end('b')\n print('\\nmove_to_end():')\n for k, v in d.items():\n print(k, v)\n\n d.move_to_end('b', last=False)\n print('\\nmove_to_end(last=False):')\n for k, v in d.items():\n print(k, v)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameStats:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GameStats:
def __init__(self, setting):
self.setting = setting
self.ships_left = self.setting.ship_limit
self.game_active = True
<|reserved_special_token_1|>
"""
统计飞船信息
"""
class GameStats:
def __init__(self, setting):
self.setting = setting
self.ships_left = self.setting.ship_limit
self.game_active = True
|
flexible
|
{
"blob_id": "3ab26612111e3df59f41f5b5e0bf23398e015a8a",
"index": 1595,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GameStats:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GameStats:\n\n def __init__(self, setting):\n self.setting = setting\n self.ships_left = self.setting.ship_limit\n self.game_active = True\n",
"step-4": "\"\"\"\n统计飞船信息\n\"\"\"\n\n\nclass GameStats:\n def __init__(self, setting):\n self.setting = setting\n self.ships_left = self.setting.ship_limit\n self.game_active = True\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
plus = True # In the last digit, we should add one as the quesiton requries
indexList = range(len(digits))
indexList.reverse()
for i in indexList:
if plus:
digits[i] += 1
if digits[i] == 10:
digits[i] = 0
plus = True
else:
plus = False
if plus:
# handle the case where we need one more digit
return [1] + digits
return digits
|
normal
|
{
"blob_id": "02a228c479a6c94858f7e8ef73a7c8528def871e",
"index": 9423,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n plus = True\n indexList = range(len(digits))\n indexList.reverse()\n for i in indexList:\n if plus:\n digits[i] += 1\n if digits[i] == 10:\n digits[i] = 0\n plus = True\n else:\n plus = False\n if plus:\n return [1] + digits\n return digits\n",
"step-4": "class Solution(object):\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n plus = True # In the last digit, we should add one as the quesiton requries\n indexList = range(len(digits))\n indexList.reverse()\n for i in indexList:\n if plus:\n digits[i] += 1\n if digits[i] == 10:\n digits[i] = 0\n plus = True\n else:\n plus = False\n if plus:\n # handle the case where we need one more digit\n return [1] + digits\n return digits\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class UserCreateSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = CustomUser
fields = '__all__'
def validate(self, data):
password = data.get('password', None)
username = data.get('username', None)
if password is not None:
password_validation.validate_password(password)
return super().validate(data)
def create(self, validated_data):
user = super().create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class UserSerializer(serializers.ModelSerializer):
role = RoleSerializer(read_only=True)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
exclude = 'password',
def update(self, instance, validated_data):
validated_data.pop('password', None)
return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = 'id', 'username', 'price_level'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoleSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Role
fields = '__all__'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SelfChangePasswordSerializer(serializers.Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
old_password = data.get('old_password', None)
new_password = data.get('new_password', None)
if old_password is not None and not self.get_current_user(
).check_password(old_password):
raise serializers.ValidationError({'old_password':
'Your old password was entered incorrectly. Please enter it again.'
})
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class ChangePasswordSerializer(serializers.Serializer):
new_password = serializers.CharField(max_length=128)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
new_password = data.get('new_password', None)
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class UserCreateSerializer(serializers.ModelSerializer):
username = serializers.CharField(min_length=5, max_length=150,
validators=[UniqueValidator(queryset=CustomUser.objects.all())])
password = serializers.CharField(max_length=128)
price_level = serializers.IntegerField(min_value=1, max_value=5)
balance = serializers.DecimalField(max_digits=10, decimal_places=2,
min_value=0.0)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
fields = '__all__'
def validate(self, data):
password = data.get('password', None)
username = data.get('username', None)
if password is not None:
password_validation.validate_password(password)
return super().validate(data)
def create(self, validated_data):
user = super().create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class UserSerializer(serializers.ModelSerializer):
role = RoleSerializer(read_only=True)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
exclude = 'password',
def update(self, instance, validated_data):
validated_data.pop('password', None)
return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = 'id', 'username', 'price_level'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RoleSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Role
fields = '__all__'
<|reserved_special_token_0|>
def update(self, instance, validated_data):
permissions = validated_data.pop('permissions', None)
for permissionData in permissions:
for actionData in permissionData.get('actionEntitySet'):
action = ActionEntity.objects.get(pk=actionData.get('id'))
action.enable = actionData.get('enable')
action.save()
super().update(instance, validated_data)
return instance
class SelfChangePasswordSerializer(serializers.Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
old_password = data.get('old_password', None)
new_password = data.get('new_password', None)
if old_password is not None and not self.get_current_user(
).check_password(old_password):
raise serializers.ValidationError({'old_password':
'Your old password was entered incorrectly. Please enter it again.'
})
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class ChangePasswordSerializer(serializers.Serializer):
new_password = serializers.CharField(max_length=128)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
new_password = data.get('new_password', None)
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class UserCreateSerializer(serializers.ModelSerializer):
username = serializers.CharField(min_length=5, max_length=150,
validators=[UniqueValidator(queryset=CustomUser.objects.all())])
password = serializers.CharField(max_length=128)
price_level = serializers.IntegerField(min_value=1, max_value=5)
balance = serializers.DecimalField(max_digits=10, decimal_places=2,
min_value=0.0)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
fields = '__all__'
def validate(self, data):
password = data.get('password', None)
username = data.get('username', None)
if password is not None:
password_validation.validate_password(password)
return super().validate(data)
def create(self, validated_data):
user = super().create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class UserSerializer(serializers.ModelSerializer):
role = RoleSerializer(read_only=True)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
exclude = 'password',
def update(self, instance, validated_data):
validated_data.pop('password', None)
return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = 'id', 'username', 'price_level'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ActionEntitySerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
class Meta:
model = ActionEntity
fields = '__all__'
class PermissionSerializer(serializers.ModelSerializer):
actionEntitySet = ActionEntitySerializer(many=True)
class Meta:
model = Permission
fields = '__all__'
class RoleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
permissions = PermissionSerializer(many=True)
name = serializers.CharField(max_length=32, validators=[UniqueValidator
(queryset=Role.objects.all())])
status = serializers.IntegerField()
describe = serializers.CharField(required=False, allow_null=True,
max_length=128)
class Meta:
model = Role
fields = '__all__'
def create(self, validated_data):
permissions = validated_data.pop('permissions', None)
role = Role.objects.create(**validated_data)
create_permission(role)
return role
def update(self, instance, validated_data):
permissions = validated_data.pop('permissions', None)
for permissionData in permissions:
for actionData in permissionData.get('actionEntitySet'):
action = ActionEntity.objects.get(pk=actionData.get('id'))
action.enable = actionData.get('enable')
action.save()
super().update(instance, validated_data)
return instance
class SelfChangePasswordSerializer(serializers.Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
old_password = data.get('old_password', None)
new_password = data.get('new_password', None)
if old_password is not None and not self.get_current_user(
).check_password(old_password):
raise serializers.ValidationError({'old_password':
'Your old password was entered incorrectly. Please enter it again.'
})
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class ChangePasswordSerializer(serializers.Serializer):
new_password = serializers.CharField(max_length=128)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
new_password = data.get('new_password', None)
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class UserCreateSerializer(serializers.ModelSerializer):
username = serializers.CharField(min_length=5, max_length=150,
validators=[UniqueValidator(queryset=CustomUser.objects.all())])
password = serializers.CharField(max_length=128)
price_level = serializers.IntegerField(min_value=1, max_value=5)
balance = serializers.DecimalField(max_digits=10, decimal_places=2,
min_value=0.0)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
fields = '__all__'
def validate(self, data):
password = data.get('password', None)
username = data.get('username', None)
if password is not None:
password_validation.validate_password(password)
return super().validate(data)
def create(self, validated_data):
user = super().create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class UserSerializer(serializers.ModelSerializer):
role = RoleSerializer(read_only=True)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
exclude = 'password',
def update(self, instance, validated_data):
validated_data.pop('password', None)
return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = 'id', 'username', 'price_level'
<|reserved_special_token_1|>
from rest_framework import serializers
from django.contrib.auth import password_validation
from rest_framework.validators import UniqueValidator
from .models import CustomUser, Role, Permission, ActionEntity
from .utils import create_permission
class ActionEntitySerializer(serializers.ModelSerializer):
id = serializers.IntegerField(required=False)
class Meta:
model = ActionEntity
fields = '__all__'
class PermissionSerializer(serializers.ModelSerializer):
actionEntitySet = ActionEntitySerializer(many=True)
class Meta:
model = Permission
fields = '__all__'
class RoleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
permissions = PermissionSerializer(many=True)
name = serializers.CharField(max_length=32, validators=[UniqueValidator(queryset=Role.objects.all())])
status = serializers.IntegerField()
describe = serializers.CharField(required=False, allow_null=True, max_length=128)
class Meta:
model = Role
fields = '__all__'
def create(self, validated_data):
permissions = validated_data.pop('permissions', None)
role = Role.objects.create(**validated_data)
create_permission(role)
return role
def update(self, instance, validated_data):
permissions = validated_data.pop('permissions', None)
for permissionData in permissions:
for actionData in permissionData.get('actionEntitySet'):
action = ActionEntity.objects.get(pk=actionData.get('id'))
action.enable = actionData.get('enable')
action.save()
super().update(instance, validated_data)
return instance
class SelfChangePasswordSerializer(serializers.Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
old_password = data.get('old_password', None)
new_password = data.get('new_password', None)
if old_password is not None and not self.get_current_user().check_password(old_password):
raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. Please enter it again.'})
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class ChangePasswordSerializer(serializers.Serializer):
new_password = serializers.CharField(max_length=128)
def get_current_user(self):
return self.context['request'].user
def validate(self, data):
new_password = data.get('new_password', None)
if new_password is not None:
password_validation.validate_password(new_password)
return super().validate(data)
class UserCreateSerializer(serializers.ModelSerializer):
username = serializers.CharField(min_length= 5, max_length=150, validators=[UniqueValidator(queryset=CustomUser.objects.all())])
password = serializers.CharField(max_length=128)
price_level = serializers.IntegerField(min_value=1, max_value=5)
balance = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0.0)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
fields = '__all__'
def validate(self, data):
password = data.get('password', None)
username = data.get('username', None)
if password is not None:
password_validation.validate_password(password)
return super().validate(data)
def create(self, validated_data):
user = super().create(validated_data)
user.set_password(validated_data['password'])
user.save()
return user
class UserSerializer(serializers.ModelSerializer):
role = RoleSerializer(read_only=True)
role_id = serializers.IntegerField(required=False, allow_null=True)
class Meta:
model = CustomUser
exclude = (
'password',
)
def update(self, instance, validated_data):
validated_data.pop('password', None)
return super().update(instance, validated_data)
class UserSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = CustomUser
fields = (
'id', 'username', 'price_level'
)
|
flexible
|
{
"blob_id": "b10a50ce649650542d176a2f6fb8c35c500fbc38",
"index": 3644,
"step-1": "<mask token>\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-2": "<mask token>\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Role\n fields = '__all__'\n <mask token>\n <mask token>\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-3": "<mask token>\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Role\n fields = '__all__'\n <mask token>\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n super().update(instance, validated_data)\n return instance\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-4": "<mask token>\n\n\nclass ActionEntitySerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = ActionEntity\n fields = '__all__'\n\n\nclass PermissionSerializer(serializers.ModelSerializer):\n actionEntitySet = ActionEntitySerializer(many=True)\n\n\n class Meta:\n model = Permission\n fields = '__all__'\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField()\n permissions = PermissionSerializer(many=True)\n name = serializers.CharField(max_length=32, validators=[UniqueValidator\n (queryset=Role.objects.all())])\n status = serializers.IntegerField()\n describe = serializers.CharField(required=False, allow_null=True,\n max_length=128)\n\n\n class Meta:\n model = Role\n fields = '__all__'\n\n def create(self, validated_data):\n permissions = validated_data.pop('permissions', None)\n role = Role.objects.create(**validated_data)\n create_permission(role)\n return role\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n super().update(instance, validated_data)\n return instance\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. 
Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n",
"step-5": "from rest_framework import serializers\nfrom django.contrib.auth import password_validation\nfrom rest_framework.validators import UniqueValidator\n\nfrom .models import CustomUser, Role, Permission, ActionEntity\nfrom .utils import create_permission\n\nclass ActionEntitySerializer(serializers.ModelSerializer):\n\n id = serializers.IntegerField(required=False)\n \n class Meta:\n model = ActionEntity\n fields = '__all__'\n\nclass PermissionSerializer(serializers.ModelSerializer):\n\n actionEntitySet = ActionEntitySerializer(many=True)\n\n class Meta:\n model = Permission\n fields = '__all__'\n\nclass RoleSerializer(serializers.ModelSerializer):\n\n id = serializers.ReadOnlyField()\n\n permissions = PermissionSerializer(many=True)\n\n name = serializers.CharField(max_length=32, validators=[UniqueValidator(queryset=Role.objects.all())])\n\n status = serializers.IntegerField() \n\n describe = serializers.CharField(required=False, allow_null=True, max_length=128)\n\n class Meta:\n model = Role\n fields = '__all__'\n\n def create(self, validated_data):\n permissions = validated_data.pop('permissions', None)\n role = Role.objects.create(**validated_data)\n create_permission(role)\n return role\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n\n super().update(instance, validated_data)\n\n return instance\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n\n if old_password is not None and not self.get_current_user().check_password(old_password):\n raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. 
Please enter it again.'})\n \n if new_password is not None:\n password_validation.validate_password(new_password)\n\n return super().validate(data)\n\nclass ChangePasswordSerializer(serializers.Serializer):\n\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n\n if new_password is not None:\n password_validation.validate_password(new_password)\n\n return super().validate(data)\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length= 5, max_length=150, validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0.0)\n\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n \n if password is not None:\n password_validation.validate_password(password)\n\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\nclass UserSerializer(serializers.ModelSerializer):\n\n role = RoleSerializer(read_only=True)\n\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n class Meta:\n model = CustomUser\n exclude = (\n 'password',\n )\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CustomUser\n fields = (\n 'id', 'username', 'price_level'\n )",
"step-ids": [
7,
17,
18,
23,
26
]
}
|
[
7,
17,
18,
23,
26
] |
"""
Author: Eric J. Ma
Purpose: This is a set of utility variables and functions that can be used
across the PIN project.
"""
import numpy as np
from sklearn.preprocessing import StandardScaler
BACKBONE_ATOMS = ["N", "CA", "C", "O"]
AMINO_ACIDS = [
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"P",
"Q",
"R",
"S",
"T",
"V",
"W",
"X",
"Y",
"Z",
]
BOND_TYPES = [
"hydrophobic",
"disulfide",
"hbond",
"ionic",
"aromatic",
"aromatic_sulphur",
"cation_pi",
"backbone",
"delaunay",
]
RESI_NAMES = [
"ALA",
"ASX",
"CYS",
"ASP",
"GLU",
"PHE",
"GLY",
"HIS",
"ILE",
"LYS",
"LEU",
"MET",
"ASN",
"PRO",
"GLN",
"ARG",
"SER",
"THR",
"VAL",
"TRP",
"TYR",
"GLX",
"UNK",
]
HYDROPHOBIC_RESIS = [
"ALA",
"VAL",
"LEU",
"ILE",
"MET",
"PHE",
"TRP",
"PRO",
"TYR",
]
DISULFIDE_RESIS = ["CYS"]
DISULFIDE_ATOMS = ["SG"]
IONIC_RESIS = ["ARG", "LYS", "HIS", "ASP", "GLU"]
POS_AA = ["HIS", "LYS", "ARG"]
NEG_AA = ["GLU", "ASP"]
AA_RING_ATOMS = dict()
AA_RING_ATOMS["PHE"] = ["CG", "CD", "CE", "CZ"]
AA_RING_ATOMS["TRP"] = ["CD", "CE", "CH", "CZ"]
AA_RING_ATOMS["HIS"] = ["CG", "CD", "CE", "ND", "NE"]
AA_RING_ATOMS["TYR"] = ["CG", "CD", "CE", "CZ"]
AROMATIC_RESIS = ["PHE", "TRP", "HIS", "TYR"]
CATION_PI_RESIS = ["LYS", "ARG", "PHE", "TYR", "TRP"]
CATION_RESIS = ["LYS", "ARG"]
PI_RESIS = ["PHE", "TYR", "TRP"]
SULPHUR_RESIS = ["MET", "CYS"]
ISOELECTRIC_POINTS = {
"ALA": 6.11,
"ARG": 10.76,
"ASN": 10.76,
"ASP": 2.98,
"CYS": 5.02,
"GLU": 3.08,
"GLN": 5.65,
"GLY": 6.06,
"HIS": 7.64,
"ILE": 6.04,
"LEU": 6.04,
"LYS": 9.74,
"MET": 5.74,
"PHE": 5.91,
"PRO": 6.30,
"SER": 5.68,
"THR": 5.60,
"TRP": 5.88,
"TYR": 5.63,
"VAL": 6.02,
"UNK": 7.00, # unknown so assign neutral
"ASX": 6.87, # the average of D and N
"GLX": 4.35, # the average of E and Q
}
scaler = StandardScaler()
scaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))
ISOELECTRIC_POINTS_STD = dict()
for k, v in ISOELECTRIC_POINTS.items():
ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
MOLECULAR_WEIGHTS = {
"ALA": 89.0935,
"ARG": 174.2017,
"ASN": 132.1184,
"ASP": 133.1032,
"CYS": 121.1590,
"GLU": 147.1299,
"GLN": 146.1451,
"GLY": 75.0669,
"HIS": 155.1552,
"ILE": 131.1736,
"LEU": 131.1736,
"LYS": 146.1882,
"MET": 149.2124,
"PHE": 165.1900,
"PRO": 115.1310,
"SER": 105.0930,
"THR": 119.1197,
"TRP": 204.2262,
"TYR": 181.1894,
"VAL": 117.1469,
"UNK": 137.1484, # unknown, therefore assign average of knowns
"ASX": 132.6108, # the average of D and N
"GLX": 146.6375, # the average of E and Q
}
MOLECULAR_WEIGHTS_STD = dict()
scaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))
MOLECULAR_WEIGHTS_STD = dict()
for k, v in MOLECULAR_WEIGHTS.items():
MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))
|
normal
|
{
"blob_id": "330df4f194deec521f7db0389f88171d9e2aac40",
"index": 2384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\n<mask token>\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n<mask token>\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\n<mask token>\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-3": "<mask token>\nBACKBONE_ATOMS = ['N', 'CA', 'C', 'O']\nAMINO_ACIDS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']\nBOND_TYPES = ['hydrophobic', 'disulfide', 'hbond', 'ionic', 'aromatic',\n 'aromatic_sulphur', 'cation_pi', 'backbone', 'delaunay']\nRESI_NAMES = ['ALA', 'ASX', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL',\n 'TRP', 'TYR', 'GLX', 'UNK']\nHYDROPHOBIC_RESIS = ['ALA', 'VAL', 'LEU', 'ILE', 'MET', 'PHE', 'TRP', 'PRO',\n 'TYR']\nDISULFIDE_RESIS = ['CYS']\nDISULFIDE_ATOMS = ['SG']\nIONIC_RESIS = ['ARG', 'LYS', 'HIS', 'ASP', 'GLU']\nPOS_AA = ['HIS', 'LYS', 'ARG']\nNEG_AA = ['GLU', 'ASP']\nAA_RING_ATOMS = dict()\nAA_RING_ATOMS['PHE'] = ['CG', 'CD', 'CE', 'CZ']\nAA_RING_ATOMS['TRP'] = ['CD', 'CE', 'CH', 'CZ']\nAA_RING_ATOMS['HIS'] = ['CG', 'CD', 'CE', 'ND', 'NE']\nAA_RING_ATOMS['TYR'] = ['CG', 'CD', 'CE', 'CZ']\nAROMATIC_RESIS = ['PHE', 'TRP', 'HIS', 'TYR']\nCATION_PI_RESIS = ['LYS', 'ARG', 'PHE', 'TYR', 'TRP']\nCATION_RESIS = ['LYS', 'ARG']\nPI_RESIS = ['PHE', 'TYR', 'TRP']\nSULPHUR_RESIS = ['MET', 'CYS']\nISOELECTRIC_POINTS = {'ALA': 6.11, 'ARG': 10.76, 'ASN': 10.76, 'ASP': 2.98,\n 'CYS': 5.02, 'GLU': 3.08, 'GLN': 5.65, 'GLY': 6.06, 'HIS': 7.64, 'ILE':\n 6.04, 'LEU': 6.04, 'LYS': 9.74, 'MET': 5.74, 'PHE': 5.91, 'PRO': 6.3,\n 'SER': 5.68, 'THR': 5.6, 'TRP': 5.88, 'TYR': 5.63, 'VAL': 6.02, 'UNK': \n 7.0, 'ASX': 6.87, 'GLX': 4.35}\nscaler = StandardScaler()\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\nISOELECTRIC_POINTS_STD = dict()\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\nMOLECULAR_WEIGHTS = {'ALA': 89.0935, 'ARG': 174.2017, 'ASN': 132.1184,\n 'ASP': 133.1032, 'CYS': 121.159, 'GLU': 147.1299, 'GLN': 146.1451,\n 'GLY': 75.0669, 'HIS': 155.1552, 'ILE': 131.1736, 'LEU': 131.1736,\n 'LYS': 146.1882, 'MET': 149.2124, 'PHE': 165.19, 'PRO': 115.131, 'SER':\n 105.093, 'THR': 119.1197, 'TRP': 204.2262, 'TYR': 181.1894, 'VAL': \n 117.1469, 'UNK': 137.1484, 'ASX': 132.6108, 'GLX': 146.6375}\nMOLECULAR_WEIGHTS_STD = dict()\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\nMOLECULAR_WEIGHTS_STD = dict()\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-4": "<mask token>\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nBACKBONE_ATOMS = ['N', 'CA', 'C', 'O']\nAMINO_ACIDS = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',\n 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z']\nBOND_TYPES = ['hydrophobic', 'disulfide', 'hbond', 'ionic', 'aromatic',\n 'aromatic_sulphur', 'cation_pi', 'backbone', 'delaunay']\nRESI_NAMES = ['ALA', 'ASX', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE',\n 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL',\n 'TRP', 'TYR', 'GLX', 'UNK']\nHYDROPHOBIC_RESIS = ['ALA', 'VAL', 'LEU', 'ILE', 'MET', 'PHE', 'TRP', 'PRO',\n 'TYR']\nDISULFIDE_RESIS = ['CYS']\nDISULFIDE_ATOMS = ['SG']\nIONIC_RESIS = ['ARG', 'LYS', 'HIS', 'ASP', 'GLU']\nPOS_AA = ['HIS', 'LYS', 'ARG']\nNEG_AA = ['GLU', 'ASP']\nAA_RING_ATOMS = dict()\nAA_RING_ATOMS['PHE'] = ['CG', 'CD', 'CE', 'CZ']\nAA_RING_ATOMS['TRP'] = ['CD', 'CE', 'CH', 'CZ']\nAA_RING_ATOMS['HIS'] = ['CG', 'CD', 'CE', 'ND', 'NE']\nAA_RING_ATOMS['TYR'] = ['CG', 'CD', 'CE', 'CZ']\nAROMATIC_RESIS = ['PHE', 'TRP', 'HIS', 'TYR']\nCATION_PI_RESIS = ['LYS', 'ARG', 'PHE', 'TYR', 'TRP']\nCATION_RESIS = ['LYS', 'ARG']\nPI_RESIS = ['PHE', 'TYR', 'TRP']\nSULPHUR_RESIS = ['MET', 'CYS']\nISOELECTRIC_POINTS = {'ALA': 6.11, 'ARG': 10.76, 'ASN': 10.76, 'ASP': 2.98,\n 'CYS': 5.02, 'GLU': 3.08, 'GLN': 5.65, 'GLY': 6.06, 'HIS': 7.64, 'ILE':\n 6.04, 'LEU': 6.04, 'LYS': 9.74, 'MET': 5.74, 'PHE': 5.91, 'PRO': 6.3,\n 'SER': 5.68, 'THR': 5.6, 'TRP': 5.88, 'TYR': 5.63, 'VAL': 6.02, 'UNK': \n 7.0, 'ASX': 6.87, 'GLX': 4.35}\nscaler = StandardScaler()\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\nISOELECTRIC_POINTS_STD = dict()\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\nMOLECULAR_WEIGHTS = {'ALA': 89.0935, 'ARG': 174.2017, 'ASN': 132.1184,\n 'ASP': 133.1032, 'CYS': 121.159, 'GLU': 147.1299, 'GLN': 146.1451,\n 'GLY': 75.0669, 'HIS': 155.1552, 'ILE': 131.1736, 'LEU': 131.1736,\n 'LYS': 146.1882, 'MET': 149.2124, 'PHE': 165.19, 'PRO': 115.131, 'SER':\n 105.093, 'THR': 119.1197, 'TRP': 204.2262, 'TYR': 181.1894, 'VAL': \n 117.1469, 'UNK': 137.1484, 'ASX': 132.6108, 'GLX': 146.6375}\nMOLECULAR_WEIGHTS_STD = dict()\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\nMOLECULAR_WEIGHTS_STD = dict()\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-5": "\"\"\"\nAuthor: Eric J. Ma\n\nPurpose: This is a set of utility variables and functions that can be used\nacross the PIN project.\n\"\"\"\n\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nBACKBONE_ATOMS = [\"N\", \"CA\", \"C\", \"O\"]\n\nAMINO_ACIDS = [\n \"A\",\n \"B\",\n \"C\",\n \"D\",\n \"E\",\n \"F\",\n \"G\",\n \"H\",\n \"I\",\n \"J\",\n \"K\",\n \"L\",\n \"M\",\n \"N\",\n \"P\",\n \"Q\",\n \"R\",\n \"S\",\n \"T\",\n \"V\",\n \"W\",\n \"X\",\n \"Y\",\n \"Z\",\n]\n\nBOND_TYPES = [\n \"hydrophobic\",\n \"disulfide\",\n \"hbond\",\n \"ionic\",\n \"aromatic\",\n \"aromatic_sulphur\",\n \"cation_pi\",\n \"backbone\",\n \"delaunay\",\n]\n\nRESI_NAMES = [\n \"ALA\",\n \"ASX\",\n \"CYS\",\n \"ASP\",\n \"GLU\",\n \"PHE\",\n \"GLY\",\n \"HIS\",\n \"ILE\",\n \"LYS\",\n \"LEU\",\n \"MET\",\n \"ASN\",\n \"PRO\",\n \"GLN\",\n \"ARG\",\n \"SER\",\n \"THR\",\n \"VAL\",\n \"TRP\",\n \"TYR\",\n \"GLX\",\n \"UNK\",\n]\n\nHYDROPHOBIC_RESIS = [\n \"ALA\",\n \"VAL\",\n \"LEU\",\n \"ILE\",\n \"MET\",\n \"PHE\",\n \"TRP\",\n \"PRO\",\n \"TYR\",\n]\n\nDISULFIDE_RESIS = [\"CYS\"]\n\nDISULFIDE_ATOMS = [\"SG\"]\n\nIONIC_RESIS = [\"ARG\", \"LYS\", \"HIS\", \"ASP\", \"GLU\"]\n\nPOS_AA = [\"HIS\", \"LYS\", \"ARG\"]\n\nNEG_AA = [\"GLU\", \"ASP\"]\n\nAA_RING_ATOMS = dict()\nAA_RING_ATOMS[\"PHE\"] = [\"CG\", \"CD\", \"CE\", \"CZ\"]\nAA_RING_ATOMS[\"TRP\"] = [\"CD\", \"CE\", \"CH\", \"CZ\"]\nAA_RING_ATOMS[\"HIS\"] = [\"CG\", \"CD\", \"CE\", \"ND\", \"NE\"]\nAA_RING_ATOMS[\"TYR\"] = [\"CG\", \"CD\", \"CE\", \"CZ\"]\n\nAROMATIC_RESIS = [\"PHE\", \"TRP\", \"HIS\", \"TYR\"]\n\nCATION_PI_RESIS = [\"LYS\", \"ARG\", \"PHE\", \"TYR\", \"TRP\"]\n\nCATION_RESIS = [\"LYS\", \"ARG\"]\n\nPI_RESIS = [\"PHE\", \"TYR\", \"TRP\"]\n\nSULPHUR_RESIS = [\"MET\", \"CYS\"]\n\nISOELECTRIC_POINTS = {\n \"ALA\": 6.11,\n \"ARG\": 10.76,\n \"ASN\": 10.76,\n \"ASP\": 2.98,\n \"CYS\": 5.02,\n \"GLU\": 3.08,\n \"GLN\": 5.65,\n \"GLY\": 6.06,\n \"HIS\": 7.64,\n \"ILE\": 6.04,\n \"LEU\": 6.04,\n \"LYS\": 9.74,\n \"MET\": 5.74,\n \"PHE\": 5.91,\n \"PRO\": 6.30,\n \"SER\": 5.68,\n \"THR\": 5.60,\n \"TRP\": 5.88,\n \"TYR\": 5.63,\n \"VAL\": 6.02,\n \"UNK\": 7.00, # unknown so assign neutral\n \"ASX\": 6.87, # the average of D and N\n \"GLX\": 4.35, # the average of E and Q\n}\n\nscaler = StandardScaler()\nscaler.fit(np.array([v for v in ISOELECTRIC_POINTS.values()]).reshape(-1, 1))\n\nISOELECTRIC_POINTS_STD = dict()\nfor k, v in ISOELECTRIC_POINTS.items():\n ISOELECTRIC_POINTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n\nMOLECULAR_WEIGHTS = {\n \"ALA\": 89.0935,\n \"ARG\": 174.2017,\n \"ASN\": 132.1184,\n \"ASP\": 133.1032,\n \"CYS\": 121.1590,\n \"GLU\": 147.1299,\n \"GLN\": 146.1451,\n \"GLY\": 75.0669,\n \"HIS\": 155.1552,\n \"ILE\": 131.1736,\n \"LEU\": 131.1736,\n \"LYS\": 146.1882,\n \"MET\": 149.2124,\n \"PHE\": 165.1900,\n \"PRO\": 115.1310,\n \"SER\": 105.0930,\n \"THR\": 119.1197,\n \"TRP\": 204.2262,\n \"TYR\": 181.1894,\n \"VAL\": 117.1469,\n \"UNK\": 137.1484, # unknown, therefore assign average of knowns\n \"ASX\": 132.6108, # the average of D and N\n \"GLX\": 146.6375, # the average of E and Q\n}\n\nMOLECULAR_WEIGHTS_STD = dict()\n\nscaler.fit(np.array([v for v in MOLECULAR_WEIGHTS.values()]).reshape(-1, 1))\nMOLECULAR_WEIGHTS_STD = dict()\nfor k, v in MOLECULAR_WEIGHTS.items():\n MOLECULAR_WEIGHTS_STD[k] = scaler.transform(np.array([v]).reshape(-1, 1))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from __future__ import absolute_import
from . import utils
from . import bert_model
from . import transformer
from .utils import *
from .bert_model import *
from .transformer import *
|
flexible
|
{
"blob_id": "6415b08795975698e8e2019cafb82561b35f8e71",
"index": 2037,
"step-1": "<mask token>\n",
"step-2": "from __future__ import absolute_import\nfrom . import utils\nfrom . import bert_model\nfrom . import transformer\nfrom .utils import *\nfrom .bert_model import *\nfrom .transformer import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# e.g. 8-34
from tkinter import *
from PP4E.launchmodes import PortableLauncher
import os, sys
demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']
for demo in demoModules:
pid = os.fork()
filepath = './' + demo + '.py'
if pid == 0:
os.execvp('python3.5', (filepath, ))
root = Tk()
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
|
normal
|
{
"blob_id": "d91dc850c293cf085e1be04b6e13e0a62cb0bcb1",
"index": 9812,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\n<mask token>\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-3": "<mask token>\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-4": "from tkinter import *\nfrom PP4E.launchmodes import PortableLauncher\nimport os, sys\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-5": "# e.g. 8-34\n\nfrom tkinter import *\nfrom PP4E.launchmodes import PortableLauncher\nimport os, sys\n\n\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\n\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath, ))\n\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DoorTeleportActor(AreaTeleportActor):
pass
<|reserved_special_token_1|>
from pirates.teleport.AreaTeleportActor import AreaTeleportActor
class DoorTeleportActor(AreaTeleportActor):
pass
|
flexible
|
{
"blob_id": "b679444fde7cd8eb819443922f37ee54c0f29de4",
"index": 424,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass DoorTeleportActor(AreaTeleportActor):\n pass\n",
"step-3": "from pirates.teleport.AreaTeleportActor import AreaTeleportActor\n\n\nclass DoorTeleportActor(AreaTeleportActor):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import requests
import json
from web import *
from libs_support import *
from rss_parser import *
from database import *
class Solr_helper:
""" Ho tro He thong tu dong cap nhat du lieu - su dung post.jar de tu dong cap nhat du lieu moi vao he thong theo
tung khoang thoi gian nhat dinh """
def __init__(self, db_name = "btl-tktdtt", domain = "localhost", port = 8983, solr_home = "."):
self.server_db_name = db_name
self.server_port = port
self.server_domain = domain
self.server_db_name = db_name
#default
self.set_solr_home(solr_home)
    # Solr settings
def set_post_tool(self, path_tool):
self.server_post_tool = path_tool
def set_solr_home(self, path_home):
if(path_home.endswith("/")): path_home = path_home[:-1]
self.server_solr_home = path_home
self.server_post_tool = path_home +"/example/exampledocs/post.jar"
    # update JSON web data into the system
def update_use_tool(self, path_file_json_data, type_update="text/json"):
# use java tool
cmd_update_data = "java -Dtype={2} -Durl=http://{0}:{1}/solr/{3}/update -jar {5} {4}" \
.format(self.server_domain, self.server_port, type_update, self.server_db_name, path_file_json_data,
self.server_post_tool)
print (cmd_update_data)
# os.system(cmd_update_data)
    # update JSON web data into the system
def update(self, data_json):
        # post pattern: curl 'http://localhost:8983/solr/testBTL/update/json/docs' -H 'Content-type:application/json' -d '[{},{}]'
# use Data with Index Handlers (DIH) Http post
url = "http://{0}:{1}/solr/{2}/update/json/docs" \
.format(self.server_domain, self.server_port, self.server_db_name)
headers = dict()
headers['Content-type'] = 'application/json'
try:
r = requests.post(url=url,data=data_json,headers=headers)
r.close()
return r.text # .encode('utf-8', 'inorge')
except Exception, e:
print('Exception' + str(e))
return None
def reload(self):
# post paterm: curl "http://localhost:8983/solr/admin/cores?action=RELOAD&core=mycore"
# use Data with Index Handlers (DIH) Http post
url = "http://{0}:{1}/solr/admin/cores?action=RELOAD&core={2}" .format(self.server_domain, self.server_port,self.server_db_name)
try:
r = requests.post(url=url)
r.close()
return r.text # .encode('utf-8', 'inorge')
except Exception, e:
print('Exception' + str(e))
return None
def crawl_data():
max_count_web = 500
rss_page_links = [
#"http://vietbao.vn/vn/rss",
#"http://vnexpress.net/rss",
"http://dantri.com.vn/rss",
#"http://vtv.vn/rss",
"http://techtalk.vn/"
]
web_mannual_page_links = [
# "vtv.vn" ,
"kenh14.vn"
]
    # Configure the web crawl filter
    # Web_filter.set_last_time("2016-10-26, 22:20:08+07:00") # Articles newer than time xxx
    # Web_filter.set_limit_time("2016-10-26, 22:20:08+07:00", "2016-10-26, 23:20:08+07:00") # Articles within a time range
    Web_filter.set_max_count_web_each_domain(10000) # each domain must not exceed 1000
    Web_filter.set_max_count_web_each_sublabel(100) # each label within a domain must not exceed 100
    # Pages that have RSS feeds
data = "["
for link_rss in rss_page_links:
parser = rss_parser(link_rss)
webs = parser.get_list_web()
for web_x in webs:
data += (web_x.get_json()+",")
# web_x.write_to_file('/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/vietnam-news/data-train')
if data.__len__() > 1:
data = data[:-1]+"]"
solr = Solr_helper(db_name="btl-tktdtt")
solr.set_solr_home("/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/solr-6.2.1")
print (solr.update(data))
print (solr.reload())
def query():
# http://localhost:8983/solr/btl-tktdtt/select?indent=on&q=*:*&wt=json
# http://localhost:8983/solr/btl-tktdtt/select?q=*:*&sort=dist(0,%2010,%2010)%20desc
# http://localhost:8983/solr/btl-tktdtt/select?q=title:Thiên thần+url:thien-than
None
if __name__ == "__main__":
t = 1
t = t + 1
solr = Solr_helper( db_name = "btl-tktdtt")
solr.set_solr_home("/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/solr-6.2.1")
# # solr.update("/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/vietnam-news/data-train/techtalk/Cong\ nghe/31fa871c7d521106e28c45f567a63445c33e1186.json")
#
# data_test = []
# data_test.append({
# "code": "55421c7d521106e28c45f567a63445c33e118744446",
# "title": "test dddd vcc c dsf" ,
# "url": "http://techtalk.vn/van-de-da-ngon-ngu-trong-angularjs.html",
# "labels": "techtalk/Cong nghe",
# "content": "tset content ",
# "image_url": "",
# "date": "2016-11-14, 12:00:02+00:00"
# })
# data_test.append({
# "code": "12345651717ebecaeb1c179522eff5dcc19c86ce8",
# "title": "test title ",
# "url": "http://techtalk.vn/tim-hieu-ve-middleware-trong-expressjs.html",
# "labels": "techtalk/Cong nghe",
# "content": "test ddddd content ",
# "image_url": "",
# "date": "2016-11-13, 01:00:14+00:00"
# })
crawl_data()
# data_json = (json.dumps(data_test,indent=4, separators=(',', ': '), ensure_ascii=False))
# solr.update(data_json)
# print (solr.reload())
|
normal
|
{
"blob_id": "deaaf7620b9eba32149f733cd543399bdc2813a1",
"index": 6553,
"step-1": "\nimport os\nimport requests\nimport json\n\nfrom web import *\nfrom libs_support import *\nfrom rss_parser import *\nfrom database import *\n\nclass Solr_helper:\n\n \"\"\" Ho tro He thong tu dong cap nhat du lieu - su dung post.jar de tu dong cap nhat du lieu moi vao he thong theo\n tung khoang thoi gian nhat dinh \"\"\"\n\n def __init__(self, db_name = \"btl-tktdtt\", domain = \"localhost\", port = 8983, solr_home = \".\"):\n self.server_db_name = db_name\n self.server_port = port\n self.server_domain = domain\n self.server_db_name = db_name\n\n #default\n self.set_solr_home(solr_home)\n\n # Cai dat cua solr\n def set_post_tool(self, path_tool):\n self.server_post_tool = path_tool\n def set_solr_home(self, path_home):\n if(path_home.endswith(\"/\")): path_home = path_home[:-1]\n self.server_solr_home = path_home\n self.server_post_tool = path_home +\"/example/exampledocs/post.jar\"\n\n # update du lieu json web vao he thong\n def update_use_tool(self, path_file_json_data, type_update=\"text/json\"):\n # use java tool\n cmd_update_data = \"java -Dtype={2} -Durl=http://{0}:{1}/solr/{3}/update -jar {5} {4}\" \\\n .format(self.server_domain, self.server_port, type_update, self.server_db_name, path_file_json_data,\n self.server_post_tool)\n print (cmd_update_data)\n # os.system(cmd_update_data)\n\n # update du lieu json web vao he thong\n def update(self, data_json):\n # post paterm: curl 'http://localhost:8983/solr/testBTL/update/json/docs' -H 'Content-type:application/json' -d '[{},{}]'\n # use Data with Index Handlers (DIH) Http post\n url = \"http://{0}:{1}/solr/{2}/update/json/docs\" \\\n .format(self.server_domain, self.server_port, self.server_db_name)\n headers = dict()\n headers['Content-type'] = 'application/json'\n try:\n r = requests.post(url=url,data=data_json,headers=headers)\n r.close()\n return r.text # .encode('utf-8', 'inorge')\n except Exception, e:\n print('Exception' + str(e))\n return None\n\n def reload(self):\n # post paterm: curl \"http://localhost:8983/solr/admin/cores?action=RELOAD&core=mycore\"\n # use Data with Index Handlers (DIH) Http post\n url = \"http://{0}:{1}/solr/admin/cores?action=RELOAD&core={2}\" .format(self.server_domain, self.server_port,self.server_db_name)\n try:\n r = requests.post(url=url)\n r.close()\n return r.text # .encode('utf-8', 'inorge')\n except Exception, e:\n print('Exception' + str(e))\n return None\n\ndef crawl_data():\n max_count_web = 500\n rss_page_links = [\n #\"http://vietbao.vn/vn/rss\",\n #\"http://vnexpress.net/rss\",\n \"http://dantri.com.vn/rss\",\n #\"http://vtv.vn/rss\",\n \"http://techtalk.vn/\"\n ]\n web_mannual_page_links = [\n # \"vtv.vn\" ,\n \"kenh14.vn\"\n ]\n\n # Cai dat bo loc crawl web\n # Web_filter.set_last_time(\"2016-10-26, 22:20:08+07:00\") # Bai viet moi hon ke tu thoi diem xxx\n # Web_filter.set_limit_time(\"2016-10-26, 22:20:08+07:00\", \"2016-10-26, 23:20:08+07:00\") # Bai viet trong khoang tg\n Web_filter.set_max_count_web_each_domain(10000) # moi domain khong vuot qua 1000\n Web_filter.set_max_count_web_each_sublabel(100) # moi label trong 1 domain k vuot qua 100\n\n # Cac trang co rss\n data = \"[\"\n for link_rss in rss_page_links:\n parser = rss_parser(link_rss)\n webs = parser.get_list_web()\n for web_x in webs:\n data += (web_x.get_json()+\",\")\n # web_x.write_to_file('/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/vietnam-news/data-train')\n\n if data.__len__() > 1:\n data = data[:-1]+\"]\"\n solr = Solr_helper(db_name=\"btl-tktdtt\")\n 
solr.set_solr_home(\"/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/solr-6.2.1\")\n\n print (solr.update(data))\n print (solr.reload())\n\ndef query():\n # http://localhost:8983/solr/btl-tktdtt/select?indent=on&q=*:*&wt=json\t\n # http://localhost:8983/solr/btl-tktdtt/select?q=*:*&sort=dist(0,%2010,%2010)%20desc\n # http://localhost:8983/solr/btl-tktdtt/select?q=title:Thiên thần+url:thien-than\n None\n\n\n\nif __name__ == \"__main__\":\n t = 1\n t = t + 1\n\n solr = Solr_helper( db_name = \"btl-tktdtt\")\n solr.set_solr_home(\"/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/solr-6.2.1\")\n # # solr.update(\"/mnt/01CDF1ECE3AB4280/DH/NAM_5/Ki_1/TimkiemTrinhDien/BTL/vietnam-news/data-train/techtalk/Cong\\ nghe/31fa871c7d521106e28c45f567a63445c33e1186.json\")\n #\n # data_test = []\n # data_test.append({\n # \"code\": \"55421c7d521106e28c45f567a63445c33e118744446\",\n # \"title\": \"test dddd vcc c dsf\" ,\n # \"url\": \"http://techtalk.vn/van-de-da-ngon-ngu-trong-angularjs.html\",\n # \"labels\": \"techtalk/Cong nghe\",\n # \"content\": \"tset content \",\n # \"image_url\": \"\",\n # \"date\": \"2016-11-14, 12:00:02+00:00\"\n # })\n # data_test.append({\n # \"code\": \"12345651717ebecaeb1c179522eff5dcc19c86ce8\",\n # \"title\": \"test title \",\n # \"url\": \"http://techtalk.vn/tim-hieu-ve-middleware-trong-expressjs.html\",\n # \"labels\": \"techtalk/Cong nghe\",\n # \"content\": \"test ddddd content \",\n # \"image_url\": \"\",\n # \"date\": \"2016-11-13, 01:00:14+00:00\"\n # })\n crawl_data()\n # data_json = (json.dumps(data_test,indent=4, separators=(',', ': '), ensure_ascii=False))\n # solr.update(data_json)\n # print (solr.reload())\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class web:
def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),
graph_time=GraphAL(), graph_line=GraphAL()):
self.graph_money = graph_money
self.graph_time = graph_time
self.graph_line = graph_line
self.lnum = lnum
self.land_list = land_list
def is_empty(self):
return self.lnum == 0
<|reserved_special_token_0|>
def lst_pos(self, land):
return self.land_list.index(land)
def _get_position(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
positionn = []
for x in self.land_list():
positionn.append(x.position)
return positionn
def add_land(self, landscape):
self.land_list.append(landscape)
self.graph_money.add_vertex()
self.graph_time.add_vertex()
self.graph_line.add_vertex()
self.lnum += 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_money(self, land1, land2):
a = self.graph_money.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_time(self, land1, land2, time):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), time)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class landscape:
def __init__(self, name, position, category=None, hot=0):
self.name = name
self.position = position
self.category = category
self.hot = hot
def position(self):
return self._position
def category(self):
return self._category
def name(self):
return self._name
def hot(self):
return hot
def set_category(self, sorts):
if sorts not in categorys:
raise ValuError('in set_category, we do not have {}'.format(sorts))
self.category = sorts
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class web:
def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),
graph_time=GraphAL(), graph_line=GraphAL()):
self.graph_money = graph_money
self.graph_time = graph_time
self.graph_line = graph_line
self.lnum = lnum
self.land_list = land_list
def is_empty(self):
return self.lnum == 0
def _get_name(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
namee = []
for x in self.land_list():
namee.append(x.name)
return namee
def lst_pos(self, land):
return self.land_list.index(land)
def _get_position(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
positionn = []
for x in self.land_list():
positionn.append(x.position)
return positionn
def add_land(self, landscape):
self.land_list.append(landscape)
self.graph_money.add_vertex()
self.graph_time.add_vertex()
self.graph_line.add_vertex()
self.lnum += 1
def set_all(self, land1, land2, money=infnum, time=infnum, line=1):
graph_money.add_edge(self.land_list().index(land1), self.land_list(
).index(land2), money)
graph_time.add_edge(self.land_list().index(land1), self.land_list()
.index(land2), time)
graph_line.add_edge(self.land_list().index(land1), self.land_list()
.index(land2), line)
def set_money(self, land1, land2, money):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), money)
def get_money(self, land1, land2):
a = self.graph_money.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_time(self, land1, land2, time):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), time)
def get_time(self, land1, land2):
a = self.graph_time.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_line(self, land1, land2, line):
self.graph_line.add_edge(self.land_list.index(land1), self.
land_list.index(land2), line)
def get_line(self, land1, land2):
a = self.graph_line.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
<|reserved_special_token_0|>
class landscape:
def __init__(self, name, position, category=None, hot=0):
self.name = name
self.position = position
self.category = category
self.hot = hot
def position(self):
return self._position
def category(self):
return self._category
def name(self):
return self._name
def hot(self):
return hot
def set_category(self, sorts):
if sorts not in categorys:
raise ValuError('in set_category, we do not have {}'.format(sorts))
self.category = sorts
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_by_word(lst, word):
ans = []
for x in lst:
if word == x:
ans.append(x)
if len(word) > 20:
raise ValuError(
"in find_by_word, we don't think it's possible for a city or a town to own a name longer than 20"
)
if ans != []:
return ans
slices = []
for i in range(len(word)):
for j in range(0, len(word) - i + 1):
slices.append(word[j:j + i])
for x in lst:
for i in range(1, len(word)):
if slices[-i] in x:
ans.append(x)
return ans
<|reserved_special_token_0|>
class web:
def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),
graph_time=GraphAL(), graph_line=GraphAL()):
self.graph_money = graph_money
self.graph_time = graph_time
self.graph_line = graph_line
self.lnum = lnum
self.land_list = land_list
def is_empty(self):
return self.lnum == 0
def _get_name(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
namee = []
for x in self.land_list():
namee.append(x.name)
return namee
def lst_pos(self, land):
return self.land_list.index(land)
def _get_position(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
positionn = []
for x in self.land_list():
positionn.append(x.position)
return positionn
def add_land(self, landscape):
self.land_list.append(landscape)
self.graph_money.add_vertex()
self.graph_time.add_vertex()
self.graph_line.add_vertex()
self.lnum += 1
def set_all(self, land1, land2, money=infnum, time=infnum, line=1):
graph_money.add_edge(self.land_list().index(land1), self.land_list(
).index(land2), money)
graph_time.add_edge(self.land_list().index(land1), self.land_list()
.index(land2), time)
graph_line.add_edge(self.land_list().index(land1), self.land_list()
.index(land2), line)
def set_money(self, land1, land2, money):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), money)
def get_money(self, land1, land2):
a = self.graph_money.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_time(self, land1, land2, time):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), time)
def get_time(self, land1, land2):
a = self.graph_time.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_line(self, land1, land2, line):
self.graph_line.add_edge(self.land_list.index(land1), self.
land_list.index(land2), line)
def get_line(self, land1, land2):
a = self.graph_line.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def shortest_money(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError(
"in shortest_money, if the begining is the same as the ending, you don't have to pay anything"
)
path = dijkstra_shortest_paths(web.graph_money, vi)
path_list = [vi]
while vi != path[vj][0]:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
<|reserved_special_token_0|>
class landscape:
def __init__(self, name, position, category=None, hot=0):
self.name = name
self.position = position
self.category = category
self.hot = hot
def position(self):
return self._position
def category(self):
return self._category
def name(self):
return self._name
def hot(self):
return hot
def set_category(self, sorts):
if sorts not in categorys:
raise ValuError('in set_category, we do not have {}'.format(sorts))
self.category = sorts
def muti_aim_solve(land_list):
sub_web = web()
for x in land_list:
sub_web.add_land(x)
lanst = web.land_list().copy()
for x in lanst:
for y in lanst:
if x == y:
continue
vi = lst_pos(web, x)
vj = lst_pos(web, y)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_by_word(lst, word):
ans = []
for x in lst:
if word == x:
ans.append(x)
if len(word) > 20:
raise ValuError(
"in find_by_word, we don't think it's possible for a city or a town to own a name longer than 20"
)
if ans != []:
return ans
slices = []
for i in range(len(word)):
for j in range(0, len(word) - i + 1):
slices.append(word[j:j + i])
for x in lst:
for i in range(1, len(word)):
if slices[-i] in x:
ans.append(x)
return ans
<|reserved_special_token_0|>
class web:
def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),
graph_time=GraphAL(), graph_line=GraphAL()):
self.graph_money = graph_money
self.graph_time = graph_time
self.graph_line = graph_line
self.lnum = lnum
self.land_list = land_list
def is_empty(self):
return self.lnum == 0
def _get_name(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
namee = []
for x in self.land_list():
namee.append(x.name)
return namee
def lst_pos(self, land):
return self.land_list.index(land)
def _get_position(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
positionn = []
for x in self.land_list():
positionn.append(x.position)
return positionn
def add_land(self, landscape):
self.land_list.append(landscape)
self.graph_money.add_vertex()
self.graph_time.add_vertex()
self.graph_line.add_vertex()
self.lnum += 1
def set_all(self, land1, land2, money=infnum, time=infnum, line=1):
graph_money.add_edge(self.land_list().index(land1), self.land_list(
).index(land2), money)
graph_time.add_edge(self.land_list().index(land1), self.land_list()
.index(land2), time)
graph_line.add_edge(self.land_list().index(land1), self.land_list()
.index(land2), line)
def set_money(self, land1, land2, money):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), money)
def get_money(self, land1, land2):
a = self.graph_money.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_time(self, land1, land2, time):
self.graph_money.add_edge(self.land_list.index(land1), self.
land_list.index(land2), time)
def get_time(self, land1, land2):
a = self.graph_time.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def set_line(self, land1, land2, line):
self.graph_line.add_edge(self.land_list.index(land1), self.
land_list.index(land2), line)
def get_line(self, land1, land2):
a = self.graph_line.get_edge(self.land_list.index(land1), self.
land_list.index(land2))
return a
def shortest_money(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError(
"in shortest_money, if the begining is the same as the ending, you don't have to pay anything"
)
path = dijkstra_shortest_paths(web.graph_money, vi)
path_list = [vi]
while vi != path[vj][0]:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
def shortest_money_str(web, land1, land2):
str_ = ''
path, pay = shortest_money(web, land1, land2)
for i in range(len(path)):
str_ += str(web.land_list[path[i]].name)
str_ += '->'
str_ += land2.name
return '所求的最短路money路径为', str_, '总money代价为', pay
def shortest_time(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError(
"in shortest_time, if the begining is the same as the ending, you don't have to pay anything"
)
path = dijkstra_shortest_paths(web.graph_time(), vi)
path_list = [vi]
while vi != vj:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
def shortest_time_str(web, land1, land2):
str_ = ''
path, pay = shortest_time(web, land1, land2)
for i in range(len(path)):
str_ += str(path[i])
return '所求的最短路time路径为', str_, '总time代价为', pay
def shortest_line(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError(
"in shortest_line, if the begining is the same as the ending, you don't have to pay anything"
)
path = dijkstra_shortest_paths(web.graph_line(), vi)
path_list = [vi]
while vi != vj:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
def shortest_time_str(web, land1, land2):
str_ = ''
path, pay = shortest_line(web, land1, land2)
for i in range(len(path)):
str_ += str(path[i])
return '所求的最短路line路径为', str_, '总line代价为', pay
class landscape:
def __init__(self, name, position, category=None, hot=0):
self.name = name
self.position = position
self.category = category
self.hot = hot
def position(self):
return self._position
def category(self):
return self._category
def name(self):
return self._name
def hot(self):
return hot
def set_category(self, sorts):
if sorts not in categorys:
raise ValuError('in set_category, we do not have {}'.format(sorts))
self.category = sorts
def muti_aim_solve(land_list):
sub_web = web()
for x in land_list:
sub_web.add_land(x)
lanst = web.land_list().copy()
for x in lanst:
for y in lanst:
if x == y:
continue
vi = lst_pos(web, x)
vj = lst_pos(web, y)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from Graph import *
from PrioQueue import *
from GShortestPath import *
from GSpanTree import *
from User import *
infinity = float("inf")
# This is the keyword-based place lookup: once a table keyed on some attribute exists, keyword matching answers the query
# The result is eventually an iterator produced by yield; turning it into a list makes it ready to send to the front end
def find_by_word(lst, word):
    # This is the string-matching helper: word is the user's input, lst is the collection we loop over
    # Ideally the candidates would be kept in a priority queue
    # If nothing is found, we build slices of word at every position (each shorter than word) and use them to search for the wanted name
    # Scenic-spot and place names are usually short, so even though the time cost here is very high, this will not cause trouble
ans = []
for x in lst:
if word == x:
ans.append(x)
if len(word) > 20:
raise ValuError("in find_by_word, we don't think it's possible for a city or a town\
to own a name longer than 20")
# 如果客户输入的地名在地名总集中,我们有理由相信他没有输错
if ans != []:
return ans
slices = []
for i in range(len(word)):
        # For efficiency, the inner loop is ordered so that the slices in the list run from short to long
        # which also puts the higher-similarity matches earlier in the result
for j in range(0, len(word) - i + 1):
slices.append(word[j:j + i])
for x in lst:
for i in range(1, len(word)):
if slices[-i] in x:
ans.append(x)
return ans
categorys = {"历史文化", "现代都市", "山区", "海景", "综合"}
infnum = float("inf")
class web:
    # land_list is a list object, so the usual list methods apply
def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(), graph_time=GraphAL(), graph_line=GraphAL()):
self.graph_money = graph_money
self.graph_time = graph_time
self.graph_line = graph_line
self.lnum = lnum
self.land_list = land_list
def is_empty(self):
return self.lnum == 0
    # Get the names of all scenic spots, stored in a list
    # self._land_list is a list whose elements are landscape objects
def _get_name(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
namee = []
for x in self.land_list():
namee.append(x.name)
return namee
    # Get the positions of all scenic spots
def lst_pos(self, land):
return self.land_list.index(land)
def _get_position(self):
if self.is_empty():
raise WebLandsError("in 'get_all_position'")
positionn = []
for x in self.land_list():
positionn.append(x.position)
return positionn
def add_land(self, landscape):
self.land_list.append(landscape)
self.graph_money.add_vertex()
self.graph_time.add_vertex()
self.graph_line.add_vertex()
self.lnum += 1
    # If money, time or line is not set, there is simply no edge between the two landscapes
def set_all(self, land1, land2, money=infnum, time=infnum, line=1):
graph_money.add_edge(self.land_list().index(land1),
self.land_list().index(land2), money)
graph_time.add_edge(self.land_list().index(land1),
self.land_list().index(land2), time)
graph_line.add_edge(self.land_list().index(land1),
self.land_list().index(land2), line)
# The following uses Dijkstra's algorithm for the shortest-path problem; it works the same way on the time, money and distance adjacency graphs
def set_money(self, land1, land2, money):
self.graph_money.add_edge(self.land_list.index(land1),
self.land_list.index(land2), money)
def get_money(self, land1, land2):
a = self.graph_money.get_edge(self.land_list.index(land1),
self.land_list.index(land2))
return a
def set_time(self, land1, land2, time):
self.graph_money.add_edge(self.land_list.index(land1),
self.land_list.index(land2), time)
def get_time(self, land1, land2):
a = self.graph_time.get_edge(self.land_list.index(land1),
self.land_list.index(land2))
return a
def set_line(self, land1, land2, line):
self.graph_line.add_edge(self.land_list.index(land1),
self.land_list.index(land2), line)
def get_line(self, land1, land2):
a = self.graph_line.get_edge(self.land_list.index(land1),
self.land_list.index(land2))
return a
# shortest_money and the related helpers start here
def shortest_money(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError("in shortest_money,\
if the begining is the same as the ending, you don't have to pay anything")
path = dijkstra_shortest_paths(web.graph_money, vi)
path_list = [vi]
while vi != path[vj][0]:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
def shortest_money_str(web, land1, land2):
str_ = ""
path, pay = shortest_money(web, land1, land2)
for i in range(len(path)):
str_ += str(web.land_list[path[i]].name)
str_ += "->"
str_ += land2.name
return "所求的最短路money路径为", str_, "总money代价为", pay
def shortest_time(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError("in shortest_time,\
if the begining is the same as the ending, you don't have to pay anything")
path = dijkstra_shortest_paths(web.graph_time(), vi)
path_list = [vi]
while vi != vj:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
def shortest_time_str(web, land1, land2):
str_ = ""
path, pay = shortest_time(web, land1, land2)
for i in range(len(path)):
str_ += str(path[i])
return "所求的最短路time路径为", str_, "总time代价为", pay
def shortest_line(web, land1, land2):
vi = web.lst_pos(land1)
vj = web.lst_pos(land2)
if vi == vj:
raise ValuError("in shortest_line,\
if the begining is the same as the ending, you don't have to pay anything")
path = dijkstra_shortest_paths(web.graph_line(), vi)
path_list = [vi]
while vi != vj:
path_list.append(path[vj][0])
vi = path[vj][0]
return path_list, path[vj][1]
def shortest_line_str(web, land1, land2):
str_ = ""
path, pay = shortest_line(web, land1, land2)
for i in range(len(path)):
str_ += str(path[i])
return "所求的最短路line路径为", str_, "总line代价为", pay
# end of the shortest_* helpers
class landscape:  # landscape represents a single scenic spot; rank gives its position in the graph's vertex list
    def __init__(self, name, position, category=None, hot=0):  # position is a number identifying the spot
self.name = name
self.position = position
self.category = category
self.hot = hot
def position(self):
return self._position
def category(self):
return self._category
def name(self):
return self._name
def hot(self):
return hot
def set_category(self, sorts):
if sorts not in categorys:
raise ValuError("in set_category, we do not have {}".format(sorts))
self.category = sorts
# For the multi-destination problem, first build a web with the existing methods; the web holds all the target landscapes
# Prim's algorithm then solves the multi-destination problem, which is really a minimum-spanning-tree problem
def muti_aim_solve(land_list):
sub_web = web()
for x in land_list:
sub_web.add_land(x)
lanst = web.land_list().copy()
for x in lanst:
for y in lanst:
if x == y:
continue
vi = lst_pos(web, x)
vj = lst_pos(web, y)
a, b, c = Edges([0, 2, 4])
lst = ["东方明珠", "西湖", "迪士尼"]
china = web(3, lst, a, b, c)
|
flexible
|
{
"blob_id": "b5ec6e0fc4239a53a882b455a113eaac4db6cef5",
"index": 2331,
"step-1": "<mask token>\n\n\nclass web:\n\n def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),\n graph_time=GraphAL(), graph_line=GraphAL()):\n self.graph_money = graph_money\n self.graph_time = graph_time\n self.graph_line = graph_line\n self.lnum = lnum\n self.land_list = land_list\n\n def is_empty(self):\n return self.lnum == 0\n <mask token>\n\n def lst_pos(self, land):\n return self.land_list.index(land)\n\n def _get_position(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n positionn = []\n for x in self.land_list():\n positionn.append(x.position)\n return positionn\n\n def add_land(self, landscape):\n self.land_list.append(landscape)\n self.graph_money.add_vertex()\n self.graph_time.add_vertex()\n self.graph_line.add_vertex()\n self.lnum += 1\n <mask token>\n <mask token>\n\n def get_money(self, land1, land2):\n a = self.graph_money.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_time(self, land1, land2, time):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), time)\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass landscape:\n\n def __init__(self, name, position, category=None, hot=0):\n self.name = name\n self.position = position\n self.category = category\n self.hot = hot\n\n def position(self):\n return self._position\n\n def category(self):\n return self._category\n\n def name(self):\n return self._name\n\n def hot(self):\n return hot\n\n def set_category(self, sorts):\n if sorts not in categorys:\n raise ValuError('in set_category, we do not have {}'.format(sorts))\n self.category = sorts\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass web:\n\n def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),\n graph_time=GraphAL(), graph_line=GraphAL()):\n self.graph_money = graph_money\n self.graph_time = graph_time\n self.graph_line = graph_line\n self.lnum = lnum\n self.land_list = land_list\n\n def is_empty(self):\n return self.lnum == 0\n\n def _get_name(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n namee = []\n for x in self.land_list():\n namee.append(x.name)\n return namee\n\n def lst_pos(self, land):\n return self.land_list.index(land)\n\n def _get_position(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n positionn = []\n for x in self.land_list():\n positionn.append(x.position)\n return positionn\n\n def add_land(self, landscape):\n self.land_list.append(landscape)\n self.graph_money.add_vertex()\n self.graph_time.add_vertex()\n self.graph_line.add_vertex()\n self.lnum += 1\n\n def set_all(self, land1, land2, money=infnum, time=infnum, line=1):\n graph_money.add_edge(self.land_list().index(land1), self.land_list(\n ).index(land2), money)\n graph_time.add_edge(self.land_list().index(land1), self.land_list()\n .index(land2), time)\n graph_line.add_edge(self.land_list().index(land1), self.land_list()\n .index(land2), line)\n\n def set_money(self, land1, land2, money):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), money)\n\n def get_money(self, land1, land2):\n a = self.graph_money.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_time(self, land1, land2, time):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), time)\n\n def get_time(self, land1, land2):\n a = self.graph_time.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_line(self, land1, land2, line):\n self.graph_line.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), line)\n\n def get_line(self, land1, land2):\n a = self.graph_line.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n\n<mask token>\n\n\nclass landscape:\n\n def __init__(self, name, position, category=None, hot=0):\n self.name = name\n self.position = position\n self.category = category\n self.hot = hot\n\n def position(self):\n return self._position\n\n def category(self):\n return self._category\n\n def name(self):\n return self._name\n\n def hot(self):\n return hot\n\n def set_category(self, sorts):\n if sorts not in categorys:\n raise ValuError('in set_category, we do not have {}'.format(sorts))\n self.category = sorts\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_by_word(lst, word):\n ans = []\n for x in lst:\n if word == x:\n ans.append(x)\n if len(word) > 20:\n raise ValuError(\n \"in find_by_word, we don't think it's possible for a city or a town to own a name longer than 20\"\n )\n if ans != []:\n return ans\n slices = []\n for i in range(len(word)):\n for j in range(0, len(word) - i + 1):\n slices.append(word[j:j + i])\n for x in lst:\n for i in range(1, len(word)):\n if slices[-i] in x:\n ans.append(x)\n return ans\n\n\n<mask token>\n\n\nclass web:\n\n def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),\n graph_time=GraphAL(), graph_line=GraphAL()):\n self.graph_money = graph_money\n self.graph_time = graph_time\n self.graph_line = graph_line\n self.lnum = lnum\n self.land_list = land_list\n\n def is_empty(self):\n return self.lnum == 0\n\n def _get_name(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n namee = []\n for x in self.land_list():\n namee.append(x.name)\n return namee\n\n def lst_pos(self, land):\n return self.land_list.index(land)\n\n def _get_position(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n positionn = []\n for x in self.land_list():\n positionn.append(x.position)\n return positionn\n\n def add_land(self, landscape):\n self.land_list.append(landscape)\n self.graph_money.add_vertex()\n self.graph_time.add_vertex()\n self.graph_line.add_vertex()\n self.lnum += 1\n\n def set_all(self, land1, land2, money=infnum, time=infnum, line=1):\n graph_money.add_edge(self.land_list().index(land1), self.land_list(\n ).index(land2), money)\n graph_time.add_edge(self.land_list().index(land1), self.land_list()\n .index(land2), time)\n graph_line.add_edge(self.land_list().index(land1), self.land_list()\n .index(land2), line)\n\n def set_money(self, land1, land2, money):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), money)\n\n def get_money(self, land1, land2):\n a = self.graph_money.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_time(self, land1, land2, time):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), time)\n\n def get_time(self, land1, land2):\n a = self.graph_time.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_line(self, land1, land2, line):\n self.graph_line.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), line)\n\n def get_line(self, land1, land2):\n a = self.graph_line.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n\ndef shortest_money(web, land1, land2):\n vi = web.lst_pos(land1)\n vj = web.lst_pos(land2)\n if vi == vj:\n raise ValuError(\n \"in shortest_money, if the begining is the same as the ending, you don't have to pay anything\"\n )\n path = dijkstra_shortest_paths(web.graph_money, vi)\n path_list = [vi]\n while vi != path[vj][0]:\n path_list.append(path[vj][0])\n vi = path[vj][0]\n return path_list, path[vj][1]\n\n\n<mask token>\n\n\nclass landscape:\n\n def __init__(self, name, position, category=None, hot=0):\n self.name = name\n self.position = position\n self.category = category\n self.hot = hot\n\n def position(self):\n return self._position\n\n def category(self):\n return self._category\n\n def name(self):\n return self._name\n\n def hot(self):\n return hot\n\n def set_category(self, sorts):\n if sorts not in categorys:\n raise ValuError('in set_category, we do not have 
{}'.format(sorts))\n self.category = sorts\n\n\ndef muti_aim_solve(land_list):\n sub_web = web()\n for x in land_list:\n sub_web.add_land(x)\n lanst = web.land_list().copy()\n for x in lanst:\n for y in lanst:\n if x == y:\n continue\n vi = lst_pos(web, x)\n vj = lst_pos(web, y)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef find_by_word(lst, word):\n ans = []\n for x in lst:\n if word == x:\n ans.append(x)\n if len(word) > 20:\n raise ValuError(\n \"in find_by_word, we don't think it's possible for a city or a town to own a name longer than 20\"\n )\n if ans != []:\n return ans\n slices = []\n for i in range(len(word)):\n for j in range(0, len(word) - i + 1):\n slices.append(word[j:j + i])\n for x in lst:\n for i in range(1, len(word)):\n if slices[-i] in x:\n ans.append(x)\n return ans\n\n\n<mask token>\n\n\nclass web:\n\n def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(),\n graph_time=GraphAL(), graph_line=GraphAL()):\n self.graph_money = graph_money\n self.graph_time = graph_time\n self.graph_line = graph_line\n self.lnum = lnum\n self.land_list = land_list\n\n def is_empty(self):\n return self.lnum == 0\n\n def _get_name(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n namee = []\n for x in self.land_list():\n namee.append(x.name)\n return namee\n\n def lst_pos(self, land):\n return self.land_list.index(land)\n\n def _get_position(self):\n if self.is_empty():\n raise WebLandsError(\"in 'get_all_position'\")\n positionn = []\n for x in self.land_list():\n positionn.append(x.position)\n return positionn\n\n def add_land(self, landscape):\n self.land_list.append(landscape)\n self.graph_money.add_vertex()\n self.graph_time.add_vertex()\n self.graph_line.add_vertex()\n self.lnum += 1\n\n def set_all(self, land1, land2, money=infnum, time=infnum, line=1):\n graph_money.add_edge(self.land_list().index(land1), self.land_list(\n ).index(land2), money)\n graph_time.add_edge(self.land_list().index(land1), self.land_list()\n .index(land2), time)\n graph_line.add_edge(self.land_list().index(land1), self.land_list()\n .index(land2), line)\n\n def set_money(self, land1, land2, money):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), money)\n\n def get_money(self, land1, land2):\n a = self.graph_money.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_time(self, land1, land2, time):\n self.graph_money.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), time)\n\n def get_time(self, land1, land2):\n a = self.graph_time.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n def set_line(self, land1, land2, line):\n self.graph_line.add_edge(self.land_list.index(land1), self.\n land_list.index(land2), line)\n\n def get_line(self, land1, land2):\n a = self.graph_line.get_edge(self.land_list.index(land1), self.\n land_list.index(land2))\n return a\n\n\ndef shortest_money(web, land1, land2):\n vi = web.lst_pos(land1)\n vj = web.lst_pos(land2)\n if vi == vj:\n raise ValuError(\n \"in shortest_money, if the begining is the same as the ending, you don't have to pay anything\"\n )\n path = dijkstra_shortest_paths(web.graph_money, vi)\n path_list = [vi]\n while vi != path[vj][0]:\n path_list.append(path[vj][0])\n vi = path[vj][0]\n return path_list, path[vj][1]\n\n\ndef shortest_money_str(web, land1, land2):\n str_ = ''\n path, pay = shortest_money(web, land1, land2)\n for i in range(len(path)):\n str_ += str(web.land_list[path[i]].name)\n str_ += '->'\n str_ += land2.name\n return '所求的最短路money路径为', str_, '总money代价为', pay\n\n\ndef shortest_time(web, land1, land2):\n vi = web.lst_pos(land1)\n vj = web.lst_pos(land2)\n if vi == vj:\n raise ValuError(\n \"in shortest_time, if the begining is the same as the ending, you don't have to 
pay anything\"\n )\n path = dijkstra_shortest_paths(web.graph_time(), vi)\n path_list = [vi]\n while vi != vj:\n path_list.append(path[vj][0])\n vi = path[vj][0]\n return path_list, path[vj][1]\n\n\ndef shortest_time_str(web, land1, land2):\n str_ = ''\n path, pay = shortest_time(web, land1, land2)\n for i in range(len(path)):\n str_ += str(path[i])\n return '所求的最短路time路径为', str_, '总time代价为', pay\n\n\ndef shortest_line(web, land1, land2):\n vi = web.lst_pos(land1)\n vj = web.lst_pos(land2)\n if vi == vj:\n raise ValuError(\n \"in shortest_line, if the begining is the same as the ending, you don't have to pay anything\"\n )\n path = dijkstra_shortest_paths(web.graph_line(), vi)\n path_list = [vi]\n while vi != vj:\n path_list.append(path[vj][0])\n vi = path[vj][0]\n return path_list, path[vj][1]\n\n\ndef shortest_time_str(web, land1, land2):\n str_ = ''\n path, pay = shortest_line(web, land1, land2)\n for i in range(len(path)):\n str_ += str(path[i])\n return '所求的最短路line路径为', str_, '总line代价为', pay\n\n\nclass landscape:\n\n def __init__(self, name, position, category=None, hot=0):\n self.name = name\n self.position = position\n self.category = category\n self.hot = hot\n\n def position(self):\n return self._position\n\n def category(self):\n return self._category\n\n def name(self):\n return self._name\n\n def hot(self):\n return hot\n\n def set_category(self, sorts):\n if sorts not in categorys:\n raise ValuError('in set_category, we do not have {}'.format(sorts))\n self.category = sorts\n\n\ndef muti_aim_solve(land_list):\n sub_web = web()\n for x in land_list:\n sub_web.add_land(x)\n lanst = web.land_list().copy()\n for x in lanst:\n for y in lanst:\n if x == y:\n continue\n vi = lst_pos(web, x)\n vj = lst_pos(web, y)\n\n\n<mask token>\n",
"step-5": "from Graph import *\r\nfrom PrioQueue import *\r\nfrom GShortestPath import *\r\nfrom GSpanTree import *\r\nfrom User import *\r\ninfinity = float(\"inf\")\r\n\r\n\r\n# 这是根据关键字找地点的方法,已经形成了某个依据属性的表后,通过关键词匹配来解决问题\r\n# 最终输出一个yield出的迭代器,将其list化后就可以向末端输出了\r\ndef find_by_word(lst, word):\r\n # 这个是字符串匹配函数,word是客户输入,lst是循环的东西\r\n # 最好排成优先队列\r\n # 若没找到,我们可以造一个关于word的任意位置的切片,长度比word短,由此来寻找想要的名称\r\n # 由于景点,地名的长度一般不长,所以即使这里的时间代价极高,我们也可以保证这样做不会引发混乱\r\n ans = []\r\n for x in lst:\r\n if word == x:\r\n ans.append(x)\r\n if len(word) > 20:\r\n raise ValuError(\"in find_by_word, we don't think it's possible for a city or a town\\\r\n to own a name longer than 20\")\r\n # 如果客户输入的地名在地名总集中,我们有理由相信他没有输错\r\n if ans != []:\r\n return ans\r\n slices = []\r\n for i in range(len(word)):\r\n # 这里为了保证效率,我们可以通过控制内部循环来使得表中名字串长度从小到大排列\r\n # 并且这样排出来的结果是相似度高的在前面\r\n for j in range(0, len(word) - i + 1):\r\n slices.append(word[j:j + i])\r\n for x in lst:\r\n for i in range(1, len(word)):\r\n if slices[-i] in x:\r\n ans.append(x)\r\n return ans\r\n\r\n\r\ncategorys = {\"历史文化\", \"现代都市\", \"山区\", \"海景\", \"综合\"}\r\ninfnum = float(\"inf\")\r\n\r\n\r\nclass web:\r\n # land_list是一个list对象,适用相应方法\r\n\r\n def __init__(self, lnum=0, land_list=[], graph_money=GraphAL(), graph_time=GraphAL(), graph_line=GraphAL()):\r\n self.graph_money = graph_money\r\n self.graph_time = graph_time\r\n self.graph_line = graph_line\r\n self.lnum = lnum\r\n self.land_list = land_list\r\n\r\n def is_empty(self):\r\n return self.lnum == 0\r\n # 获得所有景点名称,用list储存\r\n # self._land_list是以landscape为元素的表\r\n\r\n def _get_name(self):\r\n if self.is_empty():\r\n raise WebLandsError(\"in 'get_all_position'\")\r\n namee = []\r\n for x in self.land_list():\r\n namee.append(x.name)\r\n return namee\r\n # 获得所有景点位置\r\n\r\n def lst_pos(self, land):\r\n return self.land_list.index(land)\r\n\r\n def _get_position(self):\r\n if self.is_empty():\r\n raise WebLandsError(\"in 'get_all_position'\")\r\n positionn = []\r\n for x in self.land_list():\r\n positionn.append(x.position)\r\n return positionn\r\n\r\n def add_land(self, landscape):\r\n self.land_list.append(landscape)\r\n self.graph_money.add_vertex()\r\n self.graph_time.add_vertex()\r\n self.graph_line.add_vertex()\r\n self.lnum += 1\r\n\r\n # 如果不设置money,time或line,自然landscape之间没有边相连\r\n\r\n def set_all(self, land1, land2, money=infnum, time=infnum, line=1):\r\n graph_money.add_edge(self.land_list().index(land1),\r\n self.land_list().index(land2), money)\r\n graph_time.add_edge(self.land_list().index(land1),\r\n self.land_list().index(land2), time)\r\n graph_line.add_edge(self.land_list().index(land1),\r\n self.land_list().index(land2), line)\r\n\r\n# 以下基于Dijkstra算法来搞定最短路径问题,可同时作用于时间,金钱和路径长度做邻接图\r\n def set_money(self, land1, land2, money):\r\n self.graph_money.add_edge(self.land_list.index(land1),\r\n self.land_list.index(land2), money)\r\n\r\n def get_money(self, land1, land2):\r\n a = self.graph_money.get_edge(self.land_list.index(land1),\r\n self.land_list.index(land2))\r\n return a\r\n\r\n def set_time(self, land1, land2, time):\r\n self.graph_money.add_edge(self.land_list.index(land1),\r\n self.land_list.index(land2), time)\r\n\r\n def get_time(self, land1, land2):\r\n a = self.graph_time.get_edge(self.land_list.index(land1),\r\n self.land_list.index(land2))\r\n return a\r\n\r\n def set_line(self, land1, land2, line):\r\n self.graph_line.add_edge(self.land_list.index(land1),\r\n self.land_list.index(land2), line)\r\n\r\n def get_line(self, land1, land2):\r\n a = 
self.graph_line.get_edge(self.land_list.index(land1),\r\n self.land_list.index(land2))\r\n return a\r\n\r\n# shortestmoney等开始\r\n\r\n\r\ndef shortest_money(web, land1, land2):\r\n vi = web.lst_pos(land1)\r\n vj = web.lst_pos(land2)\r\n if vi == vj:\r\n raise ValuError(\"in shortest_money,\\\r\n if the begining is the same as the ending, you don't have to pay anything\")\r\n path = dijkstra_shortest_paths(web.graph_money, vi)\r\n path_list = [vi]\r\n while vi != path[vj][0]:\r\n path_list.append(path[vj][0])\r\n vi = path[vj][0]\r\n return path_list, path[vj][1]\r\n\r\n\r\ndef shortest_money_str(web, land1, land2):\r\n str_ = \"\"\r\n path, pay = shortest_money(web, land1, land2)\r\n for i in range(len(path)):\r\n str_ += str(web.land_list[path[i]].name)\r\n str_ += \"->\"\r\n str_ += land2.name\r\n return \"所求的最短路money路径为\", str_, \"总money代价为\", pay\r\n\r\n\r\ndef shortest_time(web, land1, land2):\r\n vi = web.lst_pos(land1)\r\n vj = web.lst_pos(land2)\r\n if vi == vj:\r\n raise ValuError(\"in shortest_time,\\\r\n if the begining is the same as the ending, you don't have to pay anything\")\r\n path = dijkstra_shortest_paths(web.graph_time(), vi)\r\n path_list = [vi]\r\n while vi != vj:\r\n path_list.append(path[vj][0])\r\n vi = path[vj][0]\r\n return path_list, path[vj][1]\r\n\r\n\r\ndef shortest_time_str(web, land1, land2):\r\n str_ = \"\"\r\n path, pay = shortest_time(web, land1, land2)\r\n for i in range(len(path)):\r\n str_ += str(path[i])\r\n return \"所求的最短路time路径为\", str_, \"总time代价为\", pay\r\n\r\n\r\ndef shortest_line(web, land1, land2):\r\n vi = web.lst_pos(land1)\r\n vj = web.lst_pos(land2)\r\n if vi == vj:\r\n raise ValuError(\"in shortest_line,\\\r\n if the begining is the same as the ending, you don't have to pay anything\")\r\n path = dijkstra_shortest_paths(web.graph_line(), vi)\r\n path_list = [vi]\r\n while vi != vj:\r\n path_list.append(path[vj][0])\r\n vi = path[vj][0]\r\n return path_list, path[vj][1]\r\n\r\n\r\ndef shortest_time_str(web, land1, land2):\r\n str_ = \"\"\r\n path, pay = shortest_line(web, land1, land2)\r\n for i in range(len(path)):\r\n str_ += str(path[i])\r\n return \"所求的最短路line路径为\", str_, \"总line代价为\", pay\r\n# shortest等结束\r\n\r\n\r\nclass landscape: # landscape代表一个景点,rank表示在图中list的位置\r\n\r\n def __init__(self, name, position, category=None, hot=0): # 其中position是一个数,代表一个景点\r\n self.name = name\r\n self.position = position\r\n self.category = category\r\n self.hot = hot\r\n\r\n def position(self):\r\n return self._position\r\n\r\n def category(self):\r\n return self._category\r\n\r\n def name(self):\r\n return self._name\r\n\r\n def hot(self):\r\n return hot\r\n\r\n def set_category(self, sorts):\r\n if sorts not in categorys:\r\n raise ValuError(\"in set_category, we do not have {}\".format(sorts))\r\n self.category = sorts\r\n\r\n# 对于多目标问题,先用既有方法构造一个web,web保存了所有目标landscape\r\n# 现在基于Prim算法给出一个关于多目标问题的算法,其实就是最小生成树问题\r\n\r\n\r\ndef muti_aim_solve(land_list):\r\n sub_web = web()\r\n for x in land_list:\r\n sub_web.add_land(x)\r\n lanst = web.land_list().copy()\r\n for x in lanst:\r\n for y in lanst:\r\n if x == y:\r\n continue\r\n vi = lst_pos(web, x)\r\n vj = lst_pos(web, y)\r\n\r\na, b, c = Edges([0, 2, 4])\r\nlst = [\"东方明珠\", \"西湖\", \"迪士尼\"]\r\nchina = web(3, lst, a, b, c)\r\n",
"step-ids": [
15,
21,
24,
29,
32
]
}
|
[
15,
21,
24,
29,
32
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .dispatch import dispatch_expts
|
flexible
|
{
"blob_id": "394ebfe25bbf8eaf427509f28a82a98b9b481b63",
"index": 4957,
"step-1": "<mask token>\n",
"step-2": "from .dispatch import dispatch_expts\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
def getGC(st):
n = 0
for char in st:
if char == 'C' or char == 'G':
n += 1
return n
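# For each input DNA string and window length ln, scan every window of that length
# and print the window with the highest G/C count.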
while True:
try:
DNA = input()
ln = int(input())
maxLen = 0
subDNA = ''
for i in range(len(DNA) - ln + 1):
sub = DNA[i:i + ln]
if getGC(sub) > maxLen:
maxLen = getGC(sub)
subDNA = sub
print(subDNA)
except:
break
|
normal
|
{
"blob_id": "afe63f94c7107cf79e57f695df8543e0786a155f",
"index": 6556,
"step-1": "<mask token>\n",
"step-2": "def getGC(st):\n n = 0\n for char in st:\n if char == 'C' or char == 'G':\n n += 1\n return n\n\n\n<mask token>\n",
"step-3": "def getGC(st):\n n = 0\n for char in st:\n if char == 'C' or char == 'G':\n n += 1\n return n\n\n\nwhile True:\n try:\n DNA = input()\n ln = int(input())\n maxLen = 0\n subDNA = ''\n for i in range(len(DNA) - ln + 1):\n sub = DNA[i:i + ln]\n if getGC(sub) > maxLen:\n maxLen = getGC(sub)\n subDNA = sub\n print(subDNA)\n except:\n break\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.contrib import admin
# Register your models here.
from registration.models import FbAuth
class AllFieldsAdmin(admin.ModelAdmin):
"""
    A model admin that displays all fields in the admin, except many-to-many and pk fields
"""
def __init__(self, model, admin_site):
self.list_display = [field.name for field in model._meta.fields
if field.name not in ["id"]]
super(AllFieldsAdmin, self).__init__(model, admin_site)
admin.site.register(FbAuth)
|
normal
|
{
"blob_id": "821afa85eb783b4bf1018800f598a3294c4cbcfb",
"index": 9532,
"step-1": "<mask token>\n\n\nclass AllFieldsAdmin(admin.ModelAdmin):\n <mask token>\n\n def __init__(self, model, admin_site):\n self.list_display = [field.name for field in model._meta.fields if \n field.name not in ['id']]\n super(AllFieldsAdmin, self).__init__(model, admin_site)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AllFieldsAdmin(admin.ModelAdmin):\n \"\"\"\n A model admin that displays all field in admin excpet Many to many and pk field\n \"\"\"\n\n def __init__(self, model, admin_site):\n self.list_display = [field.name for field in model._meta.fields if \n field.name not in ['id']]\n super(AllFieldsAdmin, self).__init__(model, admin_site)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AllFieldsAdmin(admin.ModelAdmin):\n \"\"\"\n A model admin that displays all field in admin excpet Many to many and pk field\n \"\"\"\n\n def __init__(self, model, admin_site):\n self.list_display = [field.name for field in model._meta.fields if \n field.name not in ['id']]\n super(AllFieldsAdmin, self).__init__(model, admin_site)\n\n\nadmin.site.register(FbAuth)\n",
"step-4": "from django.contrib import admin\nfrom registration.models import FbAuth\n\n\nclass AllFieldsAdmin(admin.ModelAdmin):\n \"\"\"\n A model admin that displays all field in admin excpet Many to many and pk field\n \"\"\"\n\n def __init__(self, model, admin_site):\n self.list_display = [field.name for field in model._meta.fields if \n field.name not in ['id']]\n super(AllFieldsAdmin, self).__init__(model, admin_site)\n\n\nadmin.site.register(FbAuth)\n",
"step-5": "from django.contrib import admin\n\n# Register your models here.\nfrom registration.models import FbAuth\n\n\nclass AllFieldsAdmin(admin.ModelAdmin):\n\n \"\"\"\n A model admin that displays all field in admin excpet Many to many and pk field\n \"\"\"\n\n def __init__(self, model, admin_site):\n self.list_display = [field.name for field in model._meta.fields\n if field.name not in [\"id\"]]\n super(AllFieldsAdmin, self).__init__(model, admin_site)\n\nadmin.site.register(FbAuth)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in range(N - 1):
a, b = map(int, input().split())
E.append((a, b))
adj_list[a].append(b)
adj_list[b].append(a)
<|reserved_special_token_0|>
while q:
v = q.popleft()
V_number[v - 1] = C[i]
i += 1
for u in adj_list[v]:
if V_number[u - 1] is None:
q.append(u)
print(sum(C[1:]))
print(*V_number)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
N = int(input())
adj_list = defaultdict(list)
E = []
V_number = [None] * N
for _ in range(N - 1):
a, b = map(int, input().split())
E.append((a, b))
adj_list[a].append(b)
adj_list[b].append(a)
C = sorted(list(map(int, input().split())), reverse=True)
q = deque([1])
i = 0
while q:
v = q.popleft()
V_number[v - 1] = C[i]
i += 1
for u in adj_list[v]:
if V_number[u - 1] is None:
q.append(u)
print(sum(C[1:]))
print(*V_number)
<|reserved_special_token_1|>
from collections import defaultdict, deque
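# Read a tree with N vertices and a list of values, then assign values greedily:
# BFS from vertex 1 and give each newly reached vertex the largest unused value.
# The printed total is the sum of all values except the largest one.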
N = int(input())
adj_list = defaultdict(list)
E = []
V_number = [None] * N
for _ in range(N - 1):
a, b = map(int, input().split())
E.append((a, b))
adj_list[a].append(b)
adj_list[b].append(a)
C = sorted(list(map(int, input().split())), reverse=True)
q = deque([1])
i = 0
while q:
v = q.popleft()
V_number[v - 1] = C[i]
i += 1
for u in adj_list[v]:
if V_number[u - 1] is None:
q.append(u)
print(sum(C[1:]))
print(*V_number)
|
flexible
|
{
"blob_id": "b93f6c3192f8dd58b96dfdc6ea2b17e12cce34d0",
"index": 9752,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\n<mask token>\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n",
"step-3": "<mask token>\nN = int(input())\nadj_list = defaultdict(list)\nE = []\nV_number = [None] * N\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\nC = sorted(list(map(int, input().split())), reverse=True)\nq = deque([1])\ni = 0\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n",
"step-4": "from collections import defaultdict, deque\nN = int(input())\nadj_list = defaultdict(list)\nE = []\nV_number = [None] * N\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\nC = sorted(list(map(int, input().split())), reverse=True)\nq = deque([1])\ni = 0\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class CheckoutIntegrationTest(BaseCheckoutTest):
<|reserved_special_token_0|>
def test_checkout_process(self):
"""Full checkout process using minimal api calls"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
def test_add_product_during_auth(self):
"""Test attempting to add a product during the authorize flow"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
basket1 = res.data['id']
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket2 = res.data['id']
self.assertNotEqual(basket1, basket2)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket3 = res.data['id']
self.assertEqual(basket2, basket3)
def test_pay_for_nothing(self):
"""Test attempting to pay for an empty basket"""
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'shipping_address': {
'first_name': 'fadsf', 'last_name': 'fad', 'line1':
'234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',
'state': 'NY', 'country': reverse('country-detail', args=['US']
), 'phone_number': '+1 (717) 467-1111'}}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_pre_auth(self):
"""Test attempting to manipulate basket price when requesting an auth form"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
url = reverse('cybersource-sign-auth-request')
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'total': '2.00',
'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',
'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':
'10001', 'state': 'NY', 'country': reverse('country-detail',
args=['US']), 'phone_number': '+1 (717) 467-1111'}}
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class CSReplyViewTest(BaseCheckoutTest):
"""Test the CybersourceReplyView with fixtured requests"""
def prepare_basket(self):
"""Setup a basket and session like SignAuthorizePaymentFormView would normally"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
session = self.client.session
session[CHECKOUT_BASKET_ID] = basket_id
session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))
session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'
session.save()
return session, basket_id, session[CHECKOUT_ORDER_NUM]
@patch('cybersource.signals.order_placed.send')
def test_invalid_signature(self, order_placed):
"""Invalid signature should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
data['signature'] = 'abcdef'
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_invalid_request_type(self, order_placed):
"""Bad request type should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data['req_transaction_type'] = 'payment',
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_duplicate_transaction_id(self, order_placed):
"""Duplicate Transaction ID should result in redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
@patch('cybersource.signals.order_placed.send')
def test_invalid_reference_number(self, order_placed):
"""Mismatched reference number should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number + 'ABC')
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_missing_basket(self, order_placed):
"""Missing basket should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
del session[CHECKOUT_BASKET_ID]
session.save()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_declined_card(self, order_placed):
"""Declined card should should result in redirect to failure page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:index'),
fetch_redirect_response=False)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_success(self, order_placed):
"""Successful authorization should create an order and redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(len(mail.outbox), 1, 'Should send email')
self.assertEqual(order_placed.call_count, 1,
'Should trigger order_placed signal')
order = order_placed.call_args[1]['order']
self.assertEqual(order.status, 'Authorized', 'Should set order status')
self.assertEqual(order.basket.id, basket_id,
'Should use basket from session')
self.assertEqual(order.number, order_number,
'Should use order number from CS request')
session = self.client.session
self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,
'Should save order_id in session')
        self.assertEqual(order.sources.count(), 1, 'Should save PaymentSource'
)
source = order.sources.first()
self.assertEqual(source.currency, 'USD')
self.assertEqual(source.amount_allocated, D('99.99'))
self.assertEqual(source.amount_refunded, D('0.00'))
self.assertEqual(source.amount_debited, D('0.00'))
self.assertEqual(source.transactions.count(), 1,
'Should save Transaction')
transaction = source.transactions.first()
self.assertEqual(transaction.log.data, data)
self.assertEqual(transaction.token.log, transaction.log)
self.assertEqual(transaction.token.masked_card_number,
'xxxxxxxxxxxx1111')
self.assertEqual(transaction.token.card_type, '001')
self.assertEqual(transaction.txn_type, 'Authorise')
self.assertEqual(transaction.amount, D('99.99'))
self.assertEqual(transaction.reference, data['transaction_id'])
self.assertEqual(transaction.status, 'ACCEPT')
self.assertEqual(transaction.request_token, data['request_token'])
self.assertEqual(order.payment_events.count(), 1,
'Should save PaymentEvent')
event = order.payment_events.first()
self.assertEqual(event.amount, D('99.99'))
self.assertEqual(event.reference, data['transaction_id'])
self.assertEqual(event.event_type.name, 'Authorise')
self.assertEqual(event.line_quantities.count(), 1,
'Should save PaymentEventQuantity')
lq = event.line_quantities.first()
self.assertEqual(lq.line, order.lines.first())
self.assertEqual(lq.quantity, 1)
class AuthPaymentFormViewTest(BaseCheckoutTest):
"""Test the SignAuthorizePaymentFormView"""
def prepare_basket(self):
"""Setup a basket so that we can pay for it"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
return basket_id
@patch('cybersource.signals.pre_build_auth_request.send')
@patch('cybersource.signals.pre_calculate_auth_total.send')
def test_request_auth_form_success(self, pre_calculate_auth_total,
pre_build_auth_request):
basket_id = self.prepare_basket()
def add_taxes(sender, basket, shipping_address, **kwargs):
for line in basket.all_lines():
line.purchase_info.price.tax = D('0.42')
pre_calculate_auth_total.side_effect = add_taxes
def add_a_field(sender, extra_fields, request, basket, **kwargs):
extra_fields['my_custom_field'] = 'ABC'
pre_build_auth_request.side_effect = add_a_field
session = self.client.session
session[CHECKOUT_ORDER_NUM] = '10000042'
session.save()
cs_url, data = self.do_sign_auth_request(basket_id=basket_id)
self.assertEqual(cs_url,
'https://testsecureacceptance.cybersource.com/silent/pay')
session = self.client.session
self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)
basket = Basket.objects.get(id=basket_id)
self.assertFalse(basket.can_be_edited)
self.assertEqual(pre_calculate_auth_total.call_count, 1)
self.assertEqual(pre_build_auth_request.call_count, 1)
self.assertEquals(data['amount'], '10.42')
self.assertEquals(data['bill_to_address_city'], 'Manhattan')
self.assertEquals(data['bill_to_address_country'], 'US')
self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')
self.assertEquals(data['bill_to_address_line2'], 'apt 5')
self.assertEquals(data['bill_to_address_postal_code'], '10001')
self.assertEquals(data['bill_to_address_state'], 'NY')
self.assertEquals(data['bill_to_email'], 'herp@example.com')
self.assertEquals(data['bill_to_forename'], 'Testy')
self.assertEquals(data['bill_to_phone'], '17174671111')
self.assertEquals(data['bill_to_surname'], 'McUnitTest')
self.assertEquals(data['card_cvn'], '123')
self.assertEquals(data['card_expiry_date'], '12-2017')
self.assertEquals(data['card_number'], '4111111111111111')
self.assertEquals(data['card_type'], '001')
self.assertEquals(data['currency'], 'USD')
self.assertEquals(data['customer_ip_address'], '127.0.0.1')
self.assertEquals(data['device_fingerprint_id'], '')
self.assertEquals(data['item_0_name'], 'My Product')
self.assertEquals(data['item_0_quantity'], '1')
self.assertEquals(data['item_0_sku'], basket.all_lines()[0].
stockrecord.partner_sku)
self.assertEquals(data['item_0_unit_price'], '10.42')
self.assertEquals(data['line_item_count'], '1')
self.assertEquals(data['locale'], 'en')
self.assertEquals(data['my_custom_field'], 'ABC')
self.assertEquals(data['payment_method'], 'card')
self.assertEquals(data['reference_number'], '10000042')
self.assertEquals(data['ship_to_address_city'], 'Manhattan')
self.assertEquals(data['ship_to_address_country'], 'US')
self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')
self.assertEquals(data['ship_to_address_line2'], '')
self.assertEquals(data['ship_to_address_postal_code'], '10001')
self.assertEquals(data['ship_to_address_state'], 'NY')
self.assertEquals(data['ship_to_forename'], 'fadsf')
self.assertEquals(data['ship_to_phone'], '17174671111')
self.assertEquals(data['ship_to_surname'], 'fad')
self.assertEquals(data['transaction_type'],
'authorization,create_payment_token')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseCheckoutTest(APITestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_get_basket(self):
url = reverse('api-basket')
return self.client.get(url)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def check_finished_order(self, number, product_id, quantity=1):
self.assertEqual(Order.objects.all().count(), 1)
order = Order.objects.get()
self.assertEqual(order.number, number)
lines = order.lines.all()
self.assertEqual(lines.count(), 1)
line = lines[0]
self.assertEqual(line.quantity, quantity)
self.assertEqual(line.product_id, product_id)
payment_events = order.payment_events.filter(event_type__name=
'Authorise')
self.assertEqual(payment_events.count(), 1)
self.assertEqual(payment_events[0].amount, order.total_incl_tax)
payment_sources = order.sources.all()
self.assertEqual(payment_sources.count(), 1)
self.assertEqual(payment_sources[0].currency, order.currency)
self.assertEqual(payment_sources[0].amount_allocated, order.
total_incl_tax)
self.assertEqual(payment_sources[0].amount_debited, D('0.00'))
self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))
transactions = payment_sources[0].transactions.all()
self.assertEqual(transactions.count(), 1)
self.assertEqual(transactions[0].txn_type, 'Authorise')
self.assertEqual(transactions[0].amount, order.total_incl_tax)
self.assertEqual(transactions[0].status, 'ACCEPT')
self.assertEqual(transactions[0].log_field('req_reference_number'),
order.number)
self.assertEqual(transactions[0].token.card_last4, '1111')
self.assertEqual(len(mail.outbox), 1)
class CheckoutIntegrationTest(BaseCheckoutTest):
"""Full Integration Test of Checkout"""
def test_checkout_process(self):
"""Full checkout process using minimal api calls"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
def test_add_product_during_auth(self):
"""Test attempting to add a product during the authorize flow"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
basket1 = res.data['id']
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket2 = res.data['id']
self.assertNotEqual(basket1, basket2)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket3 = res.data['id']
self.assertEqual(basket2, basket3)
def test_pay_for_nothing(self):
"""Test attempting to pay for an empty basket"""
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'shipping_address': {
'first_name': 'fadsf', 'last_name': 'fad', 'line1':
'234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',
'state': 'NY', 'country': reverse('country-detail', args=['US']
), 'phone_number': '+1 (717) 467-1111'}}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_pre_auth(self):
"""Test attempting to manipulate basket price when requesting an auth form"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
url = reverse('cybersource-sign-auth-request')
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'total': '2.00',
'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',
'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':
'10001', 'state': 'NY', 'country': reverse('country-detail',
args=['US']), 'phone_number': '+1 (717) 467-1111'}}
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_during_auth(self):
"""Test attempting to manipulate basket price when requesting auth from CyberSource"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
cs_url, cs_data = self.do_sign_auth_request(basket_id)
cs_data['amount'] = '2.00'
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 403)
def test_free_product(self):
"""Full checkout process using minimal api calls"""
product = self.create_product(price=D('0.00'))
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
self.assertEqual(cs_data['amount'], '0.00')
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
class CSReplyViewTest(BaseCheckoutTest):
"""Test the CybersourceReplyView with fixtured requests"""
def prepare_basket(self):
"""Setup a basket and session like SignAuthorizePaymentFormView would normally"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
session = self.client.session
session[CHECKOUT_BASKET_ID] = basket_id
session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))
session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'
session.save()
return session, basket_id, session[CHECKOUT_ORDER_NUM]
@patch('cybersource.signals.order_placed.send')
def test_invalid_signature(self, order_placed):
"""Invalid signature should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
data['signature'] = 'abcdef'
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_invalid_request_type(self, order_placed):
"""Bad request type should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
        data['req_transaction_type'] = 'payment'
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_duplicate_transaction_id(self, order_placed):
"""Duplicate Transaction ID should result in redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
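        # The first reply from CyberSource should place the order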
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
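        # Replaying the same reply (same transaction_id) must not create a second order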
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
@patch('cybersource.signals.order_placed.send')
def test_invalid_reference_number(self, order_placed):
"""Mismatched reference number should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number + 'ABC')
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_missing_basket(self, order_placed):
"""Missing basket should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
del session[CHECKOUT_BASKET_ID]
session.save()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_declined_card(self, order_placed):
"""Declined card should should result in redirect to failure page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:index'),
fetch_redirect_response=False)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_success(self, order_placed):
"""Successful authorization should create an order and redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(len(mail.outbox), 1, 'Should send email')
self.assertEqual(order_placed.call_count, 1,
'Should trigger order_placed signal')
order = order_placed.call_args[1]['order']
self.assertEqual(order.status, 'Authorized', 'Should set order status')
self.assertEqual(order.basket.id, basket_id,
'Should use basket from session')
self.assertEqual(order.number, order_number,
'Should use order number from CS request')
session = self.client.session
        self.assertEqual(session[CHECKOUT_ORDER_ID], order.id,
            'Should save order_id in session')
        self.assertEqual(order.sources.count(), 1, 'Should save PaymentSource')
source = order.sources.first()
self.assertEqual(source.currency, 'USD')
self.assertEqual(source.amount_allocated, D('99.99'))
self.assertEqual(source.amount_refunded, D('0.00'))
self.assertEqual(source.amount_debited, D('0.00'))
self.assertEqual(source.transactions.count(), 1,
'Should save Transaction')
transaction = source.transactions.first()
self.assertEqual(transaction.log.data, data)
self.assertEqual(transaction.token.log, transaction.log)
self.assertEqual(transaction.token.masked_card_number,
'xxxxxxxxxxxx1111')
self.assertEqual(transaction.token.card_type, '001')
self.assertEqual(transaction.txn_type, 'Authorise')
self.assertEqual(transaction.amount, D('99.99'))
self.assertEqual(transaction.reference, data['transaction_id'])
self.assertEqual(transaction.status, 'ACCEPT')
self.assertEqual(transaction.request_token, data['request_token'])
self.assertEqual(order.payment_events.count(), 1,
'Should save PaymentEvent')
event = order.payment_events.first()
self.assertEqual(event.amount, D('99.99'))
self.assertEqual(event.reference, data['transaction_id'])
self.assertEqual(event.event_type.name, 'Authorise')
self.assertEqual(event.line_quantities.count(), 1,
'Should save PaymentEventQuantity')
lq = event.line_quantities.first()
self.assertEqual(lq.line, order.lines.first())
self.assertEqual(lq.quantity, 1)
class AuthPaymentFormViewTest(BaseCheckoutTest):
"""Test the SignAuthorizePaymentFormView"""
def prepare_basket(self):
"""Setup a basket so that we can pay for it"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
return basket_id
@patch('cybersource.signals.pre_build_auth_request.send')
@patch('cybersource.signals.pre_calculate_auth_total.send')
def test_request_auth_form_success(self, pre_calculate_auth_total,
pre_build_auth_request):
basket_id = self.prepare_basket()
def add_taxes(sender, basket, shipping_address, **kwargs):
for line in basket.all_lines():
line.purchase_info.price.tax = D('0.42')
pre_calculate_auth_total.side_effect = add_taxes
def add_a_field(sender, extra_fields, request, basket, **kwargs):
extra_fields['my_custom_field'] = 'ABC'
pre_build_auth_request.side_effect = add_a_field
session = self.client.session
session[CHECKOUT_ORDER_NUM] = '10000042'
session.save()
cs_url, data = self.do_sign_auth_request(basket_id=basket_id)
self.assertEqual(cs_url,
'https://testsecureacceptance.cybersource.com/silent/pay')
session = self.client.session
self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)
basket = Basket.objects.get(id=basket_id)
self.assertFalse(basket.can_be_edited)
self.assertEqual(pre_calculate_auth_total.call_count, 1)
self.assertEqual(pre_build_auth_request.call_count, 1)
self.assertEquals(data['amount'], '10.42')
self.assertEquals(data['bill_to_address_city'], 'Manhattan')
self.assertEquals(data['bill_to_address_country'], 'US')
self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')
self.assertEquals(data['bill_to_address_line2'], 'apt 5')
self.assertEquals(data['bill_to_address_postal_code'], '10001')
self.assertEquals(data['bill_to_address_state'], 'NY')
self.assertEquals(data['bill_to_email'], 'herp@example.com')
self.assertEquals(data['bill_to_forename'], 'Testy')
self.assertEquals(data['bill_to_phone'], '17174671111')
self.assertEquals(data['bill_to_surname'], 'McUnitTest')
self.assertEquals(data['card_cvn'], '123')
        next_year = datetime.date.today().year + 1
        self.assertEquals(data['card_expiry_date'], '12-{}'.format(next_year))
self.assertEquals(data['card_number'], '4111111111111111')
self.assertEquals(data['card_type'], '001')
self.assertEquals(data['currency'], 'USD')
self.assertEquals(data['customer_ip_address'], '127.0.0.1')
self.assertEquals(data['device_fingerprint_id'], '')
self.assertEquals(data['item_0_name'], 'My Product')
self.assertEquals(data['item_0_quantity'], '1')
self.assertEquals(data['item_0_sku'], basket.all_lines()[0].
stockrecord.partner_sku)
self.assertEquals(data['item_0_unit_price'], '10.42')
self.assertEquals(data['line_item_count'], '1')
self.assertEquals(data['locale'], 'en')
self.assertEquals(data['my_custom_field'], 'ABC')
self.assertEquals(data['payment_method'], 'card')
self.assertEquals(data['reference_number'], '10000042')
self.assertEquals(data['ship_to_address_city'], 'Manhattan')
self.assertEquals(data['ship_to_address_country'], 'US')
self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')
self.assertEquals(data['ship_to_address_line2'], '')
self.assertEquals(data['ship_to_address_postal_code'], '10001')
self.assertEquals(data['ship_to_address_state'], 'NY')
self.assertEquals(data['ship_to_forename'], 'fadsf')
self.assertEquals(data['ship_to_phone'], '17174671111')
self.assertEquals(data['ship_to_surname'], 'fad')
self.assertEquals(data['transaction_type'],
'authorization,create_payment_token')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseCheckoutTest(APITestCase):
<|reserved_special_token_0|>
def create_product(self, price=D('10.00')):
product = factories.create_product(title='My Product',
product_class='My Product Class')
record = factories.create_stockrecord(currency='USD', product=
product, num_in_stock=10, price_excl_tax=price)
factories.create_purchase_info(record)
return product
def do_add_to_basket(self, product_id, quantity=1):
url = reverse('api-basket-add-product')
data = {'url': reverse('product-detail', args=[product_id]),
'quantity': quantity}
return self.client.post(url, data)
def do_get_basket(self):
url = reverse('api-basket')
return self.client.get(url)
def do_sign_auth_request(self, basket_id=None, data=None):
if data is None:
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'shipping_address': {
'first_name': 'fadsf', 'last_name': 'fad', 'line1':
'234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',
'state': 'NY', 'country': reverse('country-detail', args=[
'US']), 'phone_number': '+1 (717) 467-1111'}}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 200)
next_year = datetime.date.today().year + 1
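        # Card details a customer would type into the hosted payment form
        # (4111111111111111 is the standard Visa test card number)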
cs_data = {'card_type': '001', 'card_number': '4111111111111111',
'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year
), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest',
'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2':
'apt 5', 'bill_to_address_city': 'Manhattan',
'bill_to_address_state': 'NY', 'bill_to_address_postal_code':
'10001', 'bill_to_address_country': 'US', 'bill_to_phone':
'17174671111'}
for field in res.data['fields']:
if not field['editable'] or field['key'] not in cs_data:
cs_data[field['key']] = field['value']
cs_url = res.data['url']
return cs_url, cs_data
def do_cybersource_post(self, cs_url, cs_data):
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 200)
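        # Scrape the hidden form fields CyberSource renders so we can replay them to our reply view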
soup = BeautifulSoup(res.content, 'html.parser')
form_data = {}
for element in soup.find_all('input'):
form_data[element['name']] = element['value']
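        # We have the data from cybersource, send it to our cybersource callback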
url = reverse('cybersource-reply')
return self.client.post(url, form_data)
def check_finished_order(self, number, product_id, quantity=1):
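        # Order exists and was paid for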
self.assertEqual(Order.objects.all().count(), 1)
order = Order.objects.get()
self.assertEqual(order.number, number)
lines = order.lines.all()
self.assertEqual(lines.count(), 1)
line = lines[0]
self.assertEqual(line.quantity, quantity)
self.assertEqual(line.product_id, product_id)
payment_events = order.payment_events.filter(event_type__name=
'Authorise')
self.assertEqual(payment_events.count(), 1)
self.assertEqual(payment_events[0].amount, order.total_incl_tax)
payment_sources = order.sources.all()
self.assertEqual(payment_sources.count(), 1)
self.assertEqual(payment_sources[0].currency, order.currency)
self.assertEqual(payment_sources[0].amount_allocated, order.
total_incl_tax)
self.assertEqual(payment_sources[0].amount_debited, D('0.00'))
self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))
transactions = payment_sources[0].transactions.all()
self.assertEqual(transactions.count(), 1)
self.assertEqual(transactions[0].txn_type, 'Authorise')
self.assertEqual(transactions[0].amount, order.total_incl_tax)
self.assertEqual(transactions[0].status, 'ACCEPT')
self.assertEqual(transactions[0].log_field('req_reference_number'),
order.number)
self.assertEqual(transactions[0].token.card_last4, '1111')
self.assertEqual(len(mail.outbox), 1)
class CheckoutIntegrationTest(BaseCheckoutTest):
"""Full Integration Test of Checkout"""
def test_checkout_process(self):
"""Full checkout process using minimal api calls"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
def test_add_product_during_auth(self):
"""Test attempting to add a product during the authorize flow"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
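        # Adding a product here should succeed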
res = self.do_add_to_basket(product.id)
basket1 = res.data['id']
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
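        # Adding a product here should go to a new basket, not the one we're auth'ing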
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket2 = res.data['id']
self.assertNotEqual(basket1, basket2)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
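        # Adding a product here should go to basket2, not basket1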
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket3 = res.data['id']
self.assertEqual(basket2, basket3)
def test_pay_for_nothing(self):
"""Test attempting to pay for an empty basket"""
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'shipping_address': {
'first_name': 'fadsf', 'last_name': 'fad', 'line1':
'234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',
'state': 'NY', 'country': reverse('country-detail', args=['US']
), 'phone_number': '+1 (717) 467-1111'}}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_pre_auth(self):
"""Test attempting to manipulate basket price when requesting an auth form"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
url = reverse('cybersource-sign-auth-request')
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'total': '2.00',
'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',
'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':
'10001', 'state': 'NY', 'country': reverse('country-detail',
args=['US']), 'phone_number': '+1 (717) 467-1111'}}
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_during_auth(self):
"""Test attempting to manipulate basket price when requesting auth from CyberSource"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
cs_url, cs_data = self.do_sign_auth_request(basket_id)
cs_data['amount'] = '2.00'
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 403)
def test_free_product(self):
"""Full checkout process using minimal api calls"""
product = self.create_product(price=D('0.00'))
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
self.assertEqual(cs_data['amount'], '0.00')
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
class CSReplyViewTest(BaseCheckoutTest):
"""Test the CybersourceReplyView with fixtured requests"""
def prepare_basket(self):
"""Setup a basket and session like SignAuthorizePaymentFormView would normally"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
session = self.client.session
session[CHECKOUT_BASKET_ID] = basket_id
session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))
session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'
session.save()
return session, basket_id, session[CHECKOUT_ORDER_NUM]
@patch('cybersource.signals.order_placed.send')
def test_invalid_signature(self, order_placed):
"""Invalid signature should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
data['signature'] = 'abcdef'
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_invalid_request_type(self, order_placed):
"""Bad request type should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
        data['req_transaction_type'] = 'payment'
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_duplicate_transaction_id(self, order_placed):
"""Duplicate Transaction ID should result in redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
@patch('cybersource.signals.order_placed.send')
def test_invalid_reference_number(self, order_placed):
"""Mismatched reference number should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number + 'ABC')
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_missing_basket(self, order_placed):
"""Missing basket should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
del session[CHECKOUT_BASKET_ID]
session.save()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_declined_card(self, order_placed):
"""Declined card should should result in redirect to failure page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:index'),
fetch_redirect_response=False)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_success(self, order_placed):
"""Successful authorization should create an order and redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(len(mail.outbox), 1, 'Should send email')
self.assertEqual(order_placed.call_count, 1,
'Should trigger order_placed signal')
order = order_placed.call_args[1]['order']
self.assertEqual(order.status, 'Authorized', 'Should set order status')
self.assertEqual(order.basket.id, basket_id,
'Should use basket from session')
self.assertEqual(order.number, order_number,
'Should use order number from CS request')
session = self.client.session
        self.assertEqual(session[CHECKOUT_ORDER_ID], order.id,
            'Should save order_id in session')
        self.assertEqual(order.sources.count(), 1, 'Should save PaymentSource')
source = order.sources.first()
self.assertEqual(source.currency, 'USD')
self.assertEqual(source.amount_allocated, D('99.99'))
self.assertEqual(source.amount_refunded, D('0.00'))
self.assertEqual(source.amount_debited, D('0.00'))
self.assertEqual(source.transactions.count(), 1,
'Should save Transaction')
transaction = source.transactions.first()
self.assertEqual(transaction.log.data, data)
self.assertEqual(transaction.token.log, transaction.log)
self.assertEqual(transaction.token.masked_card_number,
'xxxxxxxxxxxx1111')
self.assertEqual(transaction.token.card_type, '001')
self.assertEqual(transaction.txn_type, 'Authorise')
self.assertEqual(transaction.amount, D('99.99'))
self.assertEqual(transaction.reference, data['transaction_id'])
self.assertEqual(transaction.status, 'ACCEPT')
self.assertEqual(transaction.request_token, data['request_token'])
self.assertEqual(order.payment_events.count(), 1,
'Should save PaymentEvent')
event = order.payment_events.first()
self.assertEqual(event.amount, D('99.99'))
self.assertEqual(event.reference, data['transaction_id'])
self.assertEqual(event.event_type.name, 'Authorise')
self.assertEqual(event.line_quantities.count(), 1,
'Should save PaymentEventQuantity')
lq = event.line_quantities.first()
self.assertEqual(lq.line, order.lines.first())
self.assertEqual(lq.quantity, 1)
class AuthPaymentFormViewTest(BaseCheckoutTest):
"""Test the SignAuthorizePaymentFormView"""
def prepare_basket(self):
"""Setup a basket so that we can pay for it"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
return basket_id
@patch('cybersource.signals.pre_build_auth_request.send')
@patch('cybersource.signals.pre_calculate_auth_total.send')
def test_request_auth_form_success(self, pre_calculate_auth_total,
pre_build_auth_request):
basket_id = self.prepare_basket()
def add_taxes(sender, basket, shipping_address, **kwargs):
for line in basket.all_lines():
line.purchase_info.price.tax = D('0.42')
pre_calculate_auth_total.side_effect = add_taxes
def add_a_field(sender, extra_fields, request, basket, **kwargs):
extra_fields['my_custom_field'] = 'ABC'
pre_build_auth_request.side_effect = add_a_field
session = self.client.session
session[CHECKOUT_ORDER_NUM] = '10000042'
session.save()
cs_url, data = self.do_sign_auth_request(basket_id=basket_id)
self.assertEqual(cs_url,
'https://testsecureacceptance.cybersource.com/silent/pay')
session = self.client.session
self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)
basket = Basket.objects.get(id=basket_id)
self.assertFalse(basket.can_be_edited)
self.assertEqual(pre_calculate_auth_total.call_count, 1)
self.assertEqual(pre_build_auth_request.call_count, 1)
self.assertEquals(data['amount'], '10.42')
self.assertEquals(data['bill_to_address_city'], 'Manhattan')
self.assertEquals(data['bill_to_address_country'], 'US')
self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')
self.assertEquals(data['bill_to_address_line2'], 'apt 5')
self.assertEquals(data['bill_to_address_postal_code'], '10001')
self.assertEquals(data['bill_to_address_state'], 'NY')
self.assertEquals(data['bill_to_email'], 'herp@example.com')
self.assertEquals(data['bill_to_forename'], 'Testy')
self.assertEquals(data['bill_to_phone'], '17174671111')
self.assertEquals(data['bill_to_surname'], 'McUnitTest')
self.assertEquals(data['card_cvn'], '123')
        next_year = datetime.date.today().year + 1
        self.assertEquals(data['card_expiry_date'], '12-{}'.format(next_year))
self.assertEquals(data['card_number'], '4111111111111111')
self.assertEquals(data['card_type'], '001')
self.assertEquals(data['currency'], 'USD')
self.assertEquals(data['customer_ip_address'], '127.0.0.1')
self.assertEquals(data['device_fingerprint_id'], '')
self.assertEquals(data['item_0_name'], 'My Product')
self.assertEquals(data['item_0_quantity'], '1')
self.assertEquals(data['item_0_sku'], basket.all_lines()[0].
stockrecord.partner_sku)
self.assertEquals(data['item_0_unit_price'], '10.42')
self.assertEquals(data['line_item_count'], '1')
self.assertEquals(data['locale'], 'en')
self.assertEquals(data['my_custom_field'], 'ABC')
self.assertEquals(data['payment_method'], 'card')
self.assertEquals(data['reference_number'], '10000042')
self.assertEquals(data['ship_to_address_city'], 'Manhattan')
self.assertEquals(data['ship_to_address_country'], 'US')
self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')
self.assertEquals(data['ship_to_address_line2'], '')
self.assertEquals(data['ship_to_address_postal_code'], '10001')
self.assertEquals(data['ship_to_address_state'], 'NY')
self.assertEquals(data['ship_to_forename'], 'fadsf')
self.assertEquals(data['ship_to_phone'], '17174671111')
self.assertEquals(data['ship_to_surname'], 'fad')
self.assertEquals(data['transaction_type'],
'authorization,create_payment_token')
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
from cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID
from cybersource.tests import factories as cs_factories
from decimal import Decimal as D
from django.core import mail
from django.core.urlresolvers import reverse
from mock import patch
from oscar.core.loading import get_class, get_model
from oscar.test import factories
from random import randrange
from rest_framework.test import APITestCase
import datetime
import requests
Basket = get_model('basket', 'Basket')
Product = get_model('catalogue', 'Product')
Order = get_model('order', 'Order')
class BaseCheckoutTest(APITestCase):
fixtures = ['cybersource-test.yaml']
def create_product(self, price=D('10.00')):
product = factories.create_product(title='My Product',
product_class='My Product Class')
record = factories.create_stockrecord(currency='USD', product=
product, num_in_stock=10, price_excl_tax=price)
factories.create_purchase_info(record)
return product
def do_add_to_basket(self, product_id, quantity=1):
url = reverse('api-basket-add-product')
data = {'url': reverse('product-detail', args=[product_id]),
'quantity': quantity}
return self.client.post(url, data)
def do_get_basket(self):
url = reverse('api-basket')
return self.client.get(url)
def do_sign_auth_request(self, basket_id=None, data=None):
if data is None:
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'shipping_address': {
'first_name': 'fadsf', 'last_name': 'fad', 'line1':
'234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',
'state': 'NY', 'country': reverse('country-detail', args=[
'US']), 'phone_number': '+1 (717) 467-1111'}}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 200)
next_year = datetime.date.today().year + 1
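        # Card details a customer would type into the hosted payment form
        # (4111111111111111 is the standard Visa test card number)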
cs_data = {'card_type': '001', 'card_number': '4111111111111111',
'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year
), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest',
'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2':
'apt 5', 'bill_to_address_city': 'Manhattan',
'bill_to_address_state': 'NY', 'bill_to_address_postal_code':
'10001', 'bill_to_address_country': 'US', 'bill_to_phone':
'17174671111'}
for field in res.data['fields']:
if not field['editable'] or field['key'] not in cs_data:
cs_data[field['key']] = field['value']
cs_url = res.data['url']
return cs_url, cs_data
def do_cybersource_post(self, cs_url, cs_data):
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 200)
soup = BeautifulSoup(res.content, 'html.parser')
form_data = {}
for element in soup.find_all('input'):
form_data[element['name']] = element['value']
url = reverse('cybersource-reply')
return self.client.post(url, form_data)
def check_finished_order(self, number, product_id, quantity=1):
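        # Order exists and was paid for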
self.assertEqual(Order.objects.all().count(), 1)
order = Order.objects.get()
self.assertEqual(order.number, number)
lines = order.lines.all()
self.assertEqual(lines.count(), 1)
line = lines[0]
self.assertEqual(line.quantity, quantity)
self.assertEqual(line.product_id, product_id)
payment_events = order.payment_events.filter(event_type__name=
'Authorise')
self.assertEqual(payment_events.count(), 1)
self.assertEqual(payment_events[0].amount, order.total_incl_tax)
payment_sources = order.sources.all()
self.assertEqual(payment_sources.count(), 1)
self.assertEqual(payment_sources[0].currency, order.currency)
self.assertEqual(payment_sources[0].amount_allocated, order.
total_incl_tax)
self.assertEqual(payment_sources[0].amount_debited, D('0.00'))
self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))
transactions = payment_sources[0].transactions.all()
self.assertEqual(transactions.count(), 1)
self.assertEqual(transactions[0].txn_type, 'Authorise')
self.assertEqual(transactions[0].amount, order.total_incl_tax)
self.assertEqual(transactions[0].status, 'ACCEPT')
self.assertEqual(transactions[0].log_field('req_reference_number'),
order.number)
self.assertEqual(transactions[0].token.card_last4, '1111')
self.assertEqual(len(mail.outbox), 1)
class CheckoutIntegrationTest(BaseCheckoutTest):
"""Full Integration Test of Checkout"""
def test_checkout_process(self):
"""Full checkout process using minimal api calls"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
def test_add_product_during_auth(self):
"""Test attempting to add a product during the authorize flow"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
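        # Adding a product here should succeed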
res = self.do_add_to_basket(product.id)
basket1 = res.data['id']
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
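        # Adding a product here should go to a new basket, not the one we're auth'ing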
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket2 = res.data['id']
self.assertNotEqual(basket1, basket2)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
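        # Adding a product here should go to basket2, not basket1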
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket3 = res.data['id']
self.assertEqual(basket2, basket3)
def test_pay_for_nothing(self):
"""Test attempting to pay for an empty basket"""
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'shipping_address': {
'first_name': 'fadsf', 'last_name': 'fad', 'line1':
'234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',
'state': 'NY', 'country': reverse('country-detail', args=['US']
), 'phone_number': '+1 (717) 467-1111'}}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_pre_auth(self):
"""Test attempting to manipulate basket price when requesting an auth form"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
url = reverse('cybersource-sign-auth-request')
data = {'guest_email': 'herp@example.com', 'basket': reverse(
'basket-detail', args=[basket_id]), 'total': '2.00',
'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',
'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':
'10001', 'state': 'NY', 'country': reverse('country-detail',
args=['US']), 'phone_number': '+1 (717) 467-1111'}}
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_during_auth(self):
"""Test attempting to manipulate basket price when requesting auth from CyberSource"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
cs_url, cs_data = self.do_sign_auth_request(basket_id)
cs_data['amount'] = '2.00'
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 403)
def test_free_product(self):
"""Full checkout process using minimal api calls"""
product = self.create_product(price=D('0.00'))
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
self.assertEqual(cs_data['amount'], '0.00')
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
class CSReplyViewTest(BaseCheckoutTest):
"""Test the CybersourceReplyView with fixtured requests"""
def prepare_basket(self):
"""Setup a basket and session like SignAuthorizePaymentFormView would normally"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
session = self.client.session
session[CHECKOUT_BASKET_ID] = basket_id
session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))
session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'
session.save()
return session, basket_id, session[CHECKOUT_ORDER_NUM]
@patch('cybersource.signals.order_placed.send')
def test_invalid_signature(self, order_placed):
"""Invalid signature should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
data['signature'] = 'abcdef'
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_invalid_request_type(self, order_placed):
"""Bad request type should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
        data['req_transaction_type'] = 'payment'
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_duplicate_transaction_id(self, order_placed):
"""Duplicate Transaction ID should result in redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
@patch('cybersource.signals.order_placed.send')
def test_invalid_reference_number(self, order_placed):
"""Mismatched reference number should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number + 'ABC')
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_missing_basket(self, order_placed):
"""Missing basket should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
del session[CHECKOUT_BASKET_ID]
session.save()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_declined_card(self, order_placed):
"""Declined card should should result in redirect to failure page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:index'),
fetch_redirect_response=False)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0,
'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_success(self, order_placed):
"""Successful authorization should create an order and redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(len(mail.outbox), 1, 'Should send email')
self.assertEqual(order_placed.call_count, 1,
'Should trigger order_placed signal')
order = order_placed.call_args[1]['order']
self.assertEqual(order.status, 'Authorized', 'Should set order status')
self.assertEqual(order.basket.id, basket_id,
'Should use basket from session')
self.assertEqual(order.number, order_number,
'Should use order number from CS request')
session = self.client.session
        self.assertEqual(session[CHECKOUT_ORDER_ID], order.id,
            'Should save order_id in session')
        self.assertEqual(order.sources.count(), 1, 'Should save PaymentSource')
source = order.sources.first()
self.assertEqual(source.currency, 'USD')
self.assertEqual(source.amount_allocated, D('99.99'))
self.assertEqual(source.amount_refunded, D('0.00'))
self.assertEqual(source.amount_debited, D('0.00'))
self.assertEqual(source.transactions.count(), 1,
'Should save Transaction')
transaction = source.transactions.first()
self.assertEqual(transaction.log.data, data)
self.assertEqual(transaction.token.log, transaction.log)
self.assertEqual(transaction.token.masked_card_number,
'xxxxxxxxxxxx1111')
self.assertEqual(transaction.token.card_type, '001')
self.assertEqual(transaction.txn_type, 'Authorise')
self.assertEqual(transaction.amount, D('99.99'))
self.assertEqual(transaction.reference, data['transaction_id'])
self.assertEqual(transaction.status, 'ACCEPT')
self.assertEqual(transaction.request_token, data['request_token'])
self.assertEqual(order.payment_events.count(), 1,
'Should save PaymentEvent')
event = order.payment_events.first()
self.assertEqual(event.amount, D('99.99'))
self.assertEqual(event.reference, data['transaction_id'])
self.assertEqual(event.event_type.name, 'Authorise')
self.assertEqual(event.line_quantities.count(), 1,
'Should save PaymentEventQuantity')
lq = event.line_quantities.first()
self.assertEqual(lq.line, order.lines.first())
self.assertEqual(lq.quantity, 1)
class AuthPaymentFormViewTest(BaseCheckoutTest):
"""Test the SignAuthorizePaymentFormView"""
def prepare_basket(self):
"""Setup a basket so that we can pay for it"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
return basket_id
@patch('cybersource.signals.pre_build_auth_request.send')
@patch('cybersource.signals.pre_calculate_auth_total.send')
def test_request_auth_form_success(self, pre_calculate_auth_total,
pre_build_auth_request):
basket_id = self.prepare_basket()
def add_taxes(sender, basket, shipping_address, **kwargs):
for line in basket.all_lines():
line.purchase_info.price.tax = D('0.42')
pre_calculate_auth_total.side_effect = add_taxes
def add_a_field(sender, extra_fields, request, basket, **kwargs):
extra_fields['my_custom_field'] = 'ABC'
pre_build_auth_request.side_effect = add_a_field
session = self.client.session
session[CHECKOUT_ORDER_NUM] = '10000042'
session.save()
cs_url, data = self.do_sign_auth_request(basket_id=basket_id)
self.assertEqual(cs_url,
'https://testsecureacceptance.cybersource.com/silent/pay')
session = self.client.session
self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)
basket = Basket.objects.get(id=basket_id)
self.assertFalse(basket.can_be_edited)
self.assertEqual(pre_calculate_auth_total.call_count, 1)
self.assertEqual(pre_build_auth_request.call_count, 1)
self.assertEquals(data['amount'], '10.42')
self.assertEquals(data['bill_to_address_city'], 'Manhattan')
self.assertEquals(data['bill_to_address_country'], 'US')
self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')
self.assertEquals(data['bill_to_address_line2'], 'apt 5')
self.assertEquals(data['bill_to_address_postal_code'], '10001')
self.assertEquals(data['bill_to_address_state'], 'NY')
self.assertEquals(data['bill_to_email'], 'herp@example.com')
self.assertEquals(data['bill_to_forename'], 'Testy')
self.assertEquals(data['bill_to_phone'], '17174671111')
self.assertEquals(data['bill_to_surname'], 'McUnitTest')
self.assertEquals(data['card_cvn'], '123')
        next_year = datetime.date.today().year + 1
        self.assertEquals(data['card_expiry_date'], '12-{}'.format(next_year))
self.assertEquals(data['card_number'], '4111111111111111')
self.assertEquals(data['card_type'], '001')
self.assertEquals(data['currency'], 'USD')
self.assertEquals(data['customer_ip_address'], '127.0.0.1')
self.assertEquals(data['device_fingerprint_id'], '')
self.assertEquals(data['item_0_name'], 'My Product')
self.assertEquals(data['item_0_quantity'], '1')
self.assertEquals(data['item_0_sku'], basket.all_lines()[0].
stockrecord.partner_sku)
self.assertEquals(data['item_0_unit_price'], '10.42')
self.assertEquals(data['line_item_count'], '1')
self.assertEquals(data['locale'], 'en')
self.assertEquals(data['my_custom_field'], 'ABC')
self.assertEquals(data['payment_method'], 'card')
self.assertEquals(data['reference_number'], '10000042')
self.assertEquals(data['ship_to_address_city'], 'Manhattan')
self.assertEquals(data['ship_to_address_country'], 'US')
self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')
self.assertEquals(data['ship_to_address_line2'], '')
self.assertEquals(data['ship_to_address_postal_code'], '10001')
self.assertEquals(data['ship_to_address_state'], 'NY')
self.assertEquals(data['ship_to_forename'], 'fadsf')
self.assertEquals(data['ship_to_phone'], '17174671111')
self.assertEquals(data['ship_to_surname'], 'fad')
self.assertEquals(data['transaction_type'],
'authorization,create_payment_token')
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
from cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID
from cybersource.tests import factories as cs_factories
from decimal import Decimal as D
from django.core import mail
from django.core.urlresolvers import reverse
from mock import patch
from oscar.core.loading import get_class, get_model
from oscar.test import factories
from random import randrange
from rest_framework.test import APITestCase
import datetime
import requests # Needed for external calls!
Basket = get_model('basket', 'Basket')
Product = get_model('catalogue', 'Product')
Order = get_model('order', 'Order')
class BaseCheckoutTest(APITestCase):
fixtures = ['cybersource-test.yaml']
def create_product(self, price=D('10.00')):
product = factories.create_product(
title='My Product',
product_class='My Product Class')
record = factories.create_stockrecord(
currency='USD',
product=product,
num_in_stock=10,
price_excl_tax=price)
factories.create_purchase_info(record)
return product
def do_add_to_basket(self, product_id, quantity=1):
url = reverse('api-basket-add-product')
data = {
"url": reverse('product-detail', args=[product_id]),
"quantity": quantity
}
return self.client.post(url, data)
def do_get_basket(self):
url = reverse('api-basket')
return self.client.get(url)
def do_sign_auth_request(self, basket_id=None, data=None):
if data is None:
data = {
"guest_email": "herp@example.com",
"basket": reverse('basket-detail', args=[basket_id]),
"shipping_address": {
"first_name": "fadsf",
"last_name": "fad",
"line1": "234 5th Ave",
"line4": "Manhattan",
"postcode": "10001",
"state": "NY",
"country": reverse('country-detail', args=['US']),
"phone_number": "+1 (717) 467-1111",
}
}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 200)
next_year = datetime.date.today().year + 1
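        # Card details a customer would type into the hosted payment form
        # (4111111111111111 is the standard Visa test card number)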
cs_data = {
'card_type': '001',
'card_number': '4111111111111111',
'card_cvn': '123',
'card_expiry_date': '12-{}'.format(next_year),
'bill_to_forename': 'Testy',
'bill_to_surname': 'McUnitTest',
'bill_to_address_line1': '234 5th Ave',
'bill_to_address_line2': 'apt 5',
'bill_to_address_city': 'Manhattan',
'bill_to_address_state': 'NY',
'bill_to_address_postal_code': '10001',
'bill_to_address_country': 'US',
'bill_to_phone': '17174671111',
}
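        # Copy over the server-provided values for non-editable (signed) fields
        # and for anything we did not set above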
for field in res.data['fields']:
if not field['editable'] or field['key'] not in cs_data:
cs_data[field['key']] = field['value']
cs_url = res.data['url']
return cs_url, cs_data
def do_cybersource_post(self, cs_url, cs_data):
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 200)
soup = BeautifulSoup(res.content, 'html.parser')
form_data = {}
for element in soup.find_all('input'):
form_data[element['name']] = element['value']
# We have the data from cybersource, send it to our cybersource callback
url = reverse('cybersource-reply')
return self.client.post(url, form_data)
def check_finished_order(self, number, product_id, quantity=1):
# Order exists and was paid for
self.assertEqual(Order.objects.all().count(), 1)
order = Order.objects.get()
self.assertEqual(order.number, number)
lines = order.lines.all()
self.assertEqual(lines.count(), 1)
line = lines[0]
self.assertEqual(line.quantity, quantity)
self.assertEqual(line.product_id, product_id)
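        # A single 'Authorise' payment event should cover the full order total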
payment_events = order.payment_events.filter(event_type__name="Authorise")
self.assertEqual(payment_events.count(), 1)
self.assertEqual(payment_events[0].amount, order.total_incl_tax)
payment_sources = order.sources.all()
self.assertEqual(payment_sources.count(), 1)
self.assertEqual(payment_sources[0].currency, order.currency)
self.assertEqual(payment_sources[0].amount_allocated, order.total_incl_tax)
self.assertEqual(payment_sources[0].amount_debited, D('0.00'))
self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))
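        # The payment source should hold exactly one accepted authorization transaction for this order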
transactions = payment_sources[0].transactions.all()
self.assertEqual(transactions.count(), 1)
self.assertEqual(transactions[0].txn_type, 'Authorise')
self.assertEqual(transactions[0].amount, order.total_incl_tax)
self.assertEqual(transactions[0].status, 'ACCEPT')
self.assertEqual(transactions[0].log_field('req_reference_number'), order.number)
self.assertEqual(transactions[0].token.card_last4, '1111')
self.assertEqual(len(mail.outbox), 1)
class CheckoutIntegrationTest(BaseCheckoutTest):
"""Full Integration Test of Checkout"""
def test_checkout_process(self):
"""Full checkout process using minimal api calls"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
def test_add_product_during_auth(self):
"""Test attempting to add a product during the authorize flow"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
# Adding a product here should succeed
res = self.do_add_to_basket(product.id)
basket1 = res.data['id']
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
# Adding a product here should go to a new basket, not the one we're auth'ing
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket2 = res.data['id']
self.assertNotEqual(basket1, basket2)
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
# Adding a product here should go to basket2, not basket1
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
basket3 = res.data['id']
self.assertEqual(basket2, basket3)
def test_pay_for_nothing(self):
"""Test attempting to pay for an empty basket"""
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
data = {
"guest_email": "herp@example.com",
"basket": reverse('basket-detail', args=[basket_id]),
"shipping_address": {
"first_name": "fadsf",
"last_name": "fad",
"line1": "234 5th Ave",
"line4": "Manhattan",
"postcode": "10001",
"state": "NY",
"country": reverse('country-detail', args=['US']),
"phone_number": "+1 (717) 467-1111",
}
}
url = reverse('cybersource-sign-auth-request')
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_pre_auth(self):
"""Test attempting to manipulate basket price when requesting an auth form"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
url = reverse('cybersource-sign-auth-request')
data = {
"guest_email": "herp@example.com",
"basket": reverse('basket-detail', args=[basket_id]),
"total": "2.00", # Try and get $10 of product for only $2
"shipping_address": {
"first_name": "fadsf",
"last_name": "fad",
"line1": "234 5th Ave",
"line4": "Manhattan",
"postcode": "10001",
"state": "NY",
"country": reverse('country-detail', args=['US']),
"phone_number": "+1 (717) 467-1111",
}
}
res = self.client.post(url, data, format='json')
self.assertEqual(res.status_code, 406)
def test_manipulate_total_during_auth(self):
"""Test attempting to manipulate basket price when requesting auth from CyberSource"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data['total_incl_tax'], '10.00')
cs_url, cs_data = self.do_sign_auth_request(basket_id)
cs_data['amount'] = '2.00'
res = requests.post(cs_url, cs_data)
self.assertEqual(res.status_code, 403)
def test_free_product(self):
"""Full checkout process using minimal api calls"""
product = self.create_product(price=D('0.00'))
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
cs_url, cs_data = self.do_sign_auth_request(basket_id)
self.assertEqual(cs_data['amount'], '0.00')
res = self.do_cybersource_post(cs_url, cs_data)
self.assertEqual(res.status_code, 302)
self.check_finished_order(cs_data['reference_number'], product.id)
class CSReplyViewTest(BaseCheckoutTest):
"""Test the CybersourceReplyView with fixtured requests"""
def prepare_basket(self):
"""Setup a basket and session like SignAuthorizePaymentFormView would normally"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
session = self.client.session
session[CHECKOUT_BASKET_ID] = basket_id
session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))
session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'
session.save()
return session, basket_id, session[CHECKOUT_ORDER_NUM]
@patch('cybersource.signals.order_placed.send')
def test_invalid_signature(self, order_placed):
"""Invalid signature should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
data['signature'] = 'abcdef'
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_invalid_request_type(self, order_placed):
"""Bad request type should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data["req_transaction_type"] = "payment",
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_duplicate_transaction_id(self, order_placed):
"""Duplicate Transaction ID should result in redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
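        # Re-posting the identical reply must not place a second order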
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(order_placed.call_count, 1)
self.assertEqual(Order.objects.count(), 1)
@patch('cybersource.signals.order_placed.send')
def test_invalid_reference_number(self, order_placed):
"""Mismatched reference number should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number + 'ABC')
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_missing_basket(self, order_placed):
"""Missing basket should result in 400 Bad Request"""
session, basket_id, order_number = self.prepare_basket()
del session[CHECKOUT_BASKET_ID]
session.save()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertEqual(resp.status_code, 400)
self.assertEqual(order_placed.call_count, 0)
self.assertEqual(Order.objects.count(), 0)
@patch('cybersource.signals.order_placed.send')
def test_declined_card(self, order_placed):
"""Declined card should should result in redirect to failure page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_declined_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:index'), fetch_redirect_response=False)
self.assertEqual(len(mail.outbox), 0, 'Should not send email')
self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')
self.assertEqual(Order.objects.count(), 0, 'Should not make order')
@patch('cybersource.signals.order_placed.send')
def test_success(self, order_placed):
"""Successful authorization should create an order and redirect to the success page"""
session, basket_id, order_number = self.prepare_basket()
data = cs_factories.build_accepted_reply_data(order_number)
data = cs_factories.sign_reply_data(data)
url = reverse('cybersource-reply')
self.assertEqual(order_placed.call_count, 0)
resp = self.client.post(url, data)
self.assertRedirects(resp, reverse('checkout:thank-you'))
self.assertEqual(len(mail.outbox), 1, 'Should send email')
self.assertEqual(order_placed.call_count, 1, 'Should trigger order_placed signal')
order = order_placed.call_args[1]['order']
self.assertEqual(order.status, 'Authorized', 'Should set order status')
self.assertEqual(order.basket.id, basket_id, 'Should use basket from session')
self.assertEqual(order.number, order_number, 'Should use order number from CS request')
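        # The reply view should stash the new order's id in the session for the thank-you page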
session = self.client.session
self.assertEquals(session[CHECKOUT_ORDER_ID], order.id, 'Should save order_id in session')
        self.assertEqual(order.sources.count(), 1, 'Should save PaymentSource')
source = order.sources.first()
self.assertEqual(source.currency, 'USD')
self.assertEqual(source.amount_allocated, D('99.99'))
self.assertEqual(source.amount_refunded, D('0.00'))
self.assertEqual(source.amount_debited, D('0.00'))
self.assertEqual(source.transactions.count(), 1, 'Should save Transaction')
transaction = source.transactions.first()
self.assertEqual(transaction.log.data, data)
self.assertEqual(transaction.token.log, transaction.log)
self.assertEqual(transaction.token.masked_card_number, 'xxxxxxxxxxxx1111')
self.assertEqual(transaction.token.card_type, '001')
self.assertEqual(transaction.txn_type, 'Authorise')
self.assertEqual(transaction.amount, D('99.99'))
self.assertEqual(transaction.reference, data['transaction_id'])
self.assertEqual(transaction.status, 'ACCEPT')
self.assertEqual(transaction.request_token, data['request_token'])
self.assertEqual(order.payment_events.count(), 1, 'Should save PaymentEvent')
event = order.payment_events.first()
self.assertEqual(event.amount, D('99.99'))
self.assertEqual(event.reference, data['transaction_id'])
self.assertEqual(event.event_type.name, 'Authorise')
self.assertEqual(event.line_quantities.count(), 1, 'Should save PaymentEventQuantity')
lq = event.line_quantities.first()
self.assertEqual(lq.line, order.lines.first())
self.assertEqual(lq.quantity, 1)
class AuthPaymentFormViewTest(BaseCheckoutTest):
"""Test the SignAuthorizePaymentFormView"""
def prepare_basket(self):
"""Setup a basket so that we can pay for it"""
product = self.create_product()
res = self.do_get_basket()
self.assertEqual(res.status_code, 200)
basket_id = res.data['id']
res = self.do_add_to_basket(product.id)
self.assertEqual(res.status_code, 200)
return basket_id
@patch('cybersource.signals.pre_build_auth_request.send')
@patch('cybersource.signals.pre_calculate_auth_total.send')
def test_request_auth_form_success(self, pre_calculate_auth_total, pre_build_auth_request):
basket_id = self.prepare_basket()
# Add some taxes to the basket
def add_taxes(sender, basket, shipping_address, **kwargs):
for line in basket.all_lines():
line.purchase_info.price.tax = D('0.42')
pre_calculate_auth_total.side_effect = add_taxes
# Add an extra field into the request
def add_a_field(sender, extra_fields, request, basket, **kwargs):
extra_fields['my_custom_field'] = 'ABC'
pre_build_auth_request.side_effect = add_a_field
# Pregenerate the order number
session = self.client.session
session[CHECKOUT_ORDER_NUM] = '10000042'
session.save()
cs_url, data = self.do_sign_auth_request(basket_id=basket_id)
# CS URL should be correct
self.assertEqual(cs_url, 'https://testsecureacceptance.cybersource.com/silent/pay')
# Basket ID should be stored in the session
session = self.client.session
self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)
# Basket must be frozen
basket = Basket.objects.get(id=basket_id)
self.assertFalse(basket.can_be_edited)
# Make sure each signal got called
self.assertEqual(pre_calculate_auth_total.call_count, 1)
self.assertEqual(pre_build_auth_request.call_count, 1)
# Check response fields
self.assertEquals(data['amount'], '10.42')
self.assertEquals(data['bill_to_address_city'], 'Manhattan')
self.assertEquals(data['bill_to_address_country'], 'US')
self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')
self.assertEquals(data['bill_to_address_line2'], 'apt 5')
self.assertEquals(data['bill_to_address_postal_code'], '10001')
self.assertEquals(data['bill_to_address_state'], 'NY')
self.assertEquals(data['bill_to_email'], 'herp@example.com')
self.assertEquals(data['bill_to_forename'], 'Testy')
self.assertEquals(data['bill_to_phone'], '17174671111')
self.assertEquals(data['bill_to_surname'], 'McUnitTest')
self.assertEquals(data['card_cvn'], '123')
        self.assertEquals(data['card_expiry_date'], '12-{}'.format(datetime.date.today().year + 1))
self.assertEquals(data['card_number'], '4111111111111111')
self.assertEquals(data['card_type'], '001')
self.assertEquals(data['currency'], 'USD')
self.assertEquals(data['customer_ip_address'], '127.0.0.1')
self.assertEquals(data['device_fingerprint_id'], '')
self.assertEquals(data['item_0_name'], 'My Product')
self.assertEquals(data['item_0_quantity'], '1')
self.assertEquals(data['item_0_sku'], basket.all_lines()[0].stockrecord.partner_sku)
self.assertEquals(data['item_0_unit_price'], '10.42')
self.assertEquals(data['line_item_count'], '1')
self.assertEquals(data['locale'], 'en')
self.assertEquals(data['my_custom_field'], 'ABC')
self.assertEquals(data['payment_method'], 'card')
self.assertEquals(data['reference_number'], '10000042')
self.assertEquals(data['ship_to_address_city'], 'Manhattan')
self.assertEquals(data['ship_to_address_country'], 'US')
self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')
self.assertEquals(data['ship_to_address_line2'], '')
self.assertEquals(data['ship_to_address_postal_code'], '10001')
self.assertEquals(data['ship_to_address_state'], 'NY')
self.assertEquals(data['ship_to_forename'], 'fadsf')
self.assertEquals(data['ship_to_phone'], '17174671111')
self.assertEquals(data['ship_to_surname'], 'fad')
self.assertEquals(data['transaction_type'], 'authorization,create_payment_token')
|
flexible
|
{
"blob_id": "9155b3eed8ac79b94a033801dbf142392b50720b",
"index": 5123,
"step-1": "<mask token>\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n <mask token>\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n <mask token>\n <mask token>\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n 
self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp 
= self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n 
def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], 'herp@example.com')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n 
self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n",
"step-2": "<mask token>\n\n\nclass BaseCheckoutTest(APITestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n <mask token>\n <mask token>\n\n def check_finished_order(self, number, product_id, quantity=1):\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n self.assertEqual(line.product_id, product_id)\n payment_events = order.payment_events.filter(event_type__name=\n 'Authorise')\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.\n total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n self.assertEqual(transactions[0].log_field('req_reference_number'),\n order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n self.assertEqual(len(mail.outbox), 1)\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 
'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n self.assertEqual(cs_data['amount'], '0.00')\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 
0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n 
self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, 
request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], 'herp@example.com')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n",
"step-3": "<mask token>\n\n\nclass BaseCheckoutTest(APITestCase):\n <mask token>\n\n def create_product(self, price=D('10.00')):\n product = factories.create_product(title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(currency='USD', product=\n product, num_in_stock=10, price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def do_add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {'url': reverse('product-detail', args=[product_id]),\n 'quantity': quantity}\n return self.client.post(url, data)\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n\n def do_sign_auth_request(self, basket_id=None, data=None):\n if data is None:\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=[\n 'US']), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n next_year = datetime.date.today().year + 1\n cs_data = {'card_type': '001', 'card_number': '4111111111111111',\n 'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year\n ), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest',\n 'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2':\n 'apt 5', 'bill_to_address_city': 'Manhattan',\n 'bill_to_address_state': 'NY', 'bill_to_address_postal_code':\n '10001', 'bill_to_address_country': 'US', 'bill_to_phone':\n '17174671111'}\n for field in res.data['fields']:\n if not field['editable'] or field['key'] not in cs_data:\n cs_data[field['key']] = field['value']\n cs_url = res.data['url']\n return cs_url, cs_data\n\n def do_cybersource_post(self, cs_url, cs_data):\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 200)\n soup = BeautifulSoup(res.content, 'html.parser')\n form_data = {}\n for element in soup.find_all('input'):\n form_data[element['name']] = element['value']\n url = reverse('cybersource-reply')\n return self.client.post(url, form_data)\n\n def check_finished_order(self, number, product_id, quantity=1):\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n self.assertEqual(line.product_id, product_id)\n payment_events = order.payment_events.filter(event_type__name=\n 'Authorise')\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.\n total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n 
self.assertEqual(transactions[0].log_field('req_reference_number'),\n order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n self.assertEqual(len(mail.outbox), 1)\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = 
res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n self.assertEqual(cs_data['amount'], '0.00')\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n 
self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n 
self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], 'herp@example.com')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n 
self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n",
"step-4": "from bs4 import BeautifulSoup\nfrom cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID\nfrom cybersource.tests import factories as cs_factories\nfrom decimal import Decimal as D\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\nfrom mock import patch\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.test import factories\nfrom random import randrange\nfrom rest_framework.test import APITestCase\nimport datetime\nimport requests\nBasket = get_model('basket', 'Basket')\nProduct = get_model('catalogue', 'Product')\nOrder = get_model('order', 'Order')\n\n\nclass BaseCheckoutTest(APITestCase):\n fixtures = ['cybersource-test.yaml']\n\n def create_product(self, price=D('10.00')):\n product = factories.create_product(title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(currency='USD', product=\n product, num_in_stock=10, price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def do_add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {'url': reverse('product-detail', args=[product_id]),\n 'quantity': quantity}\n return self.client.post(url, data)\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n\n def do_sign_auth_request(self, basket_id=None, data=None):\n if data is None:\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=[\n 'US']), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n next_year = datetime.date.today().year + 1\n cs_data = {'card_type': '001', 'card_number': '4111111111111111',\n 'card_cvn': '123', 'card_expiry_date': '12-{}'.format(next_year\n ), 'bill_to_forename': 'Testy', 'bill_to_surname': 'McUnitTest',\n 'bill_to_address_line1': '234 5th Ave', 'bill_to_address_line2':\n 'apt 5', 'bill_to_address_city': 'Manhattan',\n 'bill_to_address_state': 'NY', 'bill_to_address_postal_code':\n '10001', 'bill_to_address_country': 'US', 'bill_to_phone':\n '17174671111'}\n for field in res.data['fields']:\n if not field['editable'] or field['key'] not in cs_data:\n cs_data[field['key']] = field['value']\n cs_url = res.data['url']\n return cs_url, cs_data\n\n def do_cybersource_post(self, cs_url, cs_data):\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 200)\n soup = BeautifulSoup(res.content, 'html.parser')\n form_data = {}\n for element in soup.find_all('input'):\n form_data[element['name']] = element['value']\n url = reverse('cybersource-reply')\n return self.client.post(url, form_data)\n\n def check_finished_order(self, number, product_id, quantity=1):\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, quantity)\n self.assertEqual(line.product_id, product_id)\n payment_events = order.payment_events.filter(event_type__name=\n 'Authorise')\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, 
order.total_incl_tax)\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.\n total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n self.assertEqual(transactions[0].log_field('req_reference_number'),\n order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n self.assertEqual(len(mail.outbox), 1)\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 'basket-detail', args=[basket_id]), 'shipping_address': {\n 'first_name': 'fadsf', 'last_name': 'fad', 'line1':\n '234 5th Ave', 'line4': 'Manhattan', 'postcode': '10001',\n 'state': 'NY', 'country': reverse('country-detail', args=['US']\n ), 'phone_number': '+1 (717) 467-1111'}}\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n url = reverse('cybersource-sign-auth-request')\n data = {'guest_email': 'herp@example.com', 'basket': reverse(\n 
'basket-detail', args=[basket_id]), 'total': '2.00',\n 'shipping_address': {'first_name': 'fadsf', 'last_name': 'fad',\n 'line1': '234 5th Ave', 'line4': 'Manhattan', 'postcode':\n '10001', 'state': 'NY', 'country': reverse('country-detail',\n args=['US']), 'phone_number': '+1 (717) 467-1111'}}\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n self.assertEqual(cs_data['amount'], '0.00')\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n data['signature'] = 'abcdef'\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data['req_transaction_type'] = 'payment',\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n 
self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'),\n fetch_redirect_response=False)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0,\n 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n @patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1,\n 'Should trigger order_placed 
signal')\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id,\n 'Should use basket from session')\n self.assertEqual(order.number, order_number,\n 'Should use order number from CS request')\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id,\n 'Should save order_id in session')\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource'\n )\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n self.assertEqual(source.transactions.count(), 1,\n 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number,\n 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n self.assertEqual(order.payment_events.count(), 1,\n 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n self.assertEqual(event.line_quantities.count(), 1,\n 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n return basket_id\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total,\n pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n def add_a_field(sender, extra_fields, request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n self.assertEqual(cs_url,\n 'https://testsecureacceptance.cybersource.com/silent/pay')\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n self.assertEquals(data['amount'], '10.42')\n 
self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], 'herp@example.com')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].\n stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'],\n 'authorization,create_payment_token')\n",
"step-5": "from bs4 import BeautifulSoup\nfrom cybersource.constants import CHECKOUT_BASKET_ID, CHECKOUT_ORDER_NUM, CHECKOUT_SHIPPING_CODE, CHECKOUT_ORDER_ID\nfrom cybersource.tests import factories as cs_factories\nfrom decimal import Decimal as D\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\nfrom mock import patch\nfrom oscar.core.loading import get_class, get_model\nfrom oscar.test import factories\nfrom random import randrange\nfrom rest_framework.test import APITestCase\nimport datetime\nimport requests # Needed for external calls!\n\nBasket = get_model('basket', 'Basket')\nProduct = get_model('catalogue', 'Product')\nOrder = get_model('order', 'Order')\n\n\nclass BaseCheckoutTest(APITestCase):\n fixtures = ['cybersource-test.yaml']\n\n def create_product(self, price=D('10.00')):\n product = factories.create_product(\n title='My Product',\n product_class='My Product Class')\n record = factories.create_stockrecord(\n currency='USD',\n product=product,\n num_in_stock=10,\n price_excl_tax=price)\n factories.create_purchase_info(record)\n return product\n\n def do_add_to_basket(self, product_id, quantity=1):\n url = reverse('api-basket-add-product')\n data = {\n \"url\": reverse('product-detail', args=[product_id]),\n \"quantity\": quantity\n }\n return self.client.post(url, data)\n\n def do_get_basket(self):\n url = reverse('api-basket')\n return self.client.get(url)\n\n def do_sign_auth_request(self, basket_id=None, data=None):\n if data is None:\n data = {\n \"guest_email\": \"herp@example.com\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 200)\n\n next_year = datetime.date.today().year + 1\n cs_data = {\n 'card_type': '001',\n 'card_number': '4111111111111111',\n 'card_cvn': '123',\n 'card_expiry_date': '12-{}'.format(next_year),\n 'bill_to_forename': 'Testy',\n 'bill_to_surname': 'McUnitTest',\n 'bill_to_address_line1': '234 5th Ave',\n 'bill_to_address_line2': 'apt 5',\n 'bill_to_address_city': 'Manhattan',\n 'bill_to_address_state': 'NY',\n 'bill_to_address_postal_code': '10001',\n 'bill_to_address_country': 'US',\n 'bill_to_phone': '17174671111',\n }\n for field in res.data['fields']:\n if not field['editable'] or field['key'] not in cs_data:\n cs_data[field['key']] = field['value']\n cs_url = res.data['url']\n return cs_url, cs_data\n\n def do_cybersource_post(self, cs_url, cs_data):\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 200)\n\n soup = BeautifulSoup(res.content, 'html.parser')\n form_data = {}\n for element in soup.find_all('input'):\n form_data[element['name']] = element['value']\n\n # We have the data from cybersource, send it to our cybersource callback\n url = reverse('cybersource-reply')\n return self.client.post(url, form_data)\n\n def check_finished_order(self, number, product_id, quantity=1):\n # Order exists and was paid for\n self.assertEqual(Order.objects.all().count(), 1)\n order = Order.objects.get()\n self.assertEqual(order.number, number)\n\n lines = order.lines.all()\n self.assertEqual(lines.count(), 1)\n line = lines[0]\n self.assertEqual(line.quantity, 
quantity)\n self.assertEqual(line.product_id, product_id)\n\n payment_events = order.payment_events.filter(event_type__name=\"Authorise\")\n self.assertEqual(payment_events.count(), 1)\n self.assertEqual(payment_events[0].amount, order.total_incl_tax)\n\n payment_sources = order.sources.all()\n self.assertEqual(payment_sources.count(), 1)\n self.assertEqual(payment_sources[0].currency, order.currency)\n self.assertEqual(payment_sources[0].amount_allocated, order.total_incl_tax)\n self.assertEqual(payment_sources[0].amount_debited, D('0.00'))\n self.assertEqual(payment_sources[0].amount_refunded, D('0.00'))\n\n transactions = payment_sources[0].transactions.all()\n self.assertEqual(transactions.count(), 1)\n self.assertEqual(transactions[0].txn_type, 'Authorise')\n self.assertEqual(transactions[0].amount, order.total_incl_tax)\n self.assertEqual(transactions[0].status, 'ACCEPT')\n\n self.assertEqual(transactions[0].log_field('req_reference_number'), order.number)\n self.assertEqual(transactions[0].token.card_last4, '1111')\n\n self.assertEqual(len(mail.outbox), 1)\n\n\n\n\nclass CheckoutIntegrationTest(BaseCheckoutTest):\n \"\"\"Full Integration Test of Checkout\"\"\"\n\n def test_checkout_process(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\n def test_add_product_during_auth(self):\n \"\"\"Test attempting to add a product during the authorize flow\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n # Adding a product here should succeed\n res = self.do_add_to_basket(product.id)\n basket1 = res.data['id']\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n # Adding a product here should go to a new basket, not the one we're auth'ing\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket2 = res.data['id']\n self.assertNotEqual(basket1, basket2)\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n # Adding a product here should go to basket2, not basket1\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n basket3 = res.data['id']\n self.assertEqual(basket2, basket3)\n\n\n def test_pay_for_nothing(self):\n \"\"\"Test attempting to pay for an empty basket\"\"\"\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n data = {\n \"guest_email\": \"herp@example.com\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n\n def 
test_manipulate_total_pre_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting an auth form\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n\n url = reverse('cybersource-sign-auth-request')\n data = {\n \"guest_email\": \"herp@example.com\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"total\": \"2.00\", # Try and get $10 of product for only $2\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)\n\n\n def test_manipulate_total_during_auth(self):\n \"\"\"Test attempting to manipulate basket price when requesting auth from CyberSource\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.data['total_incl_tax'], '10.00')\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n cs_data['amount'] = '2.00'\n res = requests.post(cs_url, cs_data)\n self.assertEqual(res.status_code, 403)\n\n\n def test_free_product(self):\n \"\"\"Full checkout process using minimal api calls\"\"\"\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)\n\n\n\nclass CSReplyViewTest(BaseCheckoutTest):\n \"\"\"Test the CybersourceReplyView with fixtured requests\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket and session like SignAuthorizePaymentFormView would normally\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n session = self.client.session\n session[CHECKOUT_BASKET_ID] = basket_id\n session[CHECKOUT_ORDER_NUM] = str(randrange(1000000, 9999999))\n session[CHECKOUT_SHIPPING_CODE] = 'free-shipping'\n session.save()\n return session, basket_id, session[CHECKOUT_ORDER_NUM]\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_signature(self, order_placed):\n \"\"\"Invalid signature should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n\n data['signature'] = 'abcdef'\n\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n 
self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_request_type(self, order_placed):\n \"\"\"Bad request type should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n\n data[\"req_transaction_type\"] = \"payment\",\n\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_duplicate_transaction_id(self, order_placed):\n \"\"\"Duplicate Transaction ID should result in redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n self.assertEqual(order_placed.call_count, 1)\n self.assertEqual(Order.objects.count(), 1)\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_invalid_reference_number(self, order_placed):\n \"\"\"Mismatched reference number should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number + 'ABC')\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_missing_basket(self, order_placed):\n \"\"\"Missing basket should result in 400 Bad Request\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n del session[CHECKOUT_BASKET_ID]\n session.save()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n resp = self.client.post(url, data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(order_placed.call_count, 0)\n self.assertEqual(Order.objects.count(), 0)\n\n\n @patch('cybersource.signals.order_placed.send')\n def test_declined_card(self, order_placed):\n \"\"\"Declined card should should result in redirect to failure page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_declined_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n\n resp = self.client.post(url, data)\n self.assertRedirects(resp, reverse('checkout:index'), fetch_redirect_response=False)\n\n self.assertEqual(len(mail.outbox), 0, 'Should not send email')\n self.assertEqual(order_placed.call_count, 0, 'Should not trigger signal')\n self.assertEqual(Order.objects.count(), 0, 'Should not make order')\n\n\n 
@patch('cybersource.signals.order_placed.send')\n def test_success(self, order_placed):\n \"\"\"Successful authorization should create an order and redirect to the success page\"\"\"\n session, basket_id, order_number = self.prepare_basket()\n data = cs_factories.build_accepted_reply_data(order_number)\n data = cs_factories.sign_reply_data(data)\n url = reverse('cybersource-reply')\n self.assertEqual(order_placed.call_count, 0)\n resp = self.client.post(url, data)\n\n self.assertRedirects(resp, reverse('checkout:thank-you'))\n\n self.assertEqual(len(mail.outbox), 1, 'Should send email')\n self.assertEqual(order_placed.call_count, 1, 'Should trigger order_placed signal')\n\n order = order_placed.call_args[1]['order']\n self.assertEqual(order.status, 'Authorized', 'Should set order status')\n self.assertEqual(order.basket.id, basket_id, 'Should use basket from session')\n self.assertEqual(order.number, order_number, 'Should use order number from CS request')\n\n session = self.client.session\n self.assertEquals(session[CHECKOUT_ORDER_ID], order.id, 'Should save order_id in session')\n\n self.assertEqual(order.sources.count(), 1, 'Should save pPaymentSource')\n source = order.sources.first()\n self.assertEqual(source.currency, 'USD')\n self.assertEqual(source.amount_allocated, D('99.99'))\n self.assertEqual(source.amount_refunded, D('0.00'))\n self.assertEqual(source.amount_debited, D('0.00'))\n\n self.assertEqual(source.transactions.count(), 1, 'Should save Transaction')\n transaction = source.transactions.first()\n self.assertEqual(transaction.log.data, data)\n self.assertEqual(transaction.token.log, transaction.log)\n self.assertEqual(transaction.token.masked_card_number, 'xxxxxxxxxxxx1111')\n self.assertEqual(transaction.token.card_type, '001')\n self.assertEqual(transaction.txn_type, 'Authorise')\n self.assertEqual(transaction.amount, D('99.99'))\n self.assertEqual(transaction.reference, data['transaction_id'])\n self.assertEqual(transaction.status, 'ACCEPT')\n self.assertEqual(transaction.request_token, data['request_token'])\n\n self.assertEqual(order.payment_events.count(), 1, 'Should save PaymentEvent')\n event = order.payment_events.first()\n self.assertEqual(event.amount, D('99.99'))\n self.assertEqual(event.reference, data['transaction_id'])\n self.assertEqual(event.event_type.name, 'Authorise')\n\n self.assertEqual(event.line_quantities.count(), 1, 'Should save PaymentEventQuantity')\n lq = event.line_quantities.first()\n self.assertEqual(lq.line, order.lines.first())\n self.assertEqual(lq.quantity, 1)\n\n\n\nclass AuthPaymentFormViewTest(BaseCheckoutTest):\n \"\"\"Test the SignAuthorizePaymentFormView\"\"\"\n\n def prepare_basket(self):\n \"\"\"Setup a basket so that we can pay for it\"\"\"\n product = self.create_product()\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n return basket_id\n\n\n @patch('cybersource.signals.pre_build_auth_request.send')\n @patch('cybersource.signals.pre_calculate_auth_total.send')\n def test_request_auth_form_success(self, pre_calculate_auth_total, pre_build_auth_request):\n basket_id = self.prepare_basket()\n\n # Add some taxes to the basket\n def add_taxes(sender, basket, shipping_address, **kwargs):\n for line in basket.all_lines():\n line.purchase_info.price.tax = D('0.42')\n pre_calculate_auth_total.side_effect = add_taxes\n\n # Add an extra field into the request\n def add_a_field(sender, extra_fields, 
request, basket, **kwargs):\n extra_fields['my_custom_field'] = 'ABC'\n pre_build_auth_request.side_effect = add_a_field\n\n # Pregenerate the order number\n session = self.client.session\n session[CHECKOUT_ORDER_NUM] = '10000042'\n session.save()\n\n cs_url, data = self.do_sign_auth_request(basket_id=basket_id)\n\n # CS URL should be correct\n self.assertEqual(cs_url, 'https://testsecureacceptance.cybersource.com/silent/pay')\n\n # Basket ID should be stored in the session\n session = self.client.session\n self.assertEqual(session[CHECKOUT_BASKET_ID], basket_id)\n\n # Basket must be frozen\n basket = Basket.objects.get(id=basket_id)\n self.assertFalse(basket.can_be_edited)\n\n # Make sure each signal got called\n self.assertEqual(pre_calculate_auth_total.call_count, 1)\n self.assertEqual(pre_build_auth_request.call_count, 1)\n\n # Check response fields\n self.assertEquals(data['amount'], '10.42')\n self.assertEquals(data['bill_to_address_city'], 'Manhattan')\n self.assertEquals(data['bill_to_address_country'], 'US')\n self.assertEquals(data['bill_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['bill_to_address_line2'], 'apt 5')\n self.assertEquals(data['bill_to_address_postal_code'], '10001')\n self.assertEquals(data['bill_to_address_state'], 'NY')\n self.assertEquals(data['bill_to_email'], 'herp@example.com')\n self.assertEquals(data['bill_to_forename'], 'Testy')\n self.assertEquals(data['bill_to_phone'], '17174671111')\n self.assertEquals(data['bill_to_surname'], 'McUnitTest')\n self.assertEquals(data['card_cvn'], '123')\n self.assertEquals(data['card_expiry_date'], '12-2017')\n self.assertEquals(data['card_number'], '4111111111111111')\n self.assertEquals(data['card_type'], '001')\n self.assertEquals(data['currency'], 'USD')\n self.assertEquals(data['customer_ip_address'], '127.0.0.1')\n self.assertEquals(data['device_fingerprint_id'], '')\n self.assertEquals(data['item_0_name'], 'My Product')\n self.assertEquals(data['item_0_quantity'], '1')\n self.assertEquals(data['item_0_sku'], basket.all_lines()[0].stockrecord.partner_sku)\n self.assertEquals(data['item_0_unit_price'], '10.42')\n self.assertEquals(data['line_item_count'], '1')\n self.assertEquals(data['locale'], 'en')\n self.assertEquals(data['my_custom_field'], 'ABC')\n self.assertEquals(data['payment_method'], 'card')\n self.assertEquals(data['reference_number'], '10000042')\n self.assertEquals(data['ship_to_address_city'], 'Manhattan')\n self.assertEquals(data['ship_to_address_country'], 'US')\n self.assertEquals(data['ship_to_address_line1'], '234 5th Ave')\n self.assertEquals(data['ship_to_address_line2'], '')\n self.assertEquals(data['ship_to_address_postal_code'], '10001')\n self.assertEquals(data['ship_to_address_state'], 'NY')\n self.assertEquals(data['ship_to_forename'], 'fadsf')\n self.assertEquals(data['ship_to_phone'], '17174671111')\n self.assertEquals(data['ship_to_surname'], 'fad')\n self.assertEquals(data['transaction_type'], 'authorization,create_payment_token')\n",
"step-ids": [
19,
25,
29,
32,
33
]
}
|
[
19,
25,
29,
32,
33
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def AverageLeftRight(EyeData):
for eyes in EyeData:
eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2
eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2
eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.
right_x < -100]
eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.
right_y < -100]
eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.
left_x < -100]
eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.
left_y < -100]
eyes = eyes.loc[eyes.avg_x > 0]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def AverageLeftRight(EyeData):
for eyes in EyeData:
eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2
eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2
eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.
right_x < -100]
eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.
right_y < -100]
eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.
left_x < -100]
eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.
left_y < -100]
eyes = eyes.loc[eyes.avg_x > 0]
def PlotXY(EyeData):
for eyes in EyeData:
fig = plt.figure()
fig.suptitle('Separate Components')
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.plot(eyes.avg_x)
ax2.plot(eyes.avg_y)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def AverageLeftRight(EyeData):
for eyes in EyeData:
eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2
eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2
eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.
right_x < -100]
eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.
right_y < -100]
eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.
left_x < -100]
eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.
left_y < -100]
eyes = eyes.loc[eyes.avg_x > 0]
def PlotXY(EyeData):
for eyes in EyeData:
fig = plt.figure()
fig.suptitle('Separate Components')
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.plot(eyes.avg_x)
ax2.plot(eyes.avg_y)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 12:39:59 2015
@author: user
Needs to be run after the basic analysis which loads all the data into workspace
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def AverageLeftRight(EyeData):
#Take the average of two eyes to get more accurate gaze position
for eyes in EyeData:
eyes['avg_x'] = (eyes['left_x'] + eyes['right_x'])/2
eyes['avg_y'] = (eyes['left_y'] + eyes['right_y'])/2
#Do not take the average if one of the eyes was not detected. In that case only use the other eye
eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.right_x < -100]
eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.right_y < -100]
eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.left_x < -100]
eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.left_y < -100]
eyes = eyes.loc[eyes.avg_x > 0]
def PlotXY(EyeData):
for eyes in EyeData:
fig = plt.figure()
fig.suptitle('Separate Components')
ax = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.plot(eyes.avg_x)
ax2.plot(eyes.avg_y)
|
flexible
|
{
"blob_id": "00ed68c68d51c5019fde0c489cd133be3d6985c3",
"index": 9339,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef AverageLeftRight(EyeData):\n for eyes in EyeData:\n eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2\n eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2\n eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.\n right_x < -100]\n eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.\n right_y < -100]\n eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.\n left_x < -100]\n eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.\n left_y < -100]\n eyes = eyes.loc[eyes.avg_x > 0]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef AverageLeftRight(EyeData):\n for eyes in EyeData:\n eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2\n eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2\n eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.\n right_x < -100]\n eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.\n right_y < -100]\n eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.\n left_x < -100]\n eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.\n left_y < -100]\n eyes = eyes.loc[eyes.avg_x > 0]\n\n\ndef PlotXY(EyeData):\n for eyes in EyeData:\n fig = plt.figure()\n fig.suptitle('Separate Components')\n ax = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n ax.plot(eyes.avg_x)\n ax2.plot(eyes.avg_y)\n",
"step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef AverageLeftRight(EyeData):\n for eyes in EyeData:\n eyes['avg_x'] = (eyes['left_x'] + eyes['right_x']) / 2\n eyes['avg_y'] = (eyes['left_y'] + eyes['right_y']) / 2\n eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.\n right_x < -100]\n eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.\n right_y < -100]\n eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.\n left_x < -100]\n eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.\n left_y < -100]\n eyes = eyes.loc[eyes.avg_x > 0]\n\n\ndef PlotXY(EyeData):\n for eyes in EyeData:\n fig = plt.figure()\n fig.suptitle('Separate Components')\n ax = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n ax.plot(eyes.avg_x)\n ax2.plot(eyes.avg_y)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 21 12:39:59 2015\n\n@author: user\nNeeds to be run after the basic analysis which loads all the data into workspace\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef AverageLeftRight(EyeData):\n#Take the average of two eyes to get more accurate gaze position\n\n for eyes in EyeData:\n eyes['avg_x'] = (eyes['left_x'] + eyes['right_x'])/2 \n eyes['avg_y'] = (eyes['left_y'] + eyes['right_y'])/2\n#Do not take the average if one of the eyes was not detected. In that case only use the other eye \n \n eyes['avg_x'].loc[eyes.right_x < -100] = eyes['left_x'].loc[eyes.right_x < -100]\n eyes['avg_y'].loc[eyes.right_y < -100] = eyes['left_y'].loc[eyes.right_y < -100]\n \n eyes['avg_x'].loc[eyes.left_x < -100] = eyes['right_x'].loc[eyes.left_x < -100]\n eyes['avg_x'].loc[eyes.left_y < -100] = eyes['right_y'].loc[eyes.left_y < -100]\n \n eyes = eyes.loc[eyes.avg_x > 0]\n\n\ndef PlotXY(EyeData):\n \n for eyes in EyeData:\n fig = plt.figure()\n fig.suptitle('Separate Components')\n ax = fig.add_subplot(121)\n ax2 = fig.add_subplot(122)\n ax.plot(eyes.avg_x)\n ax2.plot(eyes.avg_y)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(catogory)
print(data)
print(catogory.dot(data))
print(data.T.dot(catogory))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
catogory = np.array([50, 30, 40, 20])
data = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42,
70, 45], [40, 25, 35, 22, 55]])
print(catogory)
print(data)
print(catogory.dot(data))
print(data.T.dot(catogory))
<|reserved_special_token_1|>
import numpy as np
catogory = np.array([50, 30, 40, 20])
data = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42,
70, 45], [40, 25, 35, 22, 55]])
print(catogory)
print(data)
print(catogory.dot(data))
print(data.T.dot(catogory))
|
flexible
|
{
"blob_id": "e4b49faaad648c6e85274abb18f994083a74013d",
"index": 7160,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(catogory)\nprint(data)\nprint(catogory.dot(data))\nprint(data.T.dot(catogory))\n",
"step-3": "<mask token>\ncatogory = np.array([50, 30, 40, 20])\ndata = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42, \n 70, 45], [40, 25, 35, 22, 55]])\nprint(catogory)\nprint(data)\nprint(catogory.dot(data))\nprint(data.T.dot(catogory))\n",
"step-4": "import numpy as np\ncatogory = np.array([50, 30, 40, 20])\ndata = np.array([[20, 50, 10, 15, 20], [30, 40, 20, 65, 35], [75, 30, 42, \n 70, 45], [40, 25, 35, 22, 55]])\nprint(catogory)\nprint(data)\nprint(catogory.dot(data))\nprint(data.T.dot(catogory))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from rest_framework import serializers
from django.contrib.auth.models import User
from core.models import Detalhe, Viagem, Hospital, Equipamento, Caixa
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = '__all__'
class CaixaSerializer(serializers.ModelSerializer):
class Meta:
model = Caixa
fields = '__all__'
class HospitalSerializer(serializers.ModelSerializer):
class Meta:
model = Hospital
fields = '__all__'
class DetalheSerializer(serializers.ModelSerializer):
imeiEquipamento = serializers.CharField(max_length=22)
class Meta:
model = Detalhe
fields = '__all__'
class ViagemSerializer(serializers.ModelSerializer):
detalhes = DetalheSerializer(many=True, read_only=True)
caixa = CaixaSerializer(read_only=True)
localPartida = HospitalSerializer(read_only=True)
localChegada = HospitalSerializer(read_only=True)
class Meta:
model = Viagem
fields = '__all__'
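# Usage note (general DRF behaviour; the example payload below is only
# illustrative): because detalhes, caixa, localPartida and localChegada are
# declared read_only, ViagemSerializer nests their full representations when
# reading, but ignores them on writes -- related objects have to be attached in
# the view or model layer. A serialized Viagem therefore looks roughly like:
#   {"caixa": {...}, "localPartida": {...}, "localChegada": {...},
#    "detalhes": [{"imeiEquipamento": "...", ...}, ...], ...}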
|
normal
|
{
"blob_id": "b5c68211cfa255e47ee316dc5b0627719eacae78",
"index": 8504,
"step-1": "<mask token>\n\n\nclass ViagemSerializer(serializers.ModelSerializer):\n detalhes = DetalheSerializer(many=True, read_only=True)\n caixa = CaixaSerializer(read_only=True)\n localPartida = HospitalSerializer(read_only=True)\n localChegada = HospitalSerializer(read_only=True)\n\n\n class Meta:\n model = Viagem\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass CaixaSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Caixa\n fields = '__all__'\n\n\nclass HospitalSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Hospital\n fields = '__all__'\n\n\nclass DetalheSerializer(serializers.ModelSerializer):\n imeiEquipamento = serializers.CharField(max_length=22)\n\n\n class Meta:\n model = Detalhe\n fields = '__all__'\n\n\nclass ViagemSerializer(serializers.ModelSerializer):\n detalhes = DetalheSerializer(many=True, read_only=True)\n caixa = CaixaSerializer(read_only=True)\n localPartida = HospitalSerializer(read_only=True)\n localChegada = HospitalSerializer(read_only=True)\n\n\n class Meta:\n model = Viagem\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass CaixaSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Caixa\n fields = '__all__'\n\n\nclass HospitalSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Hospital\n fields = '__all__'\n\n\nclass DetalheSerializer(serializers.ModelSerializer):\n imeiEquipamento = serializers.CharField(max_length=22)\n\n\n class Meta:\n model = Detalhe\n fields = '__all__'\n\n\nclass ViagemSerializer(serializers.ModelSerializer):\n detalhes = DetalheSerializer(many=True, read_only=True)\n caixa = CaixaSerializer(read_only=True)\n localPartida = HospitalSerializer(read_only=True)\n localChegada = HospitalSerializer(read_only=True)\n\n\n class Meta:\n model = Viagem\n fields = '__all__'\n",
"step-4": "from rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom core.models import Detalhe, Viagem, Hospital, Equipamento, Caixa\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass CaixaSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Caixa\n fields = '__all__'\n\n\nclass HospitalSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Hospital\n fields = '__all__'\n\n\nclass DetalheSerializer(serializers.ModelSerializer):\n imeiEquipamento = serializers.CharField(max_length=22)\n\n\n class Meta:\n model = Detalhe\n fields = '__all__'\n\n\nclass ViagemSerializer(serializers.ModelSerializer):\n detalhes = DetalheSerializer(many=True, read_only=True)\n caixa = CaixaSerializer(read_only=True)\n localPartida = HospitalSerializer(read_only=True)\n localChegada = HospitalSerializer(read_only=True)\n\n\n class Meta:\n model = Viagem\n fields = '__all__'\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom core.models import Detalhe, Viagem, Hospital, Equipamento, Caixa\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = '__all__'\n\nclass CaixaSerializer(serializers.ModelSerializer):\n class Meta:\n model = Caixa\n fields = '__all__'\n\nclass HospitalSerializer(serializers.ModelSerializer):\n class Meta:\n model = Hospital\n fields = '__all__'\n\nclass DetalheSerializer(serializers.ModelSerializer):\n imeiEquipamento = serializers.CharField(max_length=22)\n class Meta:\n model = Detalhe\n fields = '__all__'\n\nclass ViagemSerializer(serializers.ModelSerializer):\n detalhes = DetalheSerializer(many=True, read_only=True)\n caixa = CaixaSerializer(read_only=True)\n localPartida = HospitalSerializer(read_only=True)\n localChegada = HospitalSerializer(read_only=True)\n \n class Meta:\n model = Viagem\n fields = '__all__'",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
num_str = "1"
num_str1 = "\u00b2"
num_str2 = "一千零一"
# Check whether a string contains only numeric characters
# 1. None of the three methods recognizes decimal fractions
# 2. isdigit and isnumeric are broader than isdecimal: isdecimal only accepts plain digits,
#    while the other two also accept digit-like symbols such as superscripts (e.g. squared)
# isnumeric additionally recognizes Chinese numerals
print(num_str)
print(num_str1)
print(num_str.isdecimal())
print(num_str1.isdecimal())
print(num_str.isdigit())
print(num_str1.isdigit())
print(num_str.isnumeric())
print(num_str1.isnumeric())
print(num_str2.isnumeric())
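# For reference, the expected results on CPython 3 are:
#   '1'       -> isdecimal() True,  isdigit() True,  isnumeric() True
#   '\u00b2'  -> isdecimal() False, isdigit() True,  isnumeric() True   (superscript two)
#   '一千零一' -> isdecimal() False, isdigit() False, isnumeric() True   (Chinese numerals)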
|
normal
|
{
"blob_id": "a7be2f43c6ec8d1576ed194a75762a36089cb052",
"index": 4195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(num_str)\nprint(num_str1)\nprint(num_str.isdecimal())\nprint(num_str1.isdecimal())\nprint(num_str.isdigit())\nprint(num_str1.isdigit())\nprint(num_str.isnumeric())\nprint(num_str1.isnumeric())\nprint(num_str2.isnumeric())\n",
"step-3": "num_str = '1'\nnum_str1 = '²'\nnum_str2 = '一千零一'\nprint(num_str)\nprint(num_str1)\nprint(num_str.isdecimal())\nprint(num_str1.isdecimal())\nprint(num_str.isdigit())\nprint(num_str1.isdigit())\nprint(num_str.isnumeric())\nprint(num_str1.isnumeric())\nprint(num_str2.isnumeric())\n",
"step-4": "num_str = \"1\"\nnum_str1 = \"\\u00b2\"\nnum_str2 = \"一千零一\"\n# 判断字符串是否只包含数字\n# 1.三种方法都不能判断小数\n# 2.isdigit 和 isnumeric 比 isdecimal 强大一些,后者只能判断正常数字,前两者可以判断带有数字的符号,如平方\n# isnumeric 还可以判断中文数字\nprint(num_str)\nprint(num_str1)\nprint(num_str.isdecimal())\nprint(num_str1.isdecimal())\nprint(num_str.isdigit())\nprint(num_str1.isdigit())\nprint(num_str.isnumeric())\nprint(num_str1.isnumeric())\nprint(num_str2.isnumeric())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class BillListSerializer(BillBaseSerializer):
id = serializers.SerializerMethodField()
def get_id(self, object):
return object.key.id()
class BillCreateSerializer(BillBaseSerializer):
line_items = serializers.JSONField(default=None)
company = serializers.CharField()
branch = serializers.CharField()
status = serializers.IntegerField()
date_of_payment = serializers.DateField(default=None)
notes = serializers.CharField(max_length=500, default=None)
class BillPaymentSerializer(serializers.Serializer):
status = serializers.IntegerField()
date_of_payment = serializers.DateField(required=True)
notes = serializers.CharField(max_length=500, required=False)
class BillDetailSerializer(BillCreateSerializer, BillListSerializer):
created_by = serializers.CharField(default=None)
created_on = serializers.DateField(default=None)
updated_by = serializers.CharField(default=None)
updated_on = serializers.DateField(default=None)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BillBaseSerializer(serializers.Serializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BillListSerializer(BillBaseSerializer):
id = serializers.SerializerMethodField()
def get_id(self, object):
return object.key.id()
class BillCreateSerializer(BillBaseSerializer):
line_items = serializers.JSONField(default=None)
company = serializers.CharField()
branch = serializers.CharField()
status = serializers.IntegerField()
date_of_payment = serializers.DateField(default=None)
notes = serializers.CharField(max_length=500, default=None)
class BillPaymentSerializer(serializers.Serializer):
status = serializers.IntegerField()
date_of_payment = serializers.DateField(required=True)
notes = serializers.CharField(max_length=500, required=False)
class BillDetailSerializer(BillCreateSerializer, BillListSerializer):
created_by = serializers.CharField(default=None)
created_on = serializers.DateField(default=None)
updated_by = serializers.CharField(default=None)
updated_on = serializers.DateField(default=None)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BillBaseSerializer(serializers.Serializer):
vendor = serializers.CharField(required=False)
amount = serializers.FloatField()
bill_date = serializers.DateField()
due_date = serializers.DateField()
class BillListSerializer(BillBaseSerializer):
id = serializers.SerializerMethodField()
def get_id(self, object):
return object.key.id()
class BillCreateSerializer(BillBaseSerializer):
line_items = serializers.JSONField(default=None)
company = serializers.CharField()
branch = serializers.CharField()
status = serializers.IntegerField()
date_of_payment = serializers.DateField(default=None)
notes = serializers.CharField(max_length=500, default=None)
class BillPaymentSerializer(serializers.Serializer):
status = serializers.IntegerField()
date_of_payment = serializers.DateField(required=True)
notes = serializers.CharField(max_length=500, required=False)
class BillDetailSerializer(BillCreateSerializer, BillListSerializer):
created_by = serializers.CharField(default=None)
created_on = serializers.DateField(default=None)
updated_by = serializers.CharField(default=None)
updated_on = serializers.DateField(default=None)
<|reserved_special_token_1|>
from rest_framework import serializers
class BillBaseSerializer(serializers.Serializer):
vendor = serializers.CharField(required=False)
amount = serializers.FloatField()
bill_date = serializers.DateField()
due_date = serializers.DateField()
class BillListSerializer(BillBaseSerializer):
id = serializers.SerializerMethodField()
def get_id(self, object):
return object.key.id()
class BillCreateSerializer(BillBaseSerializer):
line_items = serializers.JSONField(default=None)
company = serializers.CharField()
branch = serializers.CharField()
status = serializers.IntegerField()
date_of_payment = serializers.DateField(default=None)
notes = serializers.CharField(max_length=500, default=None)
class BillPaymentSerializer(serializers.Serializer):
status = serializers.IntegerField()
date_of_payment = serializers.DateField(required=True)
notes = serializers.CharField(max_length=500, required=False)
class BillDetailSerializer(BillCreateSerializer, BillListSerializer):
created_by = serializers.CharField(default=None)
created_on = serializers.DateField(default=None)
updated_by = serializers.CharField(default=None)
updated_on = serializers.DateField(default=None)
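# Note on the id field (standard DRF behaviour, not specific to this project):
# SerializerMethodField is read-only and resolves to the method named
# "get_<field_name>", so BillListSerializer calls get_id(object) for every
# instance it serializes; here that appears to return a datastore key id via
# object.key.id().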
|
flexible
|
{
"blob_id": "23160c2f030b0bd862360e944fbbc283c6cb45b2",
"index": 6625,
"step-1": "<mask token>\n\n\nclass BillListSerializer(BillBaseSerializer):\n id = serializers.SerializerMethodField()\n\n def get_id(self, object):\n return object.key.id()\n\n\nclass BillCreateSerializer(BillBaseSerializer):\n line_items = serializers.JSONField(default=None)\n company = serializers.CharField()\n branch = serializers.CharField()\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(default=None)\n notes = serializers.CharField(max_length=500, default=None)\n\n\nclass BillPaymentSerializer(serializers.Serializer):\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(required=True)\n notes = serializers.CharField(max_length=500, required=False)\n\n\nclass BillDetailSerializer(BillCreateSerializer, BillListSerializer):\n created_by = serializers.CharField(default=None)\n created_on = serializers.DateField(default=None)\n updated_by = serializers.CharField(default=None)\n updated_on = serializers.DateField(default=None)\n",
"step-2": "<mask token>\n\n\nclass BillBaseSerializer(serializers.Serializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BillListSerializer(BillBaseSerializer):\n id = serializers.SerializerMethodField()\n\n def get_id(self, object):\n return object.key.id()\n\n\nclass BillCreateSerializer(BillBaseSerializer):\n line_items = serializers.JSONField(default=None)\n company = serializers.CharField()\n branch = serializers.CharField()\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(default=None)\n notes = serializers.CharField(max_length=500, default=None)\n\n\nclass BillPaymentSerializer(serializers.Serializer):\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(required=True)\n notes = serializers.CharField(max_length=500, required=False)\n\n\nclass BillDetailSerializer(BillCreateSerializer, BillListSerializer):\n created_by = serializers.CharField(default=None)\n created_on = serializers.DateField(default=None)\n updated_by = serializers.CharField(default=None)\n updated_on = serializers.DateField(default=None)\n",
"step-3": "<mask token>\n\n\nclass BillBaseSerializer(serializers.Serializer):\n vendor = serializers.CharField(required=False)\n amount = serializers.FloatField()\n bill_date = serializers.DateField()\n due_date = serializers.DateField()\n\n\nclass BillListSerializer(BillBaseSerializer):\n id = serializers.SerializerMethodField()\n\n def get_id(self, object):\n return object.key.id()\n\n\nclass BillCreateSerializer(BillBaseSerializer):\n line_items = serializers.JSONField(default=None)\n company = serializers.CharField()\n branch = serializers.CharField()\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(default=None)\n notes = serializers.CharField(max_length=500, default=None)\n\n\nclass BillPaymentSerializer(serializers.Serializer):\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(required=True)\n notes = serializers.CharField(max_length=500, required=False)\n\n\nclass BillDetailSerializer(BillCreateSerializer, BillListSerializer):\n created_by = serializers.CharField(default=None)\n created_on = serializers.DateField(default=None)\n updated_by = serializers.CharField(default=None)\n updated_on = serializers.DateField(default=None)\n",
"step-4": "from rest_framework import serializers\n\n\nclass BillBaseSerializer(serializers.Serializer):\n vendor = serializers.CharField(required=False)\n amount = serializers.FloatField()\n bill_date = serializers.DateField()\n due_date = serializers.DateField()\n\n\nclass BillListSerializer(BillBaseSerializer):\n id = serializers.SerializerMethodField()\n\n def get_id(self, object):\n return object.key.id()\n\n\nclass BillCreateSerializer(BillBaseSerializer):\n line_items = serializers.JSONField(default=None)\n company = serializers.CharField()\n branch = serializers.CharField()\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(default=None)\n notes = serializers.CharField(max_length=500, default=None)\n\n\nclass BillPaymentSerializer(serializers.Serializer):\n status = serializers.IntegerField()\n date_of_payment = serializers.DateField(required=True)\n notes = serializers.CharField(max_length=500, required=False)\n\n\nclass BillDetailSerializer(BillCreateSerializer, BillListSerializer):\n created_by = serializers.CharField(default=None)\n created_on = serializers.DateField(default=None)\n updated_by = serializers.CharField(default=None)\n updated_on = serializers.DateField(default=None)\n",
"step-5": null,
"step-ids": [
9,
10,
11,
12
]
}
|
[
9,
10,
11,
12
] |
# -*- coding: utf-8 -*-
"""
Animation practical output
The code that follows builds on the "Communications.py" file
Additional code that follows has in part been modified from that of
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/index.html
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel.py
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel2.py
"""
import random
import operator
import matplotlib.pyplot
import matplotlib.animation
import agentframeworkanimate
import csv
# Reading the in.txt file to create the environment.
with open("in.txt", newline="") as raster:
dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)
environment = []
for row in dataset:
rowlist = []
for value in row:
rowlist.append(value)
environment.append(rowlist)
# Setting initial parameters.
num_of_agents = 10
num_of_iterations = 100
neighbourhood = 20
agents = []
# Variables to animate the model.
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
ax.set_autoscale_on(False)
# Make the agents.
# Addition of environment as argument for Agent class to allow interaction between agents and environment.
# Addition of agents as argument for Agent class to allow agents to interact with each other.
for i in range(num_of_agents):
agents.append(agentframeworkanimate.Agent(environment, agents))
carry_on = True
# Creating model animation.
def update(frame_number):
fig.clear()
global carry_on
# Move the agents and store what they eat
for j in range(num_of_iterations):
# Shuffle function used to randomise the order agents are processed with each iteration.
random.shuffle(agents)
for i in range(num_of_agents):
agents[i].move()
agents[i].eat()
agents[i].share_with_neighbours(neighbourhood)
            # Stopping condition: stop the animation once an agent's store reaches 100.
if agents[i].store == 100:
carry_on = False
print("Stopping condition met")
# Generate scatterplot of agents after model iterations.
matplotlib.pyplot.xlim(0, 99)
matplotlib.pyplot.ylim(0, 99)
matplotlib.pyplot.imshow(environment)
for i in range(num_of_agents):
matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
# Generator function that controls when the animation stops.
# It yields at most 100 frames and stops earlier once carry_on is set to False.
def gen_function(b = [0]):
a = 0
global carry_on
while (a < 100) & (carry_on):
yield a
a = a + 1
# Animation will run until generator function condition is met
#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)
animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
matplotlib.pyplot.show()
# Writing the final environment to a text file.
with open("out.txt", "w", newline="") as finalenviron:
writer = csv.writer(finalenviron, delimiter=",")
for row in environment:
writer.writerow(row)
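# Minimal illustration of the stopping pattern used above (an illustrative,
# self-contained sketch; names are hypothetical and it is not called by the
# script): FuncAnimation keeps asking the generator passed via frames=... for
# the next frame, so the animation ends as soon as the generator stops
# yielding -- either after the fixed frame budget or once the flag is cleared.
def _stopping_pattern_demo(max_frames=25):
    demo_fig = matplotlib.pyplot.figure()
    state = {"carry_on": True}

    def demo_update(frame):
        # pretend the stopping condition is met half way through
        if frame >= max_frames // 2:
            state["carry_on"] = False
        demo_fig.clear()
        matplotlib.pyplot.scatter([frame], [frame])

    def demo_gen():
        frame = 0
        while (frame < max_frames) and state["carry_on"]:
            yield frame
            frame += 1

    demo_anim = matplotlib.animation.FuncAnimation(
        demo_fig, demo_update, frames=demo_gen, repeat=False)
    matplotlib.pyplot.show()
    return demo_anim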
|
normal
|
{
"blob_id": "4ea266d4f4c18efbba4204d7301652f8966c18a5",
"index": 9724,
"step-1": "<mask token>\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('in.txt', newline='') as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\n<mask token>\nax.set_autoscale_on(False)\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\n<mask token>\n\n\ndef update(frame_number):\n fig.clear()\n global carry_on\n for j in range(num_of_iterations):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n if agents[i].store == 100:\n carry_on = False\n print('Stopping condition met')\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\n<mask token>\nmatplotlib.pyplot.show()\nwith open('out.txt', 'w', newline='') as finalenviron:\n writer = csv.writer(finalenviron, delimiter=',')\n for row in environment:\n writer.writerow(row)\n",
"step-3": "<mask token>\nwith open('in.txt', newline='') as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\nagents = []\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\nax.set_autoscale_on(False)\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\ncarry_on = True\n\n\ndef update(frame_number):\n fig.clear()\n global carry_on\n for j in range(num_of_iterations):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n if agents[i].store == 100:\n carry_on = False\n print('Stopping condition met')\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=\n gen_function, repeat=False)\nmatplotlib.pyplot.show()\nwith open('out.txt', 'w', newline='') as finalenviron:\n writer = csv.writer(finalenviron, delimiter=',')\n for row in environment:\n writer.writerow(row)\n",
"step-4": "<mask token>\nimport random\nimport operator\nimport matplotlib.pyplot\nimport matplotlib.animation\nimport agentframeworkanimate\nimport csv\nwith open('in.txt', newline='') as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\nagents = []\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\nax.set_autoscale_on(False)\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\ncarry_on = True\n\n\ndef update(frame_number):\n fig.clear()\n global carry_on\n for j in range(num_of_iterations):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n if agents[i].store == 100:\n carry_on = False\n print('Stopping condition met')\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=\n gen_function, repeat=False)\nmatplotlib.pyplot.show()\nwith open('out.txt', 'w', newline='') as finalenviron:\n writer = csv.writer(finalenviron, delimiter=',')\n for row in environment:\n writer.writerow(row)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nAnimation practical output\n\nThe code that follows builds on the \"Communications.py\" file\n\nAdditional code that follows has in part been modified from that of\nhttps://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/index.html\nhttps://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel.py\nhttps://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel2.py\n\"\"\"\n\nimport random\nimport operator\nimport matplotlib.pyplot\nimport matplotlib.animation\nimport agentframeworkanimate\nimport csv\n\n\n# Reading the in.txt file to create the environment.\nwith open(\"in.txt\", newline=\"\") as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\n \n# Setting initial parameters.\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\nagents = []\n\n# Variables to animate the model.\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\n\nax.set_autoscale_on(False)\n\n# Make the agents.\n# Addition of environment as argument for Agent class to allow interaction between agents and environment.\n# Addition of agents as argument for Agent class to allow agents to interact with each other.\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\n\n\ncarry_on = True\n\n# Creating model animation.\ndef update(frame_number):\n fig.clear()\n global carry_on \n\n# Move the agents and store what they eat\n for j in range(num_of_iterations):\n # Shuffle function used to randomise the order agents are processed with each iteration.\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n \n # Stopping condition for animation when all agents have 100 in their store.\n if agents[i].store == 100:\n carry_on = False\n print(\"Stopping condition met\")\n\n # Generate scatterplot of agents after model iterations.\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment) \n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x,agents[i].y)\n \n# Generator function to stop animation.\n# Will stop animation after 10 iterations unless carry_on variable is set to False.\ndef gen_function(b = [0]):\n a = 0\n global carry_on\n while (a < 100) & (carry_on):\n yield a\n a = a + 1 \n\n# Animation will run until generator function condition is met\n#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n\nmatplotlib.pyplot.show()\n\n \n# Writing the final environment to a text file.\nwith open(\"out.txt\", \"w\", newline=\"\") as finalenviron:\n writer = csv.writer(finalenviron, delimiter=\",\")\n for row in environment:\n writer.writerow(row)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from .lasot import Lasot
from .got10k import Got10k
from .tracking_net import TrackingNet
from .imagenetvid import ImagenetVID
from .imagenetdet import ImagenetDET
from .coco_seq import MSCOCOSeq
from .vot import VOT
from .youtube_vos import YoutubeVOS
from .youtube_bb import YoutubeBB
|
normal
|
{
"blob_id": "e12ca2c4592a629ce78cae7211fedaf02352a603",
"index": 4700,
"step-1": "<mask token>\n",
"step-2": "from .lasot import Lasot\nfrom .got10k import Got10k\nfrom .tracking_net import TrackingNet\nfrom .imagenetvid import ImagenetVID\nfrom .imagenetdet import ImagenetDET\nfrom .coco_seq import MSCOCOSeq\nfrom .vot import VOT\nfrom .youtube_vos import YoutubeVOS\nfrom .youtube_bb import YoutubeBB\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def eff_param_string():
"""
The parametrization of the efficiencies from AN-2015-11 as a string that can
be used in a TF1 constructor.
p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
"""
return (
'[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
)
def eff_param():
"""
Get the parametrization as ROOT.TF1
"""
return r.TF1('photon_eff_param', eff_param_string(), 0, 7)
def eff_param_sym():
"""
Get the parametrization as sympy symbolic expression by doing some string
manipulation on the parametrization and then using sympy.sympify
"""
param_str = eff_param_string()
param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
param_str = re.sub('\\[([0-9])\\]', 'p\\1', param_str)
param_str = re.sub('pow\\((.*?)\\s*?,\\s*?([0-9])\\)', '\\1**\\2',
param_str)
return sp.sympify(param_str)
<|reserved_special_token_0|>
def get_cov_func(params, corr):
"""
Get the uncertainty function where only pT is left as a free parameter.
This will return a python function that can be evaluated at any given point
"""
eff = eff_param_sym()
free_params = []
for sym in eff.free_symbols:
if sym.name in params and params[sym.name][1] != 0:
free_params.append(sym)
free_params.sort(key=lambda x: int(x.name.replace('p', '')))
cov_eff = func_cov(eff, free_params)
subst_vals = {p: v[0] for p, v in params.iteritems()}
subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})
subst_vals.update(get_corr_subs_values(corr))
return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
<|reserved_special_token_0|>
def set_params_errors(func, *params):
"""
    Set all the parameters as pairs of value and uncertainty (in the order they
    are in the params list). If uncertainty = 0, the parameter is fixed.
"""
central = np.array([p[0] for p in params])
uncer = np.array([p[1] for p in params])
func.SetParameters(central)
func.SetParErrors(uncer)
for idx, err in enumerate(uncer):
if err == 0:
func.FixParameter(idx, func.GetParameter(idx))
def load_params(param_file):
"""
Load the parameter file and return the list of dicts stored in it
"""
with open(param_file, 'r') as pfile:
eff_params = json.load(pfile)
return eff_params
def create_param(params, sigma_shift, uncorrelated):
"""
Create the function from the passed params and give it an appropriate name
"""
if sigma_shift == 0:
func = eff_param()
set_params_errors(func, params['p0'], params['p1'], params['p2'],
params['p3'], params['alpha'], params['beta'])
func.SetName(get_name(params['eta'], 'photon_eff_pt'))
return func
params['p4'] = params['alpha']
params['p5'] = params['beta']
corr = np.identity(4) if uncorrelated else CORRELATIONS
graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
if sigma_shift < 0:
graph = get_lower_band(graph)
else:
graph = get_upper_band(graph)
graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
return graph
def main(args):
"""Main"""
file_option = 'update' if args.update else 'recreate'
outfile = r.TFile.Open(args.outfile, file_option)
all_params = load_params(args.paramfile)
for params in all_params:
eff = create_param(params, args.sigma, args.uncorrelated)
eff.Write()
outfile.Close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def eff_param_string():
"""
The parametrization of the efficiencies from AN-2015-11 as a string that can
be used in a TF1 constructor.
p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
"""
return (
'[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
)
def eff_param():
"""
Get the parametrization as ROOT.TF1
"""
return r.TF1('photon_eff_param', eff_param_string(), 0, 7)
def eff_param_sym():
"""
Get the parametrization as sympy symbolic expression by doing some string
manipulation on the parametrization and then using sympy.sympify
"""
param_str = eff_param_string()
param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
param_str = re.sub('\\[([0-9])\\]', 'p\\1', param_str)
param_str = re.sub('pow\\((.*?)\\s*?,\\s*?([0-9])\\)', '\\1**\\2',
param_str)
return sp.sympify(param_str)
<|reserved_special_token_0|>
def get_cov_func(params, corr):
"""
Get the uncertainty function where only pT is left as a free parameter.
This will return a python function that can be evaluated at any given point
"""
eff = eff_param_sym()
free_params = []
for sym in eff.free_symbols:
if sym.name in params and params[sym.name][1] != 0:
free_params.append(sym)
free_params.sort(key=lambda x: int(x.name.replace('p', '')))
cov_eff = func_cov(eff, free_params)
subst_vals = {p: v[0] for p, v in params.iteritems()}
subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})
subst_vals.update(get_corr_subs_values(corr))
return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
def get_graph_err(params, corr, n_sigma=1.0, n_points=100):
"""
Get the function evaluated at n_points with uncertainties taking into
account correlations between the parameters
"""
eff_f = eff_param_sym()
eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})
eff_f = sp.lambdify(sp.symbols('x'), eff_f)
var_f = get_cov_func(params, corr)
x_bins = np.linspace(0.4, 7, n_points + 1)
x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])
x_err = np.diff(x_bins)
y_cent = np.array([eff_f(x) for x in x_cent])
y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma
return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)
def set_params_errors(func, *params):
"""
    Set all the parameters as pairs of value and uncertainty (in the order they
    are in the params list). If uncertainty = 0, the parameter is fixed.
"""
central = np.array([p[0] for p in params])
uncer = np.array([p[1] for p in params])
func.SetParameters(central)
func.SetParErrors(uncer)
for idx, err in enumerate(uncer):
if err == 0:
func.FixParameter(idx, func.GetParameter(idx))
def load_params(param_file):
"""
Load the parameter file and return the list of dicts stored in it
"""
with open(param_file, 'r') as pfile:
eff_params = json.load(pfile)
return eff_params
def create_param(params, sigma_shift, uncorrelated):
"""
Create the function from the passed params and give it an appropriate name
"""
if sigma_shift == 0:
func = eff_param()
set_params_errors(func, params['p0'], params['p1'], params['p2'],
params['p3'], params['alpha'], params['beta'])
func.SetName(get_name(params['eta'], 'photon_eff_pt'))
return func
params['p4'] = params['alpha']
params['p5'] = params['beta']
corr = np.identity(4) if uncorrelated else CORRELATIONS
graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
if sigma_shift < 0:
graph = get_lower_band(graph)
else:
graph = get_upper_band(graph)
graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
return graph
def main(args):
"""Main"""
file_option = 'update' if args.update else 'recreate'
outfile = r.TFile.Open(args.outfile, file_option)
all_params = load_params(args.paramfile)
for params in all_params:
eff = create_param(params, args.sigma, args.uncorrelated)
eff.Write()
outfile.Close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def eff_param_string():
"""
The parametrization of the efficiencies from AN-2015-11 as a string that can
be used in a TF1 constructor.
p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
"""
return (
'[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
)
def eff_param():
"""
Get the parametrization as ROOT.TF1
"""
return r.TF1('photon_eff_param', eff_param_string(), 0, 7)
def eff_param_sym():
"""
Get the parametrization as sympy symbolic expression by doing some string
manipulation on the parametrization and then using sympy.sympify
"""
param_str = eff_param_string()
param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
param_str = re.sub('\\[([0-9])\\]', 'p\\1', param_str)
param_str = re.sub('pow\\((.*?)\\s*?,\\s*?([0-9])\\)', '\\1**\\2',
param_str)
return sp.sympify(param_str)
def get_corr_subs_values(corr):
"""
Get the dictionary of substitution values for the correlation matrix
"""
subs_dict = {}
n_dim = corr.shape[0]
for irow in xrange(0, n_dim):
for icol in xrange(irow + 1, n_dim):
subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]
return subs_dict
def get_cov_func(params, corr):
"""
Get the uncertainty function where only pT is left as a free parameter.
This will return a python function that can be evaluated at any given point
"""
eff = eff_param_sym()
free_params = []
for sym in eff.free_symbols:
if sym.name in params and params[sym.name][1] != 0:
free_params.append(sym)
free_params.sort(key=lambda x: int(x.name.replace('p', '')))
cov_eff = func_cov(eff, free_params)
subst_vals = {p: v[0] for p, v in params.iteritems()}
subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})
subst_vals.update(get_corr_subs_values(corr))
return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
def get_graph_err(params, corr, n_sigma=1.0, n_points=100):
"""
Get the function evaluated at n_points with uncertainties taking into
account correlations between the parameters
"""
eff_f = eff_param_sym()
eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})
eff_f = sp.lambdify(sp.symbols('x'), eff_f)
var_f = get_cov_func(params, corr)
x_bins = np.linspace(0.4, 7, n_points + 1)
x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])
x_err = np.diff(x_bins)
y_cent = np.array([eff_f(x) for x in x_cent])
y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma
return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)
def set_params_errors(func, *params):
"""
    Set all the parameters as pairs of value and uncertainty (in the order they
    are in the params list). If uncertainty = 0, the parameter is fixed.
"""
central = np.array([p[0] for p in params])
uncer = np.array([p[1] for p in params])
func.SetParameters(central)
func.SetParErrors(uncer)
for idx, err in enumerate(uncer):
if err == 0:
func.FixParameter(idx, func.GetParameter(idx))
def load_params(param_file):
"""
Load the parameter file and return the list of dicts stored in it
"""
with open(param_file, 'r') as pfile:
eff_params = json.load(pfile)
return eff_params
def create_param(params, sigma_shift, uncorrelated):
"""
Create the function from the passed params and give it an appropriate name
"""
if sigma_shift == 0:
func = eff_param()
set_params_errors(func, params['p0'], params['p1'], params['p2'],
params['p3'], params['alpha'], params['beta'])
func.SetName(get_name(params['eta'], 'photon_eff_pt'))
return func
params['p4'] = params['alpha']
params['p5'] = params['beta']
corr = np.identity(4) if uncorrelated else CORRELATIONS
graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
if sigma_shift < 0:
graph = get_lower_band(graph)
else:
graph = get_upper_band(graph)
graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
return graph
def main(args):
"""Main"""
file_option = 'update' if args.update else 'recreate'
outfile = r.TFile.Open(args.outfile, file_option)
all_params = load_params(args.paramfile)
for params in all_params:
eff = create_param(params, args.sigma, args.uncorrelated)
eff.Write()
outfile.Close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
r.PyConfig.IgnoreCommandLineOptions = True
<|reserved_special_token_0|>
COVARIANCE = np.array([[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06], [
1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06], [-4.328e-06, -1.714e-05,
4.228e-05, -1.481e-05], [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05]])
CORRELATIONS = np.matmul(np.matmul(np.diag(1 / np.sqrt(np.diag(COVARIANCE))
), COVARIANCE), np.diag(1 / np.sqrt(np.diag(COVARIANCE))))
def eff_param_string():
"""
The parametrization of the efficiencies from AN-2015-11 as a string that can
be used in a TF1 constructor.
p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
"""
return (
'[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
)
def eff_param():
"""
Get the parametrization as ROOT.TF1
"""
return r.TF1('photon_eff_param', eff_param_string(), 0, 7)
def eff_param_sym():
"""
Get the parametrization as sympy symbolic expression by doing some string
manipulation on the parametrization and then using sympy.sympify
"""
param_str = eff_param_string()
param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
param_str = re.sub('\\[([0-9])\\]', 'p\\1', param_str)
param_str = re.sub('pow\\((.*?)\\s*?,\\s*?([0-9])\\)', '\\1**\\2',
param_str)
return sp.sympify(param_str)
def get_corr_subs_values(corr):
"""
Get the dictionary of substitution values for the correlation matrix
"""
subs_dict = {}
n_dim = corr.shape[0]
for irow in xrange(0, n_dim):
for icol in xrange(irow + 1, n_dim):
subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]
return subs_dict
def get_cov_func(params, corr):
"""
Get the uncertainty function where only pT is left as a free parameter.
This will return a python function that can be evaluated at any given point
"""
eff = eff_param_sym()
free_params = []
for sym in eff.free_symbols:
if sym.name in params and params[sym.name][1] != 0:
free_params.append(sym)
free_params.sort(key=lambda x: int(x.name.replace('p', '')))
cov_eff = func_cov(eff, free_params)
subst_vals = {p: v[0] for p, v in params.iteritems()}
subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})
subst_vals.update(get_corr_subs_values(corr))
return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
def get_graph_err(params, corr, n_sigma=1.0, n_points=100):
"""
Get the function evaluated at n_points with uncertainties taking into
account correlations between the parameters
"""
eff_f = eff_param_sym()
eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})
eff_f = sp.lambdify(sp.symbols('x'), eff_f)
var_f = get_cov_func(params, corr)
x_bins = np.linspace(0.4, 7, n_points + 1)
x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])
x_err = np.diff(x_bins)
y_cent = np.array([eff_f(x) for x in x_cent])
y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma
return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)
def set_params_errors(func, *params):
"""
    Set all the parameters as pairs of value and uncertainty (in the order they
    are in the params list). If uncertainty = 0, the parameter is fixed.
"""
central = np.array([p[0] for p in params])
uncer = np.array([p[1] for p in params])
func.SetParameters(central)
func.SetParErrors(uncer)
for idx, err in enumerate(uncer):
if err == 0:
func.FixParameter(idx, func.GetParameter(idx))
def load_params(param_file):
"""
Load the parameter file and return the list of dicts stored in it
"""
with open(param_file, 'r') as pfile:
eff_params = json.load(pfile)
return eff_params
def create_param(params, sigma_shift, uncorrelated):
"""
Create the function from the passed params and give it an appropriate name
"""
if sigma_shift == 0:
func = eff_param()
set_params_errors(func, params['p0'], params['p1'], params['p2'],
params['p3'], params['alpha'], params['beta'])
func.SetName(get_name(params['eta'], 'photon_eff_pt'))
return func
params['p4'] = params['alpha']
params['p5'] = params['beta']
corr = np.identity(4) if uncorrelated else CORRELATIONS
graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
if sigma_shift < 0:
graph = get_lower_band(graph)
else:
graph = get_upper_band(graph)
graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
return graph
def main(args):
"""Main"""
file_option = 'update' if args.update else 'recreate'
outfile = r.TFile.Open(args.outfile, file_option)
all_params = load_params(args.paramfile)
for params in all_params:
eff = create_param(params, args.sigma, args.uncorrelated)
eff.Write()
outfile.Close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description=
'script to generate TF1 photon efficiency parametrizations from json file holding the fit parameters'
)
parser.add_argument('paramfile', help=
'json file containing the fitted parameters')
parser.add_argument('-o', '--outfile', help=
'root file into which the TF1 should be stored', default=
'photon_effs_param.root')
parser.add_argument('-u', '--update', help=
'update the output file instead of recreating it', default=False,
action='store_true')
parser.add_argument('-s', '--sigma', help=
'Use the central value + [sigma] * uncertainty for each parameter',
type=float, default=0)
parser.add_argument('--uncorrelated', default=False, action=
'store_true', help=
'Assume that the free parameters are uncorrelated instead of using correlation parameters from a global fit'
)
clargs = parser.parse_args()
main(clargs)
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
Script that generates the photon efficiency curves and stores them in a root
file.
For the moment only the pT curves for the different eta bins are created
"""
import re
import json
import ROOT as r
r.PyConfig.IgnoreCommandLineOptions = True
import numpy as np
import sympy as sp
from utils.symbolic import func_cov
from utils.graph_utils import get_lower_band, get_upper_band
from common_func import get_name
# Covariance matrix from the fit integrated over the whole eta range, where
# alpha and beta were fixed. This will be used to calculate the correlation
# coefficients between the fitted parameters, which will then be used to get
# the uncertainty bands for the parametrization
COVARIANCE = np.array([
[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06],
[1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06],
[-4.328e-06, -1.714e-05, 4.228e-05, -1.481e-05],
[4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05],
])
# corr = diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}
CORRELATIONS = np.matmul(
np.matmul(
np.diag(1/np.sqrt(np.diag(COVARIANCE))), COVARIANCE,
), np.diag(1/np.sqrt(np.diag(COVARIANCE)))
)
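# A small consistency check on the construction above (kept as comments so it
# does not run with the script): diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}
# leaves ones on the diagonal and Pearson correlation coefficients elsewhere,
# e.g. rho_p0p1 = 1.545e-06 / sqrt(1.181e-06 * 7.215e-06) ~= 0.53. This can be
# verified with:
#   assert np.allclose(np.diag(CORRELATIONS), 1.0)
#   assert np.allclose(CORRELATIONS, CORRELATIONS.T)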
def eff_param_string():
"""
The parametrization of the efficiencies from AN-2015-11 as a string that can
be used in a TF1 constructor.
p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
"""
return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
def eff_param():
"""
Get the parametrization as ROOT.TF1
"""
return r.TF1('photon_eff_param', eff_param_string(), 0, 7)
def eff_param_sym():
"""
Get the parametrization as sympy symbolic expression by doing some string
manipulation on the parametrization and then using sympy.sympify
"""
param_str = eff_param_string()
# replace call to ROOTs erf and give x[0] a parseable name
param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
# convert parameters from [x] notation to px notation
param_str = re.sub(r'\[([0-9])\]', r'p\1', param_str)
# replace pow(x, y) with x**y (pythonic) syntax
param_str = re.sub(r'pow\((.*?)\s*?,\s*?([0-9])\)', r'\1**\2', param_str)
return sp.sympify(param_str)
def get_corr_subs_values(corr):
"""
Get the dictionary of substitution values for the correlation matrix
"""
subs_dict = {}
n_dim = corr.shape[0]
for irow in xrange(0, n_dim):
for icol in xrange(irow + 1, n_dim):
subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]
return subs_dict
def get_cov_func(params, corr):
"""
Get the uncertainty function where only pT is left as a free parameter.
This will return a python function that can be evaluated at any given point
"""
eff = eff_param_sym()
# get the list of free parameters
free_params = []
for sym in eff.free_symbols:
if sym.name in params and params[sym.name][1] != 0:
free_params.append(sym)
# sort the parameters according to their name, such that the correlation
# coefficients actually match
free_params.sort(key=lambda x: int(x.name.replace('p', '')))
cov_eff = func_cov(eff, free_params)
# build up the dictionary of symbol -> value that will be substituted.
# In the end the returned function will only have one free parameter left
subst_vals = {
p: v[0] for p, v in params.iteritems()
}
subst_vals.update({
'sigma_' + p: v[1] for p, v in params.iteritems()
})
subst_vals.update(
get_corr_subs_values(corr)
)
# NOTE: here it is assumed that 'x' is the only free parameter left
return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
def get_graph_err(params, corr, n_sigma=1.0, n_points=100):
"""
Get the function evaluated at n_points with uncertainties taking into
account correlations between the parameters
"""
# central function
eff_f = eff_param_sym()
eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})
# NOTE: assume that 'x' is the only free parameter left
eff_f = sp.lambdify(sp.symbols('x'), eff_f)
# uncertainty function (as function of pT)
var_f = get_cov_func(params, corr)
x_bins = np.linspace(0.4, 7, n_points + 1)
x_cent = 0.5 * (x_bins[1:] + x_bins[:-1]) # bin centers
x_err = np.diff(x_bins) # "uncertainties" in x
y_cent = np.array([eff_f(x) for x in x_cent])
y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma
return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)
def set_params_errors(func, *params):
"""
    Set all the parameters as pairs of value and uncertainty (in the order they
    are in the params list). If uncertainty = 0, the parameter is fixed.
"""
central = np.array([p[0] for p in params])
uncer = np.array([p[1] for p in params])
func.SetParameters(central)
func.SetParErrors(uncer)
for idx, err in enumerate(uncer):
if err == 0:
func.FixParameter(idx, func.GetParameter(idx))
def load_params(param_file):
"""
Load the parameter file and return the list of dicts stored in it
"""
with open(param_file, 'r') as pfile:
eff_params = json.load(pfile)
return eff_params
def create_param(params, sigma_shift, uncorrelated):
"""
Create the function from the passed params and give it an appropriate name
"""
# if the central results are desired. Use the exact parametrization as TF1
if sigma_shift == 0:
func = eff_param()
set_params_errors(func, params["p0"], params["p1"], params["p2"],
params["p3"], params["alpha"], params["beta"])
func.SetName(get_name(params["eta"], 'photon_eff_pt'))
return func
# else get an aproximation by evaluating the function at a given number of
# points and determine the uncertainties at these points, then store the
# points as a TGraph where the y-values are the central + uncertainty values
# at each evaluation point
# NOTE: Since eff_param_sym names alpha and beta p4 and p5 respectively
# (can't use beta in an expression that goes through sympy.sympify), we have
# to clone them here. We can leave the original values in, since they will
# not be picked up by the substitution command
params['p4'] = params['alpha']
params['p5'] = params['beta']
# use the global correlation matrix or an identity matrix if uncorrelated
# parameters are desired
corr = np.identity(4) if uncorrelated else CORRELATIONS
graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
if sigma_shift < 0:
graph = get_lower_band(graph)
else:
graph = get_upper_band(graph)
graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
return graph
def main(args):
"""Main"""
file_option = 'update' if args.update else 'recreate'
outfile = r.TFile.Open(args.outfile, file_option)
all_params = load_params(args.paramfile)
for params in all_params:
eff = create_param(params, args.sigma, args.uncorrelated)
eff.Write()
outfile.Close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='script to generate TF1 '
'photon efficiency parametrizations from '
'json file holding the fit parameters')
parser.add_argument('paramfile', help='json file containing the fitted '
'parameters')
parser.add_argument('-o', '--outfile', help='root file into which the TF1 '
'should be stored', default='photon_effs_param.root')
parser.add_argument('-u', '--update', help='update the output file instead '
'of recreating it', default=False, action='store_true')
parser.add_argument('-s', '--sigma', help='Use the central value + [sigma] '
'* uncertainty for each parameter', type=float,
default=0)
parser.add_argument('--uncorrelated', default=False, action='store_true',
help='Assume that the free parameters are uncorrelated '
'instead of using correlation parameters from a global '
'fit')
clargs = parser.parse_args()
main(clargs)
|
flexible
|
{
"blob_id": "fd450b5454b65ed69b411028788c587f9674760c",
"index": 966,
"step-1": "<mask token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<mask token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<mask token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<mask token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<mask token>\n",
"step-4": "<mask token>\nr.PyConfig.IgnoreCommandLineOptions = True\n<mask token>\nCOVARIANCE = np.array([[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06], [\n 1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06], [-4.328e-06, -1.714e-05, \n 4.228e-05, -1.481e-05], [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05]])\nCORRELATIONS = np.matmul(np.matmul(np.diag(1 / np.sqrt(np.diag(COVARIANCE))\n ), COVARIANCE), np.diag(1 / np.sqrt(np.diag(COVARIANCE))))\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'script to generate TF1 photon efficiency parametrizations from json file holding the fit parameters'\n )\n parser.add_argument('paramfile', help=\n 'json file containing the fitted parameters')\n parser.add_argument('-o', '--outfile', help=\n 'root file into which the TF1 should be stored', default=\n 'photon_effs_param.root')\n parser.add_argument('-u', '--update', help=\n 'update the output file instead of recreating it', default=False,\n action='store_true')\n parser.add_argument('-s', '--sigma', help=\n 'Use the central value + [sigma] * uncertainty for each parameter',\n type=float, default=0)\n parser.add_argument('--uncorrelated', default=False, action=\n 'store_true', help=\n 'Assume that the free parameters are uncorrelated instead of using correlation parameters from a global fit'\n )\n clargs = parser.parse_args()\n main(clargs)\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nScript that generates the photon efficiency curves and stores them in a root\nfile.\n\nFor the moment only the pT curves for the different eta bins are created\n\"\"\"\n\nimport re\nimport json\nimport ROOT as r\nr.PyConfig.IgnoreCommandLineOptions = True\n\nimport numpy as np\nimport sympy as sp\n\nfrom utils.symbolic import func_cov\nfrom utils.graph_utils import get_lower_band, get_upper_band\n\nfrom common_func import get_name\n\n# Covariance matrix from the fit integrated over the whole eta range, where\n# alpha and beta were fixed. This will be used to calculate the correlation\n# coefficients between the fitted parameters, which will then be used to get\n# the uncertainty bands for the parametrization\nCOVARIANCE = np.array([\n [1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06],\n [1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06],\n [-4.328e-06, -1.714e-05, 4.228e-05, -1.481e-05],\n [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05],\n])\n\n# corr = diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}\nCORRELATIONS = np.matmul(\n np.matmul(\n np.diag(1/np.sqrt(np.diag(COVARIANCE))), COVARIANCE,\n ), np.diag(1/np.sqrt(np.diag(COVARIANCE)))\n)\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n # replace call to ROOTs erf and give x[0] a parseable name\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n # convert parameters from [x] notation to px notation\n param_str = re.sub(r'\\[([0-9])\\]', r'p\\1', param_str)\n # replace pow(x, y) with x**y (pythonic) syntax\n param_str = re.sub(r'pow\\((.*?)\\s*?,\\s*?([0-9])\\)', r'\\1**\\2', param_str)\n\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n # get the list of free parameters\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n\n # sort the parameters according to their name, such that the correlation\n # coefficients actually match\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n\n cov_eff = func_cov(eff, free_params)\n\n # build up the dictionary of symbol -> value that will be substituted.\n # In the end the returned function will only have one free parameter left\n subst_vals = {\n p: v[0] for p, v in params.iteritems()\n }\n subst_vals.update({\n 'sigma_' + p: v[1] for p, v in params.iteritems()\n 
})\n subst_vals.update(\n get_corr_subs_values(corr)\n )\n\n # NOTE: here it is assumed that 'x' is the only free parameter left\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n # central function\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n # NOTE: assume that 'x' is the only free parameter left\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n\n # uncertainty function (as function of pT)\n var_f = get_cov_func(params, corr)\n\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1]) # bin centers\n x_err = np.diff(x_bins) # \"uncertainties\" in x\n\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n\n func.SetParameters(central)\n func.SetParErrors(uncer)\n\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n # if the central results are desired. Use the exact parametrization as TF1\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params[\"p0\"], params[\"p1\"], params[\"p2\"],\n params[\"p3\"], params[\"alpha\"], params[\"beta\"])\n\n func.SetName(get_name(params[\"eta\"], 'photon_eff_pt'))\n return func\n\n # else get an aproximation by evaluating the function at a given number of\n # points and determine the uncertainties at these points, then store the\n # points as a TGraph where the y-values are the central + uncertainty values\n # at each evaluation point\n\n # NOTE: Since eff_param_sym names alpha and beta p4 and p5 respectively\n # (can't use beta in an expression that goes through sympy.sympify), we have\n # to clone them here. 
We can leave the original values in, since they will\n # not be picked up by the substitution command\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n\n # use the global correlation matrix or an identity matrix if uncorrelated\n # parameters are desired\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='script to generate TF1 '\n 'photon efficiency parametrizations from '\n 'json file holding the fit parameters')\n parser.add_argument('paramfile', help='json file containing the fitted '\n 'parameters')\n parser.add_argument('-o', '--outfile', help='root file into which the TF1 '\n 'should be stored', default='photon_effs_param.root')\n parser.add_argument('-u', '--update', help='update the output file instead '\n 'of recreating it', default=False, action='store_true')\n parser.add_argument('-s', '--sigma', help='Use the central value + [sigma] '\n '* uncertainty for each parameter', type=float,\n default=0)\n parser.add_argument('--uncorrelated', default=False, action='store_true',\n help='Assume that the free parameters are uncorrelated '\n 'instead of using correlation parameters from a global '\n 'fit')\n\n clargs = parser.parse_args()\n main(clargs)\n",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
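A minimal sketch of the error-propagation idea the record above relies on: linear (Jacobian) propagation of parameter uncertainties with a correlation coefficient, written in plain sympy. The func_cov helper from utils.symbolic is not shown in the record, so this stands in for it; the toy function and the numeric values below are made up for illustration.

import sympy as sp

x, p0, p1 = sp.symbols('x p0 p1')
s0, s1, rho = sp.symbols('sigma_p0 sigma_p1 rho_p0p1')

f = p0 * sp.erf(x + p1)                      # toy 2-parameter function
d0, d1 = sp.diff(f, p0), sp.diff(f, p1)

# var(f) = J C J^T for a 2x2 covariance built from the sigmas and one correlation
var_f = d0**2 * s0**2 + d1**2 * s1**2 + 2 * d0 * d1 * rho * s0 * s1

# fix the parameters (illustrative numbers only) and evaluate at one x value
num = var_f.subs({p0: 0.9, p1: 0.2, s0: 0.01, s1: 0.05, rho: -0.3})
print(sp.lambdify(x, num, modules='math')(3.0))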
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# coding=utf-8
"""
author = jamon
"""
|
flexible
|
{
"blob_id": "00790b9d2648d19a37d1d1864e7fdeab0f59f764",
"index": 4266,
"step-1": "<mask token>\n",
"step-2": "# coding=utf-8\n\"\"\"\nauthor = jamon\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- encoding: utf-8 -*-
"""
views: vistas sistema recomendador
@author Camilo Ramírez
@contact camilolinchis@gmail.com
camilortte@hotmail.com
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@satus Pre-Alpha
@version= 0..215
"""
from django.views.generic import TemplateView
from apps.recommender_system.models import EstablecimientosRecommender
from apps.establishment_system.models import Establecimiento
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from apps.externals.djangoratings.models import Vote
class RecomendacionView(TemplateView):
template_name = 'recommender/recomendacion.html'
def get_context_data(self, **kwargs):
context = super(RecomendacionView, self).get_context_data(**kwargs)
#context['now'] = timezone.now()
context['recomendaciones']=self.obtener_recomendacion(self.request.user)
return context
def obtener_recomendacion(self,user):
print "Prediciendo recomendacion"
recomendador_instance=EstablecimientosRecommender()
recomendaciones=recomendador_instance.storage.get_recommendations_for_user(user)
print recomendaciones
if recomendaciones:
print "Recomendando"
result=[]
for recomendacion in recomendaciones:
result.append(recomendacion.object)
recomendaciones=result
recomendaciones_leng=len(recomendaciones)
if recomendaciones_leng <10:
query=Establecimiento.objects.all().order_by('-rating_score')
for establecimiento in query:
if establecimiento not in recomendaciones:
if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):
recomendaciones.append(establecimiento)
if len(recomendaciones)>=10:
break
else:
query=Establecimiento.objects.all().order_by('-rating_score')
for establecimiento in query:
if establecimiento not in recomendaciones:
if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):
recomendaciones.append(establecimiento)
if len(recomendaciones)>=10:
print "Se completo la lista de 10 recomendaciones"
break
print "No se encontraron recomendaciones"
return recomendaciones
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(RecomendacionView, self).dispatch(*args, **kwargs)
|
normal
|
{
"blob_id": "c6cbd4d18363f00b73fac873ba45d6063bee7e64",
"index": 3074,
"step-1": "# -*- encoding: utf-8 -*-\n\"\"\"\n \n views: vistas sistema recomendador\n\n @author Camilo Ramírez\n @contact camilolinchis@gmail.com \n camilortte@hotmail.com\n @camilortte on Twitter\n @copyright Copyright 2014-2015, RecomendadorUD\n @license GPL\n @date 2014-10-10\n @satus Pre-Alpha\n @version= 0..215\n\n\n\"\"\"\nfrom django.views.generic import TemplateView\nfrom apps.recommender_system.models import EstablecimientosRecommender\nfrom apps.establishment_system.models import Establecimiento\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom apps.externals.djangoratings.models import Vote\n\n\nclass RecomendacionView(TemplateView):\n template_name = 'recommender/recomendacion.html' \n\n def get_context_data(self, **kwargs): \n context = super(RecomendacionView, self).get_context_data(**kwargs)\n #context['now'] = timezone.now()\n context['recomendaciones']=self.obtener_recomendacion(self.request.user)\n return context\n\n def obtener_recomendacion(self,user):\n print \"Prediciendo recomendacion\"\n recomendador_instance=EstablecimientosRecommender()\n recomendaciones=recomendador_instance.storage.get_recommendations_for_user(user)\n print recomendaciones\n if recomendaciones:\n print \"Recomendando\"\n result=[]\n for recomendacion in recomendaciones:\n result.append(recomendacion.object)\n recomendaciones=result\n\n recomendaciones_leng=len(recomendaciones)\n if recomendaciones_leng <10:\n query=Establecimiento.objects.all().order_by('-rating_score')\n for establecimiento in query:\n if establecimiento not in recomendaciones:\n if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):\n recomendaciones.append(establecimiento)\n if len(recomendaciones)>=10:\n break\n \n else:\n query=Establecimiento.objects.all().order_by('-rating_score')\n for establecimiento in query:\n if establecimiento not in recomendaciones:\n if not Vote.objects.filter(object_id=establecimiento.id,user=user.id):\n recomendaciones.append(establecimiento)\n if len(recomendaciones)>=10:\n print \"Se completo la lista de 10 recomendaciones\"\n break\n print \"No se encontraron recomendaciones\"\n return recomendaciones\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(RecomendacionView, self).dispatch(*args, **kwargs)\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isBalanced(self, root: TreeNode) -> bool:
self.mem = dict()
if root is None:
return True
leftH = self.getHeight(root.left)
rightH = self.getHeight(root.right)
return (
abs(leftH-rightH) <= 1 and
self.isBalanced(root.left) and
self.isBalanced(root.right)
)
def getHeight(self, node):
if node in self.mem:
return self.mem[node]
if node is None:
return 0
h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1
self.mem[node] = h
return h
|
normal
|
{
"blob_id": "9e98a361ef20049cba488b86ad06eb92b3d29d11",
"index": 3584,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n <mask token>\n",
"step-3": "class Solution:\n\n def isBalanced(self, root: TreeNode) ->bool:\n self.mem = dict()\n if root is None:\n return True\n leftH = self.getHeight(root.left)\n rightH = self.getHeight(root.right)\n return abs(leftH - rightH) <= 1 and self.isBalanced(root.left\n ) and self.isBalanced(root.right)\n <mask token>\n",
"step-4": "class Solution:\n\n def isBalanced(self, root: TreeNode) ->bool:\n self.mem = dict()\n if root is None:\n return True\n leftH = self.getHeight(root.left)\n rightH = self.getHeight(root.right)\n return abs(leftH - rightH) <= 1 and self.isBalanced(root.left\n ) and self.isBalanced(root.right)\n\n def getHeight(self, node):\n if node in self.mem:\n return self.mem[node]\n if node is None:\n return 0\n h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1\n self.mem[node] = h\n return h\n",
"step-5": "# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isBalanced(self, root: TreeNode) -> bool:\n self.mem = dict()\n if root is None:\n return True\n \n leftH = self.getHeight(root.left)\n rightH = self.getHeight(root.right)\n \n return (\n abs(leftH-rightH) <= 1 and\n self.isBalanced(root.left) and\n self.isBalanced(root.right)\n )\n \n def getHeight(self, node):\n if node in self.mem:\n return self.mem[node]\n if node is None:\n return 0\n \n h = max(self.getHeight(node.left), self.getHeight(node.right)) + 1\n self.mem[node] = h\n return h\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
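A small usage sketch for the Solution class above. The TreeNode definition is only present as a comment in the record, so a matching class is assumed here.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

balanced = TreeNode(1, TreeNode(2), TreeNode(3))             # subtree depths differ by 0
skewed = TreeNode(1, None, TreeNode(2, None, TreeNode(3)))   # right chain of depth 3

sol = Solution()
print(sol.isBalanced(balanced))   # True
print(sol.isBalanced(skewed))     # False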
import wikipedia
input_ = input("Type in your question ")
print(wikipedia.summary(input_))
|
normal
|
{
"blob_id": "5eb5388ffe7a7c880d8fcfaa137c2c9a133a0636",
"index": 713,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(wikipedia.summary(input_))\n",
"step-3": "<mask token>\ninput_ = input('Type in your question ')\nprint(wikipedia.summary(input_))\n",
"step-4": "import wikipedia\ninput_ = input('Type in your question ')\nprint(wikipedia.summary(input_))\n",
"step-5": "import wikipedia\ninput_ = input(\"Type in your question \")\nprint(wikipedia.summary(input_))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
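A slightly more defensive variant of the same lookup, assuming the third-party wikipedia package is installed and the machine is online; the exception classes named here come from that package.

import wikipedia
from wikipedia.exceptions import DisambiguationError, PageError

query = input("Type in your question ")
try:
    print(wikipedia.summary(query))
except DisambiguationError as err:
    # several pages match; show a few candidate titles instead
    print("Ambiguous query, options include:", err.options[:5])
except PageError:
    print("No page found for:", query)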
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for val in theFile.read().split():
temp.append(int(val))
theFile.close()
<|reserved_special_token_0|>
for i in range(n):
for j in range(n):
arr[i, j] = temp[k]
k = k + 1
<|reserved_special_token_0|>
for i in range(n):
ke.append([])
<|reserved_special_token_0|>
for i in range(n):
trongso.append([])
for i in range(n):
for j in range(n):
if arr[i, j] != 0:
ke[i].append(j)
trongso[i].append(arr[i, j])
print(trongso[1])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
theFile = open('datapri.txt', 'r')
temp = []
n = int(theFile.readline().format())
for val in theFile.read().split():
temp.append(int(val))
theFile.close()
arr = np.random.rand(n, n)
k = 0
for i in range(n):
for j in range(n):
arr[i, j] = temp[k]
k = k + 1
ke = []
for i in range(n):
ke.append([])
trongso = []
for i in range(n):
trongso.append([])
for i in range(n):
for j in range(n):
if arr[i, j] != 0:
ke[i].append(j)
trongso[i].append(arr[i, j])
print(trongso[1])
<|reserved_special_token_1|>
import numpy as np
theFile = open('datapri.txt', 'r')
temp = []
n = int(theFile.readline().format())
for val in theFile.read().split():
temp.append(int(val))
theFile.close()
arr = np.random.rand(n, n)
k = 0
for i in range(n):
for j in range(n):
arr[i, j] = temp[k]
k = k + 1
ke = []
for i in range(n):
ke.append([])
trongso = []
for i in range(n):
trongso.append([])
for i in range(n):
for j in range(n):
if arr[i, j] != 0:
ke[i].append(j)
trongso[i].append(arr[i, j])
print(trongso[1])
<|reserved_special_token_1|>
import numpy as np
#read data from file
#read data from file
theFile = open('datapri.txt','r')
temp = []
#n la so phan tu cua mang mau
n = int(theFile.readline().format())
for val in theFile.read().split():
temp.append(int(val))
theFile.close()
arr = np.random.rand(n,n)
k = 0
for i in range(n):
for j in range(n):
arr[i,j] = temp[k]
k = k+1
# print(arr)
#tao 1 mang de chua ma tran cac dinh ke
ke = []
for i in range(n):
ke.append([])
trongso = []
for i in range(n):
trongso.append([])
#dua cac dinh vao mang ke
for i in range(n):
for j in range(n):
if(arr[i,j] != 0):
ke[i].append(j)
trongso[i].append(arr[i,j])
print(trongso[1])
# available = [False for i in range(n)]
# vertex = [0 for i in range(n)]
#
# def CorlorGraph():
# #khoi tao dinh dau tien duoc to mau dau tien
# vertex[0] = 0
#
# #khoi tao cac dinh con lai chua duoc to mau
# for i in range(1,n):
# vertex[i] = -1
#
# #to mau cac dinh con lai
# for i in range(1,n):
# for j in (ke[i]):
# if(vertex[j] != -1):
# available[vertex[j]] = True
#
# crz = 0
# for k in range(n):
# if (available[k] == False):
# break
# crz = crz + 1
# vertex[i] = crz
# for j in (ke[i]):
# if (vertex[j] != -1):
# available[vertex[j]] = False
# for i in range(n):
# print("ke",i,"-",ke[i])
# CorlorGraph()
# print("Cac dinh da duoc to mau: ")
# for i in range(n):
# print(i,vertex[i])
|
flexible
|
{
"blob_id": "aa801bc8398cdf69a15d04188dd8429e4624150e",
"index": 5574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\n<mask token>\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\n<mask token>\nfor i in range(n):\n ke.append([])\n<mask token>\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-3": "<mask token>\ntheFile = open('datapri.txt', 'r')\ntemp = []\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\narr = np.random.rand(n, n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-4": "import numpy as np\ntheFile = open('datapri.txt', 'r')\ntemp = []\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\narr = np.random.rand(n, n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-5": "import numpy as np\n#read data from file\n#read data from file\n\ntheFile = open('datapri.txt','r')\ntemp = []\n#n la so phan tu cua mang mau\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\n\narr = np.random.rand(n,n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i,j] = temp[k]\n k = k+1\n# print(arr)\n#tao 1 mang de chua ma tran cac dinh ke\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\n#dua cac dinh vao mang ke\nfor i in range(n):\n for j in range(n):\n if(arr[i,j] != 0):\n ke[i].append(j)\n trongso[i].append(arr[i,j])\nprint(trongso[1])\n# available = [False for i in range(n)]\n# vertex = [0 for i in range(n)]\n#\n# def CorlorGraph():\n# #khoi tao dinh dau tien duoc to mau dau tien\n# vertex[0] = 0\n#\n# #khoi tao cac dinh con lai chua duoc to mau\n# for i in range(1,n):\n# vertex[i] = -1\n#\n# #to mau cac dinh con lai\n# for i in range(1,n):\n# for j in (ke[i]):\n# if(vertex[j] != -1):\n# available[vertex[j]] = True\n#\n# crz = 0\n# for k in range(n):\n# if (available[k] == False):\n# break\n# crz = crz + 1\n# vertex[i] = crz\n# for j in (ke[i]):\n# if (vertex[j] != -1):\n# available[vertex[j]] = False\n# for i in range(n):\n# print(\"ke\",i,\"-\",ke[i])\n# CorlorGraph()\n# print(\"Cac dinh da duoc to mau: \")\n# for i in range(n):\n# print(i,vertex[i])\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
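The script above depends on datapri.txt, which is not part of the record, so it cannot be run as-is. A self-contained sketch of the same adjacency-list construction (ke = adjacency lists, trongso = edge weights) with a small hard-coded weight matrix of made-up values:

import numpy as np

arr = np.array([[0, 2, 0, 6],
                [2, 0, 3, 8],
                [0, 3, 0, 0],
                [6, 8, 0, 0]])   # 0 means "no edge"
n = arr.shape[0]

ke = [[] for _ in range(n)]        # ke[i]:      vertices adjacent to i
trongso = [[] for _ in range(n)]   # trongso[i]: weights of those edges

for i in range(n):
    for j in range(n):
        if arr[i, j] != 0:
            ke[i].append(j)
            trongso[i].append(arr[i, j])

print(ke[1], trongso[1])   # neighbours and edge weights of vertex 1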
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
__version__ = "0.2.2"
__author__ = 'Anton Vanke <f@hpu.edu.cn>'
class Gobang:
"""
五子棋
=====
一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :
new(): 新局
printcb(): 打印棋盘
player(): 获取当前应落子 ID (轮走方)
sortstep(): 处理总步表
loadstep(): 将 step 步表的内容载入棋盘
recall(): 前进后退的操作
move(): 落子
iswin(): 判断是否获胜
"""
# 棋盘的边长
SIDE = 15
def new(self):
"""新局"""
self.__init__()
def printcb(self):
"""打印棋盘"""
print("\033[7;32;40m+ ", end="")
for c in range(65, 80):
print(chr(c), end=" ")
print("\033[0m\n")
for row in range(len(self.chessboard)):
print("\033[7;32;40m" + chr(row + 97), end="\033[0m ")
for i in self.chessboard[row]:
if i == 0:
print(i, end=" ")
elif i == 1:
print("\033[31m{}\033[0m".format(i), end=" ")
elif i == 2:
print("\033[34m{}\033[0m".format(i), end=" ")
print("\n")
def player(self):
"""获取玩家ID"""
return (len(self.step) % 2) + 1
def sortstep(self):
"""将总步表分配给黑白子"""
self.white, self.black = {}, {}
for s in self.step.items():
if s[0] % 2 == 1:
self.black.update({s[0]: s[1]})
else:
self.white.update({s[0]: s[1]})
def loadstep(self):
""" 载入步表
将 self.step 载入到棋盘上
"""
try:
self.chessboard = [[0 for i in range(self.SIDE)]
for j in range(self.SIDE)]
step_list = list(self.step.values()).copy()
for i in range(len(step_list)):
self.chessboard[ord(step_list[i][0]) -
97][ord(step_list[i][1]) - 97] = (i % 2) + 1
self.sortstep()
return True
except TypeError:
return False
def recall(self, s=-1):
""" 悔棋
"""
if s == -1:
try:
if len(self.max_step) < len(self.step):
self.max_step = self.step.copy()
if len(self.step) == 0:
raise KeyError
except KeyError:
return False
else:
self.step.popitem()
return self.loadstep()
# 重下
elif s == 1:
if len(self.max_step) > len(self.step):
self.step.update(
{len(self.step) + 1: self.max_step[len(self.step) + 1]})
return self.loadstep()
else:
return False
def move(self, row: int = 7, column: int = 7, **kwgs):
"""移動棋盘
row: 棋盘的行号
column: 棋盘的列号
"""
if 's' in kwgs:
row = ord(kwgs['s'][0].lower()) - 97
column = ord(kwgs['s'][1].lower()) - 97
# 判斷是否在棋盤上
if 0 <= row < self.SIDE and 0 <= column < self.SIDE:
# 判斷該位置上是否有子落過
if self.chessboard[row][column] == 0:
self.chessboard[row][column] = self.player()
self.step[len(self.step) +
1] = chr(row + 97) + chr(column + 97)
self.sortstep()
return True
return False
def iswin(self):
"""判断是否结束
"""
step_set_ls = []
cb = self.chessboard
# 将步表转换为列表
for s in self.step.values():
step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))
# print(step_set_ls)
for r, c in step_set_ls:
try:
# 判断 -- 行有 5 子
if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][
c + 1] == cb[r][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
# 判断 | 有 5 子
if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[
r + 1][c] == cb[r + 2][c] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
# 判断 \ 有 5 子
if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[
r + 1][c + 1] == cb[r + 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
try:
# 判断 / 列有 5 子
if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[
r - 1][c + 1] == cb[r - 2][c + 2] in (1, 2):
return True, cb[r][c]
except IndexError:
pass
return False, 0
def __init__(self):
# 棋盤
self.chessboard = [[0 for i in range(self.SIDE)]
for j in range(self.SIDE)]
# 總步表
self.step = {}
# 单局最长步表
self.max_step = {}
# 黑子步表
self.black = {}
# 白子步表
self.white = {}
def _test():
a = Gobang()
# 输入步表
a.step = {
1: 'no',
2: 'oo',
3: 'mn',
4: 'nn',
5: 'lm',
6: 'mm',
7: 'kl',
8: 'll',
}
# 加载
a.loadstep()
# 落子
a.move(9, 10)
# 打印棋盘
a.printcb()
# 输出输赢
print(a.iswin())
a.new()
a.printcb()
if __name__ == "__main__":
_test()
|
normal
|
{
"blob_id": "e0394bfed51cd0af9bca06867e9b556b226f37d1",
"index": 1720,
"step-1": "<mask token>\n\n\nclass Gobang:\n <mask token>\n <mask token>\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n <mask token>\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n <mask token>\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Gobang:\n <mask token>\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n a.step = {(1): 'no', (2): 'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):\n 'mm', 
(7): 'kl', (8): 'll'}\n a.loadstep()\n a.move(9, 10)\n a.printcb()\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == '__main__':\n _test()\n",
"step-4": "__version__ = '0.2.2'\n__author__ = 'Anton Vanke <f@hpu.edu.cn>'\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print('\\x1b[7;32;40m+ ', end='')\n for c in range(65, 80):\n print(chr(c), end=' ')\n print('\\x1b[0m\\n')\n for row in range(len(self.chessboard)):\n print('\\x1b[7;32;40m' + chr(row + 97), end='\\x1b[0m ')\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=' ')\n elif i == 1:\n print('\\x1b[31m{}\\x1b[0m'.format(i), end=' ')\n elif i == 2:\n print('\\x1b[34m{}\\x1b[0m'.format(i), end=' ')\n print('\\n')\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return len(self.step) % 2 + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in\n range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) - 97][ord(step_list[i]\n [1]) - 97] = i % 2 + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update({(len(self.step) + 1): self.max_step[len(\n self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int=7, column: int=7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) + 1] = chr(row + 97) + chr(column + 97\n )\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n for r, c in step_set_ls:\n try:\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][c + 1\n ] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[r + 1][c\n ] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[r + 1\n ][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[r - 1\n ][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def __init__(self):\n self.chessboard = [[(0) for i in range(self.SIDE)] for j in range(\n self.SIDE)]\n self.step = {}\n self.max_step = {}\n self.black = {}\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n a.step = {(1): 'no', (2): 
'oo', (3): 'mn', (4): 'nn', (5): 'lm', (6):\n 'mm', (7): 'kl', (8): 'll'}\n a.loadstep()\n a.move(9, 10)\n a.printcb()\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == '__main__':\n _test()\n",
"step-5": "#!/usr/bin/python3.8\n# -*- coding: utf-8 -*-\n__version__ = \"0.2.2\"\n__author__ = 'Anton Vanke <f@hpu.edu.cn>'\n\n\nclass Gobang:\n \"\"\"\n 五子棋\n =====\n 一个简单的五子棋类, 可以在控制台下五子棋. 提供以下函数 :\n\n new(): 新局\n printcb(): 打印棋盘\n player(): 获取当前应落子 ID (轮走方)\n sortstep(): 处理总步表\n loadstep(): 将 step 步表的内容载入棋盘\n recall(): 前进后退的操作\n move(): 落子\n iswin(): 判断是否获胜\n \"\"\"\n # 棋盘的边长\n SIDE = 15\n\n def new(self):\n \"\"\"新局\"\"\"\n self.__init__()\n\n def printcb(self):\n \"\"\"打印棋盘\"\"\"\n print(\"\\033[7;32;40m+ \", end=\"\")\n for c in range(65, 80):\n print(chr(c), end=\" \")\n print(\"\\033[0m\\n\")\n for row in range(len(self.chessboard)):\n print(\"\\033[7;32;40m\" + chr(row + 97), end=\"\\033[0m \")\n for i in self.chessboard[row]:\n if i == 0:\n print(i, end=\" \")\n elif i == 1:\n print(\"\\033[31m{}\\033[0m\".format(i), end=\" \")\n elif i == 2:\n print(\"\\033[34m{}\\033[0m\".format(i), end=\" \")\n print(\"\\n\")\n\n def player(self):\n \"\"\"获取玩家ID\"\"\"\n return (len(self.step) % 2) + 1\n\n def sortstep(self):\n \"\"\"将总步表分配给黑白子\"\"\"\n self.white, self.black = {}, {}\n for s in self.step.items():\n if s[0] % 2 == 1:\n self.black.update({s[0]: s[1]})\n else:\n self.white.update({s[0]: s[1]})\n\n def loadstep(self):\n \"\"\" 载入步表\n 将 self.step 载入到棋盘上\n \"\"\"\n try:\n self.chessboard = [[0 for i in range(self.SIDE)]\n for j in range(self.SIDE)]\n step_list = list(self.step.values()).copy()\n for i in range(len(step_list)):\n self.chessboard[ord(step_list[i][0]) -\n 97][ord(step_list[i][1]) - 97] = (i % 2) + 1\n self.sortstep()\n return True\n except TypeError:\n return False\n\n def recall(self, s=-1):\n \"\"\" 悔棋\n \"\"\"\n if s == -1:\n try:\n if len(self.max_step) < len(self.step):\n self.max_step = self.step.copy()\n if len(self.step) == 0:\n raise KeyError\n except KeyError:\n return False\n else:\n self.step.popitem()\n return self.loadstep()\n # 重下\n elif s == 1:\n if len(self.max_step) > len(self.step):\n self.step.update(\n {len(self.step) + 1: self.max_step[len(self.step) + 1]})\n return self.loadstep()\n else:\n return False\n\n def move(self, row: int = 7, column: int = 7, **kwgs):\n \"\"\"移動棋盘\n row: 棋盘的行号\n column: 棋盘的列号\n \"\"\"\n if 's' in kwgs:\n row = ord(kwgs['s'][0].lower()) - 97\n column = ord(kwgs['s'][1].lower()) - 97\n # 判斷是否在棋盤上\n if 0 <= row < self.SIDE and 0 <= column < self.SIDE:\n # 判斷該位置上是否有子落過\n if self.chessboard[row][column] == 0:\n self.chessboard[row][column] = self.player()\n self.step[len(self.step) +\n 1] = chr(row + 97) + chr(column + 97)\n self.sortstep()\n return True\n return False\n\n def iswin(self):\n \"\"\"判断是否结束\n \"\"\"\n step_set_ls = []\n cb = self.chessboard\n # 将步表转换为列表\n for s in self.step.values():\n step_set_ls.append((ord(s[0]) - 97, ord(s[1]) - 97))\n # print(step_set_ls)\n for r, c in step_set_ls:\n try:\n # 判断 -- 行有 5 子\n if cb[r][c - 2] == cb[r][c - 1] == cb[r][c] == cb[r][\n c + 1] == cb[r][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 | 有 5 子\n if cb[r - 2][c] == cb[r - 1][c] == cb[r][c] == cb[\n r + 1][c] == cb[r + 2][c] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 \\ 有 5 子\n if cb[r - 2][c - 2] == cb[r - 1][c - 1] == cb[r][c] == cb[\n r + 1][c + 1] == cb[r + 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n try:\n # 判断 / 列有 5 子\n if cb[r + 2][c - 2] == cb[r + 1][c - 1] == cb[r][c] == cb[\n r - 1][c + 1] == cb[r - 2][c + 2] in (1, 2):\n return True, cb[r][c]\n except IndexError:\n pass\n return False, 0\n\n def 
__init__(self):\n # 棋盤\n self.chessboard = [[0 for i in range(self.SIDE)]\n for j in range(self.SIDE)]\n # 總步表\n self.step = {}\n # 单局最长步表\n self.max_step = {}\n # 黑子步表\n self.black = {}\n # 白子步表\n self.white = {}\n\n\ndef _test():\n a = Gobang()\n # 输入步表\n a.step = {\n 1: 'no',\n 2: 'oo',\n 3: 'mn',\n 4: 'nn',\n 5: 'lm',\n 6: 'mm',\n 7: 'kl',\n 8: 'll',\n }\n # 加载\n a.loadstep()\n # 落子\n a.move(9, 10)\n # 打印棋盘\n a.printcb()\n # 输出输赢\n print(a.iswin())\n a.new()\n a.printcb()\n\n\nif __name__ == \"__main__\":\n _test()\n",
"step-ids": [
8,
11,
14,
15,
16
]
}
|
[
8,
11,
14,
15,
16
] |
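Besides the _test() driver in the record, the class accepts coordinate strings through the s= keyword of move(). A minimal two-player console loop (hypothetical driver, not part of the record):

game = Gobang()
while True:
    game.printcb()
    pos = input("player {}: move (two letters, e.g. 'hh'): ".format(game.player()))
    if len(pos) != 2 or not game.move(s=pos):
        print("invalid move, try again")
        continue
    over, winner = game.iswin()
    if over:
        game.printcb()
        print("player", winner, "wins")
        break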
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'FormHello.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FormHello(object):
def setupUi(self, FormHello):
FormHello.setObjectName("FormHello")
FormHello.resize(705, 477)
self.LabelHello = QtWidgets.QLabel(FormHello)
self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.LabelHello.setFont(font)
self.LabelHello.setObjectName("LabelHello")
self.btnClose = QtWidgets.QPushButton(FormHello)
self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))
self.btnClose.setObjectName("btnClose")
self.retranslateUi(FormHello)
QtCore.QMetaObject.connectSlotsByName(FormHello)
def retranslateUi(self, FormHello):
_translate = QtCore.QCoreApplication.translate
FormHello.setWindowTitle(_translate("FormHello", "Demo2_2"))
self.LabelHello.setText(_translate("FormHello", " Hello, by UI Designer"))
self.btnClose.setText(_translate("FormHello", "关闭"))
|
normal
|
{
"blob_id": "fc20a2bf09d510892a4d144fbbd2cb2012c3ad98",
"index": 8579,
"step-1": "<mask token>\n\n\nclass Ui_FormHello(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2'))\n self.LabelHello.setText(_translate('FormHello',\n ' Hello, by UI Designer'))\n self.btnClose.setText(_translate('FormHello', '关闭'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FormHello(object):\n\n def setupUi(self, FormHello):\n FormHello.setObjectName('FormHello')\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName('LabelHello')\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName('btnClose')\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate('FormHello', 'Demo2_2'))\n self.LabelHello.setText(_translate('FormHello',\n ' Hello, by UI Designer'))\n self.btnClose.setText(_translate('FormHello', '关闭'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'FormHello.ui'\n#\n# Created by: PyQt5 UI code generator 5.15.4\n#\n# WARNING: Any manual changes made to this file will be lost when pyuic5 is\n# run again. Do not edit this file unless you know what you are doing.\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FormHello(object):\n def setupUi(self, FormHello):\n FormHello.setObjectName(\"FormHello\")\n FormHello.resize(705, 477)\n self.LabelHello = QtWidgets.QLabel(FormHello)\n self.LabelHello.setGeometry(QtCore.QRect(190, 150, 311, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n self.LabelHello.setFont(font)\n self.LabelHello.setObjectName(\"LabelHello\")\n self.btnClose = QtWidgets.QPushButton(FormHello)\n self.btnClose.setGeometry(QtCore.QRect(300, 280, 111, 31))\n self.btnClose.setObjectName(\"btnClose\")\n\n self.retranslateUi(FormHello)\n QtCore.QMetaObject.connectSlotsByName(FormHello)\n\n def retranslateUi(self, FormHello):\n _translate = QtCore.QCoreApplication.translate\n FormHello.setWindowTitle(_translate(\"FormHello\", \"Demo2_2\"))\n self.LabelHello.setText(_translate(\"FormHello\", \" Hello, by UI Designer\"))\n self.btnClose.setText(_translate(\"FormHello\", \"关闭\"))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
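The Ui_FormHello record above is pyuic5 output: it only builds and names widgets, it never creates a window or connects signals itself. A common way to use such a generated class is to attach it to a hand-written QWidget subclass, so regenerating the .ui file never overwrites application logic. The sketch below illustrates that pattern; the import line is an assumption, since the record does not say which module the generated code lives in.

import sys
from PyQt5 import QtWidgets
# from ui_formhello import Ui_FormHello  # module name is an assumption; import from wherever the generated file lives


class FormHello(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.ui = Ui_FormHello()   # the generated class from the record above
        self.ui.setupUi(self)      # builds LabelHello and btnClose on this widget
        self.ui.btnClose.clicked.connect(self.close)  # wiring stays outside the generated file


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    form = FormHello()
    form.show()
    sys.exit(app.exec_())
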
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x_int = list(range(1, 5001))
y_int = [(i ** 3) for i in x_int]
plt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
x_int = list(range(1, 5001))
y_int = [(i ** 3) for i in x_int]
plt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
x_int = list(range(1, 5001))
y_int = [i**3 for i in x_int]
plt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)
plt.show()
|
flexible
|
{
"blob_id": "40e2b695d8aaaa82cb90694b85d12061b4e6eca8",
"index": 8034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n",
"step-3": "<mask token>\nx_int = list(range(1, 5001))\ny_int = [(i ** 3) for i in x_int]\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nx_int = list(range(1, 5001))\ny_int = [(i ** 3) for i in x_int]\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\n\nx_int = list(range(1, 5001))\ny_int = [i**3 for i in x_int]\n\nplt.scatter(x_int, y_int, c=y_int, cmap=plt.cm.Blues, s=40)\nplt.show()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# leetcode 718: Maximum Length of Repeated Subarray
# Given two integer arrays A and B, return the maximum length of a subarray that appears in both arrays.
#
# Example 1:
# Input:
# A: [1,2,3,2,1]
# B: [3,2,1,4,7]
# Output: 3
# Explanation:
# The longest common subarray is [3, 2, 1].
#
# Constraints:
# 1 <= len(A), len(B) <= 1000
# 0 <= A[i], B[i] < 100
class Solution:
def findLength(self, A: [int], B: [int])->int:
"""
        Dynamic programming: keep a table DP of common-suffix lengths,
        where DP[i][j] is the length of the common subarray ending at A[i] and B[j].
        If A[i] == B[j], then DP[i][j] = DP[i-1][j-1] + 1;
        if A[i] != B[j], then DP[i][j] = 0.
        Time complexity: O(mn)
:param A:
:param B:
:return:
"""
na = len(A)
nb = len(B)
        # matrix with na rows and nb columns
dp = [[0 for _ in range(nb)] for _ in range(na)]
for i in range(na):
for j in range(nb):
if A[i] == B[j]:
if i >= 1 and j >= 1:
dp[i][j] = 1 + dp[i-1][j-1]
else:
dp[i][j] = 1
else:
dp[i][j] = 0
max_length = max(max(row) for row in dp)
return max_length
sol = Solution()
la = [0,0,0,0,1]
lb = [1,0,0,0,0]
print(sol.findLength(la, lb))
|
normal
|
{
"blob_id": "b8219c21dc2cdd497d3de48c59c146a1fd1509ec",
"index": 6673,
"step-1": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\n<mask token>\nprint(sol.findLength(la, lb))\n",
"step-4": "class Solution:\n\n def findLength(self, A: [int], B: [int]) ->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n dp = [[(0) for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i - 1][j - 1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\nsol = Solution()\nla = [0, 0, 0, 0, 1]\nlb = [1, 0, 0, 0, 0]\nprint(sol.findLength(la, lb))\n",
"step-5": "# leetcode 718 最长重复子数组\n# 给两个整数数组 A 和 B ,返回两个数组中公共的、长度最长的子数组的长度。\n#\n# 示例 1:\n# 输入:\n# A: [1,2,3,2,1]\n# B: [3,2,1,4,7]\n# 输出: 3\n# 解释:\n# 长度最长的公共子数组是 [3, 2, 1]。\n#\n# 说明:\n# 1 <= len(A), len(B) <= 1000\n# 0 <= A[i], B[i] < 100\n\n\nclass Solution:\n def findLength(self, A: [int], B: [int])->int:\n \"\"\"\n 动态规划, 维护一个公共子串长度表DP\n DP[i][j]表示A中以第i个元素,B中以第j个元素结尾的公共子串长度\n 如果A[i]==B[j], DP[i][j]=DP[i-1][j-1]+1\n 如果A[i]==B[j], DP[i][j]=0\n 时间复杂度为:O(mn)\n :param A:\n :param B:\n :return:\n \"\"\"\n na = len(A)\n nb = len(B)\n # na行,nb列的矩阵\n dp = [[0 for _ in range(nb)] for _ in range(na)]\n for i in range(na):\n for j in range(nb):\n if A[i] == B[j]:\n if i >= 1 and j >= 1:\n dp[i][j] = 1 + dp[i-1][j-1]\n else:\n dp[i][j] = 1\n else:\n dp[i][j] = 0\n max_length = max(max(row) for row in dp)\n return max_length\n\n\nsol = Solution()\nla = [0,0,0,0,1]\nlb = [1,0,0,0,0]\nprint(sol.findLength(la, lb))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
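The table in the record above only ever reads the cell diagonally up-left, so the same recurrence also works with a single rolling row, cutting space from O(na*nb) to O(nb). The sketch below is an optional illustration of that, not part of the original solution; iterating j backwards keeps dp[j-1] holding the previous row's value until it is needed.

def find_length_rolling(A, B):
    """Same recurrence as Solution.findLength, with one rolling row (dp[j] pairs with B[j-1])."""
    dp = [0] * (len(B) + 1)
    best = 0
    for i in range(len(A)):
        for j in range(len(B), 0, -1):  # backwards so dp[j-1] still holds the previous row's value
            if A[i] == B[j - 1]:
                dp[j] = dp[j - 1] + 1
                best = max(best, dp[j])
            else:
                dp[j] = 0
    return best


# same inputs as the record's test: the longest common subarray is [0, 0, 0, 0], length 4
print(find_length_rolling([0, 0, 0, 0, 1], [1, 0, 0, 0, 0]))
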
<|reserved_special_token_0|>
class Ui_Tab(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Tab(object):
<|reserved_special_token_0|>
def retranslateUi(self, Tab):
_translate = QtCore.QCoreApplication.translate
Tab.setWindowTitle(_translate('Tab', 'Form'))
self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_Tab(object):
def setupUi(self, Tab):
Tab.setObjectName('Tab')
Tab.resize(762, 523)
self.verticalLayout = QtWidgets.QVBoxLayout(Tab)
self.verticalLayout.setObjectName('verticalLayout')
self.hLayout = QtWidgets.QHBoxLayout()
self.hLayout.setObjectName('hLayout')
self.btn_enterPassword = QtWidgets.QPushButton(Tab)
self.btn_enterPassword.setObjectName('btn_enterPassword')
self.hLayout.addWidget(self.btn_enterPassword)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.
Expanding, QtWidgets.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.hLayout)
self.scrollArea = QtWidgets.QScrollArea(Tab)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName('scrollArea')
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))
self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')
self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setObjectName('gridLayout')
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.retranslateUi(Tab)
QtCore.QMetaObject.connectSlotsByName(Tab)
def retranslateUi(self, Tab):
_translate = QtCore.QCoreApplication.translate
Tab.setWindowTitle(_translate('Tab', 'Form'))
self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Tab(object):
def setupUi(self, Tab):
Tab.setObjectName('Tab')
Tab.resize(762, 523)
self.verticalLayout = QtWidgets.QVBoxLayout(Tab)
self.verticalLayout.setObjectName('verticalLayout')
self.hLayout = QtWidgets.QHBoxLayout()
self.hLayout.setObjectName('hLayout')
self.btn_enterPassword = QtWidgets.QPushButton(Tab)
self.btn_enterPassword.setObjectName('btn_enterPassword')
self.hLayout.addWidget(self.btn_enterPassword)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.
Expanding, QtWidgets.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.hLayout)
self.scrollArea = QtWidgets.QScrollArea(Tab)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName('scrollArea')
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))
self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')
self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setObjectName('gridLayout')
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.retranslateUi(Tab)
QtCore.QMetaObject.connectSlotsByName(Tab)
def retranslateUi(self, Tab):
_translate = QtCore.QCoreApplication.translate
Tab.setWindowTitle(_translate('Tab', 'Form'))
self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/ui_LibraryTab.ui'
#
# Created: Tue Jun 9 21:46:41 2015
# by: PyQt5 UI code generator 5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Tab(object):
def setupUi(self, Tab):
Tab.setObjectName("Tab")
Tab.resize(762, 523)
self.verticalLayout = QtWidgets.QVBoxLayout(Tab)
self.verticalLayout.setObjectName("verticalLayout")
self.hLayout = QtWidgets.QHBoxLayout()
self.hLayout.setObjectName("hLayout")
self.btn_enterPassword = QtWidgets.QPushButton(Tab)
self.btn_enterPassword.setObjectName("btn_enterPassword")
self.hLayout.addWidget(self.btn_enterPassword)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.hLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.hLayout)
self.scrollArea = QtWidgets.QScrollArea(Tab)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout.setObjectName("gridLayout")
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout.addWidget(self.scrollArea)
self.retranslateUi(Tab)
QtCore.QMetaObject.connectSlotsByName(Tab)
def retranslateUi(self, Tab):
_translate = QtCore.QCoreApplication.translate
Tab.setWindowTitle(_translate("Tab", "Form"))
self.btn_enterPassword.setText(_translate("Tab", "Enter Password"))
|
flexible
|
{
"blob_id": "ef85f94282bfd7c9491c4e28bab61aaab5c792a5",
"index": 232,
"step-1": "<mask token>\n\n\nclass Ui_Tab(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_Tab(object):\n <mask token>\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate('Tab', 'Form'))\n self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))\n",
"step-3": "<mask token>\n\n\nclass Ui_Tab(object):\n\n def setupUi(self, Tab):\n Tab.setObjectName('Tab')\n Tab.resize(762, 523)\n self.verticalLayout = QtWidgets.QVBoxLayout(Tab)\n self.verticalLayout.setObjectName('verticalLayout')\n self.hLayout = QtWidgets.QHBoxLayout()\n self.hLayout.setObjectName('hLayout')\n self.btn_enterPassword = QtWidgets.QPushButton(Tab)\n self.btn_enterPassword.setObjectName('btn_enterPassword')\n self.hLayout.addWidget(self.btn_enterPassword)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.\n Expanding, QtWidgets.QSizePolicy.Minimum)\n self.hLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.hLayout)\n self.scrollArea = QtWidgets.QScrollArea(Tab)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName('scrollArea')\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))\n self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')\n self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)\n self.gridLayout.setObjectName('gridLayout')\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.scrollArea)\n self.retranslateUi(Tab)\n QtCore.QMetaObject.connectSlotsByName(Tab)\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate('Tab', 'Form'))\n self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Tab(object):\n\n def setupUi(self, Tab):\n Tab.setObjectName('Tab')\n Tab.resize(762, 523)\n self.verticalLayout = QtWidgets.QVBoxLayout(Tab)\n self.verticalLayout.setObjectName('verticalLayout')\n self.hLayout = QtWidgets.QHBoxLayout()\n self.hLayout.setObjectName('hLayout')\n self.btn_enterPassword = QtWidgets.QPushButton(Tab)\n self.btn_enterPassword.setObjectName('btn_enterPassword')\n self.hLayout.addWidget(self.btn_enterPassword)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.\n Expanding, QtWidgets.QSizePolicy.Minimum)\n self.hLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.hLayout)\n self.scrollArea = QtWidgets.QScrollArea(Tab)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName('scrollArea')\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))\n self.scrollAreaWidgetContents.setObjectName('scrollAreaWidgetContents')\n self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)\n self.gridLayout.setObjectName('gridLayout')\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.scrollArea)\n self.retranslateUi(Tab)\n QtCore.QMetaObject.connectSlotsByName(Tab)\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate('Tab', 'Form'))\n self.btn_enterPassword.setText(_translate('Tab', 'Enter Password'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'src/ui_LibraryTab.ui'\n#\n# Created: Tue Jun 9 21:46:41 2015\n# by: PyQt5 UI code generator 5.4\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Tab(object):\n def setupUi(self, Tab):\n Tab.setObjectName(\"Tab\")\n Tab.resize(762, 523)\n self.verticalLayout = QtWidgets.QVBoxLayout(Tab)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.hLayout = QtWidgets.QHBoxLayout()\n self.hLayout.setObjectName(\"hLayout\")\n self.btn_enterPassword = QtWidgets.QPushButton(Tab)\n self.btn_enterPassword.setObjectName(\"btn_enterPassword\")\n self.hLayout.addWidget(self.btn_enterPassword)\n spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n self.hLayout.addItem(spacerItem)\n self.verticalLayout.addLayout(self.hLayout)\n self.scrollArea = QtWidgets.QScrollArea(Tab)\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName(\"scrollArea\")\n self.scrollAreaWidgetContents = QtWidgets.QWidget()\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 736, 455))\n self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")\n self.gridLayout = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n self.verticalLayout.addWidget(self.scrollArea)\n\n self.retranslateUi(Tab)\n QtCore.QMetaObject.connectSlotsByName(Tab)\n\n def retranslateUi(self, Tab):\n _translate = QtCore.QCoreApplication.translate\n Tab.setWindowTitle(_translate(\"Tab\", \"Form\"))\n self.btn_enterPassword.setText(_translate(\"Tab\", \"Enter Password\"))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-08-03 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Proceso',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Nombre')),
('description', models.CharField(max_length=256, verbose_name='Descripci\xf3n')),
('deleted', models.BooleanField(default=False)),
],
),
]
|
normal
|
{
"blob_id": "f15ce7cec032ace65604771fa56e3d9969c98209",
"index": 1964,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Proceso', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128, verbose_name='Nombre')), ('description', models.CharField(\n max_length=256, verbose_name='Descripción')), ('deleted', models.\n BooleanField(default=False))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Proceso', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128, verbose_name='Nombre')), ('description', models.CharField(\n max_length=256, verbose_name='Descripción')), ('deleted', models.\n BooleanField(default=False))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.10 on 2018-08-03 10:25\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Proceso',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128, verbose_name='Nombre')),\n ('description', models.CharField(max_length=256, verbose_name='Descripci\\xf3n')),\n ('deleted', models.BooleanField(default=False)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Project(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Holidays(models.Model):
holidays = models.DateField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Project(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_absolute_url(self):
return reverse('project')
class Holidays(models.Model):
holidays = models.DateField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Project(models.Model):
actual_developer = models.ForeignKey(User, null=True, blank=True,
on_delete=models.CASCADE)
projects_name = models.CharField(max_length=100)
project_hours = models.CharField(max_length=100)
developer_name = models.CharField(max_length=255)
Month_Cycle = models.CharField(max_length=1000, blank=True, null=True)
mailing_hrs = models.CharField(max_length=100, null=True, blank=True)
developer_email = models.EmailField()
expected_daily_hours = models.CharField(max_length=200, null=True,
blank=True)
expected_cycle_hours = models.CharField(max_length=200, null=True,
default='176 Hr')
cycle_hour_diff = models.IntegerField(null=True, default=0)
def get_absolute_url(self):
return reverse('project')
class Holidays(models.Model):
holidays = models.DateField()
<|reserved_special_token_1|>
from django.db import models
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Project(models.Model):
actual_developer = models.ForeignKey(User, null=True, blank=True,
on_delete=models.CASCADE)
projects_name = models.CharField(max_length=100)
project_hours = models.CharField(max_length=100)
developer_name = models.CharField(max_length=255)
Month_Cycle = models.CharField(max_length=1000, blank=True, null=True)
mailing_hrs = models.CharField(max_length=100, null=True, blank=True)
developer_email = models.EmailField()
expected_daily_hours = models.CharField(max_length=200, null=True,
blank=True)
expected_cycle_hours = models.CharField(max_length=200, null=True,
default='176 Hr')
cycle_hour_diff = models.IntegerField(null=True, default=0)
def get_absolute_url(self):
return reverse('project')
class Holidays(models.Model):
holidays = models.DateField()
<|reserved_special_token_1|>
from django.db import models
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class Project(models.Model):
actual_developer = models.ForeignKey(User,null = True,blank=True, on_delete=models.CASCADE)
# actual_developer = models.CharField(User,null = True,blank=True, max_length=200)
projects_name = models.CharField(max_length=100)
project_hours = models.CharField(max_length=100)
developer_name = models.CharField(max_length=255)
Month_Cycle = models.CharField(max_length = 1000, blank=True, null=True)
mailing_hrs = models.CharField(max_length=100,null=True,blank=True)
developer_email = models.EmailField()
expected_daily_hours = models.CharField(max_length=200, null=True, blank=True)
expected_cycle_hours = models.CharField(max_length=200, null=True, default = "176 Hr")
cycle_hour_diff = models.IntegerField(null=True, default=0)
def get_absolute_url(self):
return reverse('project')
class Holidays(models.Model):
holidays = models.DateField()
|
flexible
|
{
"blob_id": "ac1d38f550e548dff6ba226dbfc3dd1e5ff876a8",
"index": 5563,
"step-1": "<mask token>\n\n\nclass Project(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-2": "<mask token>\n\n\nclass Project(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('project')\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-3": "<mask token>\n\n\nclass Project(models.Model):\n actual_developer = models.ForeignKey(User, null=True, blank=True,\n on_delete=models.CASCADE)\n projects_name = models.CharField(max_length=100)\n project_hours = models.CharField(max_length=100)\n developer_name = models.CharField(max_length=255)\n Month_Cycle = models.CharField(max_length=1000, blank=True, null=True)\n mailing_hrs = models.CharField(max_length=100, null=True, blank=True)\n developer_email = models.EmailField()\n expected_daily_hours = models.CharField(max_length=200, null=True,\n blank=True)\n expected_cycle_hours = models.CharField(max_length=200, null=True,\n default='176 Hr')\n cycle_hour_diff = models.IntegerField(null=True, default=0)\n\n def get_absolute_url(self):\n return reverse('project')\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-4": "from django.db import models\nfrom django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\n\nclass Project(models.Model):\n actual_developer = models.ForeignKey(User, null=True, blank=True,\n on_delete=models.CASCADE)\n projects_name = models.CharField(max_length=100)\n project_hours = models.CharField(max_length=100)\n developer_name = models.CharField(max_length=255)\n Month_Cycle = models.CharField(max_length=1000, blank=True, null=True)\n mailing_hrs = models.CharField(max_length=100, null=True, blank=True)\n developer_email = models.EmailField()\n expected_daily_hours = models.CharField(max_length=200, null=True,\n blank=True)\n expected_cycle_hours = models.CharField(max_length=200, null=True,\n default='176 Hr')\n cycle_hour_diff = models.IntegerField(null=True, default=0)\n\n def get_absolute_url(self):\n return reverse('project')\n\n\nclass Holidays(models.Model):\n holidays = models.DateField()\n",
"step-5": "from django.db import models\nfrom django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\n\nfrom django.urls import reverse\n\n\nclass Project(models.Model):\n\tactual_developer = models.ForeignKey(User,null = True,blank=True, on_delete=models.CASCADE)\n\t# actual_developer = models.CharField(User,null = True,blank=True, max_length=200)\n\tprojects_name = models.CharField(max_length=100)\n\tproject_hours = models.CharField(max_length=100)\n\tdeveloper_name = models.CharField(max_length=255)\n\tMonth_Cycle = models.CharField(max_length = 1000, blank=True, null=True)\n\tmailing_hrs = models.CharField(max_length=100,null=True,blank=True)\n\tdeveloper_email = models.EmailField()\n\texpected_daily_hours = models.CharField(max_length=200, null=True, blank=True)\n\texpected_cycle_hours = models.CharField(max_length=200, null=True, default = \"176 Hr\")\n\tcycle_hour_diff = models.IntegerField(null=True, default=0)\n\n\tdef get_absolute_url(self):\n\t\treturn reverse('project')\n\nclass Holidays(models.Model):\n\tholidays = models.DateField()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class ConfusionMatrix(Metric):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def draw(self, size=400):
index_label = 'Predicted'
column_label = 'Actual'
matrix = self.generate_data()
min_val, max_val = matrix.Value.min(), matrix.Value.max()
source = ColumnDataSource(matrix)
mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)
hover = HoverTool(tooltips=[('Number', f'@Value')])
p = figure(plot_width=size, plot_height=size, title=
'Confusion Matrix', tools=[hover], toolbar_location=None,
x_range=self.labels, y_range=list(reversed(self.labels)))
p.yaxis.axis_label = index_label
p.xaxis.axis_label = column_label
p.rect(x=column_label, y=index_label, width=1, height=1, source=
source, fill_color=transform('Value', mapper))
self.plot = p
return p
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConfusionMatrix(Metric):
<|reserved_special_token_0|>
def generate_data(self):
matrix = confusion_matrix(self.y, self.y_pred)
matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)
matrix.index.name = 'Predicted'
matrix.columns.name = 'Actual'
return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()
def draw(self, size=400):
index_label = 'Predicted'
column_label = 'Actual'
matrix = self.generate_data()
min_val, max_val = matrix.Value.min(), matrix.Value.max()
source = ColumnDataSource(matrix)
mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)
hover = HoverTool(tooltips=[('Number', f'@Value')])
p = figure(plot_width=size, plot_height=size, title=
'Confusion Matrix', tools=[hover], toolbar_location=None,
x_range=self.labels, y_range=list(reversed(self.labels)))
p.yaxis.axis_label = index_label
p.xaxis.axis_label = column_label
p.rect(x=column_label, y=index_label, width=1, height=1, source=
source, fill_color=transform('Value', mapper))
self.plot = p
return p
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConfusionMatrix(Metric):
def __init__(self):
super().__init__('confusion-matrix')
def generate_data(self):
matrix = confusion_matrix(self.y, self.y_pred)
matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)
matrix.index.name = 'Predicted'
matrix.columns.name = 'Actual'
return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()
def draw(self, size=400):
index_label = 'Predicted'
column_label = 'Actual'
matrix = self.generate_data()
min_val, max_val = matrix.Value.min(), matrix.Value.max()
source = ColumnDataSource(matrix)
mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)
hover = HoverTool(tooltips=[('Number', f'@Value')])
p = figure(plot_width=size, plot_height=size, title=
'Confusion Matrix', tools=[hover], toolbar_location=None,
x_range=self.labels, y_range=list(reversed(self.labels)))
p.yaxis.axis_label = index_label
p.xaxis.axis_label = column_label
p.rect(x=column_label, y=index_label, width=1, height=1, source=
source, fill_color=transform('Value', mapper))
self.plot = p
return p
<|reserved_special_token_1|>
import pandas as pd
from bokeh.models import ColumnDataSource, LinearColorMapper, HoverTool
from bokeh.plotting import figure
from bokeh.transform import transform
from sklearn.metrics import confusion_matrix
from reporter.settings import COLORS
from reporter.metrics import Metric
class ConfusionMatrix(Metric):
def __init__(self):
super().__init__('confusion-matrix')
def generate_data(self):
matrix = confusion_matrix(self.y, self.y_pred)
matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)
matrix.index.name = 'Predicted'
matrix.columns.name = 'Actual'
return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()
def draw(self, size=400):
index_label = 'Predicted'
column_label = 'Actual'
matrix = self.generate_data()
min_val, max_val = matrix.Value.min(), matrix.Value.max()
source = ColumnDataSource(matrix)
mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)
hover = HoverTool(tooltips=[('Number', f'@Value')])
p = figure(plot_width=size, plot_height=size, title=
'Confusion Matrix', tools=[hover], toolbar_location=None,
x_range=self.labels, y_range=list(reversed(self.labels)))
p.yaxis.axis_label = index_label
p.xaxis.axis_label = column_label
p.rect(x=column_label, y=index_label, width=1, height=1, source=
source, fill_color=transform('Value', mapper))
self.plot = p
return p
<|reserved_special_token_1|>
import pandas as pd
from bokeh.models import ColumnDataSource, LinearColorMapper, HoverTool
from bokeh.plotting import figure
from bokeh.transform import transform
from sklearn.metrics import confusion_matrix
from reporter.settings import COLORS
from reporter.metrics import Metric
class ConfusionMatrix(Metric):
def __init__(self):
super().__init__('confusion-matrix')
def generate_data(self):
matrix = confusion_matrix(self.y, self.y_pred)
matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)
matrix.index.name = 'Predicted'
matrix.columns.name = 'Actual'
return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()
def draw(self, size=400):
index_label = 'Predicted'
column_label = 'Actual'
matrix = self.generate_data()
min_val, max_val = matrix.Value.min(), matrix.Value.max()
source = ColumnDataSource(matrix)
mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)
hover = HoverTool(tooltips=[
('Number', f"@Value")
])
p = figure(plot_width=size,
plot_height=size,
title='Confusion Matrix',
tools=[hover],
toolbar_location=None,
x_range=self.labels,
y_range=list(reversed(self.labels)))
p.yaxis.axis_label = index_label
p.xaxis.axis_label = column_label
p.rect(x=column_label,
y=index_label,
width=1,
height=1,
source=source,
fill_color=transform('Value', mapper))
self.plot = p
return p
|
flexible
|
{
"blob_id": "9a2002b5ff0fe41f2b5b568f4c278d4376bf4fb1",
"index": 6117,
"step-1": "<mask token>\n\n\nclass ConfusionMatrix(Metric):\n <mask token>\n <mask token>\n\n def draw(self, size=400):\n index_label = 'Predicted'\n column_label = 'Actual'\n matrix = self.generate_data()\n min_val, max_val = matrix.Value.min(), matrix.Value.max()\n source = ColumnDataSource(matrix)\n mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)\n hover = HoverTool(tooltips=[('Number', f'@Value')])\n p = figure(plot_width=size, plot_height=size, title=\n 'Confusion Matrix', tools=[hover], toolbar_location=None,\n x_range=self.labels, y_range=list(reversed(self.labels)))\n p.yaxis.axis_label = index_label\n p.xaxis.axis_label = column_label\n p.rect(x=column_label, y=index_label, width=1, height=1, source=\n source, fill_color=transform('Value', mapper))\n self.plot = p\n return p\n",
"step-2": "<mask token>\n\n\nclass ConfusionMatrix(Metric):\n <mask token>\n\n def generate_data(self):\n matrix = confusion_matrix(self.y, self.y_pred)\n matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)\n matrix.index.name = 'Predicted'\n matrix.columns.name = 'Actual'\n return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()\n\n def draw(self, size=400):\n index_label = 'Predicted'\n column_label = 'Actual'\n matrix = self.generate_data()\n min_val, max_val = matrix.Value.min(), matrix.Value.max()\n source = ColumnDataSource(matrix)\n mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)\n hover = HoverTool(tooltips=[('Number', f'@Value')])\n p = figure(plot_width=size, plot_height=size, title=\n 'Confusion Matrix', tools=[hover], toolbar_location=None,\n x_range=self.labels, y_range=list(reversed(self.labels)))\n p.yaxis.axis_label = index_label\n p.xaxis.axis_label = column_label\n p.rect(x=column_label, y=index_label, width=1, height=1, source=\n source, fill_color=transform('Value', mapper))\n self.plot = p\n return p\n",
"step-3": "<mask token>\n\n\nclass ConfusionMatrix(Metric):\n\n def __init__(self):\n super().__init__('confusion-matrix')\n\n def generate_data(self):\n matrix = confusion_matrix(self.y, self.y_pred)\n matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)\n matrix.index.name = 'Predicted'\n matrix.columns.name = 'Actual'\n return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()\n\n def draw(self, size=400):\n index_label = 'Predicted'\n column_label = 'Actual'\n matrix = self.generate_data()\n min_val, max_val = matrix.Value.min(), matrix.Value.max()\n source = ColumnDataSource(matrix)\n mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)\n hover = HoverTool(tooltips=[('Number', f'@Value')])\n p = figure(plot_width=size, plot_height=size, title=\n 'Confusion Matrix', tools=[hover], toolbar_location=None,\n x_range=self.labels, y_range=list(reversed(self.labels)))\n p.yaxis.axis_label = index_label\n p.xaxis.axis_label = column_label\n p.rect(x=column_label, y=index_label, width=1, height=1, source=\n source, fill_color=transform('Value', mapper))\n self.plot = p\n return p\n",
"step-4": "import pandas as pd\nfrom bokeh.models import ColumnDataSource, LinearColorMapper, HoverTool\nfrom bokeh.plotting import figure\nfrom bokeh.transform import transform\nfrom sklearn.metrics import confusion_matrix\nfrom reporter.settings import COLORS\nfrom reporter.metrics import Metric\n\n\nclass ConfusionMatrix(Metric):\n\n def __init__(self):\n super().__init__('confusion-matrix')\n\n def generate_data(self):\n matrix = confusion_matrix(self.y, self.y_pred)\n matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)\n matrix.index.name = 'Predicted'\n matrix.columns.name = 'Actual'\n return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()\n\n def draw(self, size=400):\n index_label = 'Predicted'\n column_label = 'Actual'\n matrix = self.generate_data()\n min_val, max_val = matrix.Value.min(), matrix.Value.max()\n source = ColumnDataSource(matrix)\n mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)\n hover = HoverTool(tooltips=[('Number', f'@Value')])\n p = figure(plot_width=size, plot_height=size, title=\n 'Confusion Matrix', tools=[hover], toolbar_location=None,\n x_range=self.labels, y_range=list(reversed(self.labels)))\n p.yaxis.axis_label = index_label\n p.xaxis.axis_label = column_label\n p.rect(x=column_label, y=index_label, width=1, height=1, source=\n source, fill_color=transform('Value', mapper))\n self.plot = p\n return p\n",
"step-5": "import pandas as pd\nfrom bokeh.models import ColumnDataSource, LinearColorMapper, HoverTool\nfrom bokeh.plotting import figure\nfrom bokeh.transform import transform\nfrom sklearn.metrics import confusion_matrix\nfrom reporter.settings import COLORS\nfrom reporter.metrics import Metric\n\n\nclass ConfusionMatrix(Metric):\n def __init__(self):\n super().__init__('confusion-matrix')\n\n def generate_data(self):\n matrix = confusion_matrix(self.y, self.y_pred)\n matrix = pd.DataFrame(matrix, index=self.labels, columns=self.labels)\n matrix.index.name = 'Predicted'\n matrix.columns.name = 'Actual'\n return pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()\n\n def draw(self, size=400):\n index_label = 'Predicted'\n column_label = 'Actual'\n\n matrix = self.generate_data()\n min_val, max_val = matrix.Value.min(), matrix.Value.max()\n source = ColumnDataSource(matrix)\n mapper = LinearColorMapper(palette=COLORS, low=min_val, high=max_val)\n\n hover = HoverTool(tooltips=[\n ('Number', f\"@Value\")\n ])\n\n p = figure(plot_width=size,\n plot_height=size,\n title='Confusion Matrix',\n tools=[hover],\n toolbar_location=None,\n x_range=self.labels,\n y_range=list(reversed(self.labels)))\n\n p.yaxis.axis_label = index_label\n p.xaxis.axis_label = column_label\n\n p.rect(x=column_label,\n y=index_label,\n width=1,\n height=1,\n source=source,\n fill_color=transform('Value', mapper))\n self.plot = p\n return p\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
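draw() in the ConfusionMatrix record consumes the stacked frame built by generate_data(): one row per (Predicted, Actual) pair plus a Value count, which is exactly what the rect() glyph and the hover tooltip read. The standalone sketch below reproduces that shape with made-up labels; the y, y_pred and labels attributes are assumed to come from the Metric base class, which this record does not show. Note also that sklearn's confusion_matrix puts true labels on the rows, so the record's Predicted/Actual naming is the transpose of sklearn's convention.

import pandas as pd
from sklearn.metrics import confusion_matrix

# stand-ins for the attributes the Metric base class is assumed to provide
labels = ['cat', 'dog']
y = ['cat', 'cat', 'dog', 'dog', 'dog']
y_pred = ['cat', 'dog', 'dog', 'dog', 'cat']

matrix = confusion_matrix(y, y_pred, labels=labels)
matrix = pd.DataFrame(matrix, index=labels, columns=labels)
matrix.index.name = 'Predicted'
matrix.columns.name = 'Actual'
stacked = pd.DataFrame(matrix.stack(), columns=['Value']).reset_index()
print(stacked)  # columns: Predicted, Actual, Value -- the names draw() maps to x, y and fill colour
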
<|reserved_special_token_0|>
def install_mysql(package='mysql-server', sources=None, keys=None):
if not sources:
sources = []
if not keys:
keys = []
from subprocess import Popen, PIPE
for source in sources:
add_source(source)
if sources:
apt_update()
with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:
root_pass = rpw.read()
dconf = Popen(['debconf-set-selections'], stdin=PIPE)
dconf.stdin.write('%s %s/root_password password %s\n' % (package,
package, root_pass))
dconf.stdin.write('%s %s/root_password_again password %s\n' % (package,
package, root_pass))
dconf.communicate()
dconf.wait()
apt_install(package)
def build_mycnf(cfg):
i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')
unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]
if i_am_a_slave and cfg.get('tuning-level') != 'fast':
render(source='mysql/binlog.cnf', target=
'/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,
'format': cfg.get('binlog-format', 'MIXED')})
render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)
def human_to_bytes(human):
if human.isdigit():
return human
factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}
modifier = human[-1]
if modifier.lower() in factors:
return int(human[:-1]) * factors[modifier.lower()]
raise ValueError('Can only convert K, M, G, and T')
def dataset_size(size, page):
if not size.endswith('%'):
return human_to_bytes(size)
total_mem = human_to_bytes(get_memtotal())
sys_mem_limit = mem_limit()
if is_32bits() and total_mem > sys_mem_limit:
total_ram = sys_mem_limit
factor = int(size[:-1]) * 0.01
pctram = sys_mem_limit * factor
return int(pctram - pctram % page)
def is_32bits():
try:
IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0
except OverflowError:
IS_32BIT_SYSTEM = True
return IS_32BIT_SYSTEM
<|reserved_special_token_0|>
def get_memtotal():
with open('/proc/meminfo') as meminfo_file:
for line in meminfo_file:
key, mem = line.split(':', 2)
if key == 'MemTotal':
mtot, modifier = mem.strip().split(' ')
return '%s%s' % (mtot, modifier[0].upper())
def get_db_helper():
return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',
upasswdf_template='/var/lib/mysql/mysql-{}.passwd',
delete_ondisk_passwd_file=False)
def get_db_cursor():
import MySQLdb
db_helper = get_db_helper()
passwd = db_helper.get_mysql_root_password()
connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)
return connection.cursor()
<|reserved_special_token_0|>
def grant_database(database, user, password):
cursor = get_db_cursor()
cursor.execute("grant all on `%s`.* to `%s` identified by '%s'" % (
database, user, password))
cursor.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def install_mysql(package='mysql-server', sources=None, keys=None):
if not sources:
sources = []
if not keys:
keys = []
from subprocess import Popen, PIPE
for source in sources:
add_source(source)
if sources:
apt_update()
with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:
root_pass = rpw.read()
dconf = Popen(['debconf-set-selections'], stdin=PIPE)
dconf.stdin.write('%s %s/root_password password %s\n' % (package,
package, root_pass))
dconf.stdin.write('%s %s/root_password_again password %s\n' % (package,
package, root_pass))
dconf.communicate()
dconf.wait()
apt_install(package)
def build_mycnf(cfg):
i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')
unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]
if i_am_a_slave and cfg.get('tuning-level') != 'fast':
render(source='mysql/binlog.cnf', target=
'/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,
'format': cfg.get('binlog-format', 'MIXED')})
render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)
def human_to_bytes(human):
if human.isdigit():
return human
factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}
modifier = human[-1]
if modifier.lower() in factors:
return int(human[:-1]) * factors[modifier.lower()]
raise ValueError('Can only convert K, M, G, and T')
def dataset_size(size, page):
if not size.endswith('%'):
return human_to_bytes(size)
total_mem = human_to_bytes(get_memtotal())
sys_mem_limit = mem_limit()
if is_32bits() and total_mem > sys_mem_limit:
total_ram = sys_mem_limit
factor = int(size[:-1]) * 0.01
pctram = sys_mem_limit * factor
return int(pctram - pctram % page)
def is_32bits():
try:
IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0
except OverflowError:
IS_32BIT_SYSTEM = True
return IS_32BIT_SYSTEM
<|reserved_special_token_0|>
def get_memtotal():
with open('/proc/meminfo') as meminfo_file:
for line in meminfo_file:
key, mem = line.split(':', 2)
if key == 'MemTotal':
mtot, modifier = mem.strip().split(' ')
return '%s%s' % (mtot, modifier[0].upper())
def get_db_helper():
return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',
upasswdf_template='/var/lib/mysql/mysql-{}.passwd',
delete_ondisk_passwd_file=False)
def get_db_cursor():
import MySQLdb
db_helper = get_db_helper()
passwd = db_helper.get_mysql_root_password()
connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)
return connection.cursor()
def create_database(name):
cursor = get_db_cursor()
cursor.execute("show databases like '%s'" % name)
if cursor.fetchall():
return name
cursor.execute('create database `%s` character set utf8' % name)
cursor.close()
return name
<|reserved_special_token_0|>
def grant_database(database, user, password):
cursor = get_db_cursor()
cursor.execute("grant all on `%s`.* to `%s` identified by '%s'" % (
database, user, password))
cursor.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def install_mysql(package='mysql-server', sources=None, keys=None):
if not sources:
sources = []
if not keys:
keys = []
from subprocess import Popen, PIPE
for source in sources:
add_source(source)
if sources:
apt_update()
with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:
root_pass = rpw.read()
dconf = Popen(['debconf-set-selections'], stdin=PIPE)
dconf.stdin.write('%s %s/root_password password %s\n' % (package,
package, root_pass))
dconf.stdin.write('%s %s/root_password_again password %s\n' % (package,
package, root_pass))
dconf.communicate()
dconf.wait()
apt_install(package)
def build_mycnf(cfg):
i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')
unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]
if i_am_a_slave and cfg.get('tuning-level') != 'fast':
render(source='mysql/binlog.cnf', target=
'/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,
'format': cfg.get('binlog-format', 'MIXED')})
render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)
def human_to_bytes(human):
if human.isdigit():
return human
factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}
modifier = human[-1]
if modifier.lower() in factors:
return int(human[:-1]) * factors[modifier.lower()]
raise ValueError('Can only convert K, M, G, and T')
def dataset_size(size, page):
if not size.endswith('%'):
return human_to_bytes(size)
total_mem = human_to_bytes(get_memtotal())
sys_mem_limit = mem_limit()
if is_32bits() and total_mem > sys_mem_limit:
total_ram = sys_mem_limit
factor = int(size[:-1]) * 0.01
pctram = sys_mem_limit * factor
return int(pctram - pctram % page)
def is_32bits():
try:
IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0
except OverflowError:
IS_32BIT_SYSTEM = True
return IS_32BIT_SYSTEM
def mem_limit():
import platform
SYS_MEM_LIMIT = human_to_bytes(get_memtotal())
if platform.machine() in ['armv7l']:
SYS_MEM_LIMIT = human_to_bytes('2700M')
elif is_32bits():
SYS_MEM_LIMIT = human_to_bytes('4G')
return SYS_MEM_LIMIT
def get_memtotal():
with open('/proc/meminfo') as meminfo_file:
for line in meminfo_file:
key, mem = line.split(':', 2)
if key == 'MemTotal':
mtot, modifier = mem.strip().split(' ')
return '%s%s' % (mtot, modifier[0].upper())
def get_db_helper():
return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',
upasswdf_template='/var/lib/mysql/mysql-{}.passwd',
delete_ondisk_passwd_file=False)
def get_db_cursor():
import MySQLdb
db_helper = get_db_helper()
passwd = db_helper.get_mysql_root_password()
connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)
return connection.cursor()
def create_database(name):
cursor = get_db_cursor()
cursor.execute("show databases like '%s'" % name)
if cursor.fetchall():
return name
cursor.execute('create database `%s` character set utf8' % name)
cursor.close()
return name
<|reserved_special_token_0|>
def grant_database(database, user, password):
cursor = get_db_cursor()
cursor.execute("grant all on `%s`.* to `%s` identified by '%s'" % (
database, user, password))
cursor.close()
<|reserved_special_token_1|>
import os
import sys
from subprocess import check_output
from charmhelpers.fetch import apt_install, apt_update, add_source
from charmhelpers.core.templating import render
from charmhelpers.contrib.database.mysql import MySQLHelper
def install_mysql(package='mysql-server', sources=None, keys=None):
if not sources:
sources = []
if not keys:
keys = []
from subprocess import Popen, PIPE
for source in sources:
add_source(source)
if sources:
apt_update()
with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:
root_pass = rpw.read()
dconf = Popen(['debconf-set-selections'], stdin=PIPE)
dconf.stdin.write('%s %s/root_password password %s\n' % (package,
package, root_pass))
dconf.stdin.write('%s %s/root_password_again password %s\n' % (package,
package, root_pass))
dconf.communicate()
dconf.wait()
apt_install(package)
def build_mycnf(cfg):
i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')
unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]
if i_am_a_slave and cfg.get('tuning-level') != 'fast':
render(source='mysql/binlog.cnf', target=
'/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,
'format': cfg.get('binlog-format', 'MIXED')})
render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)
def human_to_bytes(human):
if human.isdigit():
return human
factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}
modifier = human[-1]
if modifier.lower() in factors:
return int(human[:-1]) * factors[modifier.lower()]
raise ValueError('Can only convert K, M, G, and T')
def dataset_size(size, page):
if not size.endswith('%'):
return human_to_bytes(size)
total_mem = human_to_bytes(get_memtotal())
sys_mem_limit = mem_limit()
if is_32bits() and total_mem > sys_mem_limit:
total_ram = sys_mem_limit
factor = int(size[:-1]) * 0.01
pctram = sys_mem_limit * factor
return int(pctram - pctram % page)
def is_32bits():
try:
IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0
except OverflowError:
IS_32BIT_SYSTEM = True
return IS_32BIT_SYSTEM
def mem_limit():
import platform
SYS_MEM_LIMIT = human_to_bytes(get_memtotal())
if platform.machine() in ['armv7l']:
SYS_MEM_LIMIT = human_to_bytes('2700M')
elif is_32bits():
SYS_MEM_LIMIT = human_to_bytes('4G')
return SYS_MEM_LIMIT
def get_memtotal():
with open('/proc/meminfo') as meminfo_file:
for line in meminfo_file:
key, mem = line.split(':', 2)
if key == 'MemTotal':
mtot, modifier = mem.strip().split(' ')
return '%s%s' % (mtot, modifier[0].upper())
def get_db_helper():
return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',
upasswdf_template='/var/lib/mysql/mysql-{}.passwd',
delete_ondisk_passwd_file=False)
def get_db_cursor():
import MySQLdb
db_helper = get_db_helper()
passwd = db_helper.get_mysql_root_password()
connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)
return connection.cursor()
def create_database(name):
cursor = get_db_cursor()
cursor.execute("show databases like '%s'" % name)
if cursor.fetchall():
return name
cursor.execute('create database `%s` character set utf8' % name)
cursor.close()
return name
def create_user():
user, password = check_output(['pwgen', '-N 2', '15']).split('\n')[:-1]
cursor = get_db_cursor()
grant_sql = "grant replication client on *.* to `%s` identified by '%s'"
cursor.execute(grant_sql % (user, password))
cursor.close()
return user, password
def grant_database(database, user, password):
cursor = get_db_cursor()
cursor.execute("grant all on `%s`.* to `%s` identified by '%s'" % (
database, user, password))
cursor.close()
<|reserved_special_token_1|>
import os
import sys
from subprocess import check_output
from charmhelpers.fetch import (
apt_install,
apt_update,
add_source,
)
from charmhelpers.core.templating import render
from charmhelpers.contrib.database.mysql import MySQLHelper
def install_mysql(package='mysql-server', sources=None, keys=None):
if not sources:
sources = []
if not keys:
keys = []
from subprocess import (
Popen,
PIPE,
)
for source in sources:
add_source(source)
if sources:
apt_update()
with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:
root_pass = rpw.read()
dconf = Popen(['debconf-set-selections'], stdin=PIPE)
dconf.stdin.write("%s %s/root_password password %s\n" % (package, package,
root_pass))
dconf.stdin.write("%s %s/root_password_again password %s\n" % (package,
package,
root_pass))
dconf.communicate()
dconf.wait()
apt_install(package)
def build_mycnf(cfg):
i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')
# REFACTOR add to charm helpers
unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]
if i_am_a_slave and cfg.get('tuning-level') != 'fast':
# On slaves, this gets overwritten
render(
source='mysql/binlog.cnf',
target='/etc/mysql/conf.d/binlog.cnf',
context={
'unit_id': unit_id,
'format': cfg.get('binlog-format', 'MIXED')
},
)
render(source='mysql/my.cnf', target='/etc/mysql/my.cnf',
context=cfg)
def human_to_bytes(human):
if human.isdigit():
return human
factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}
modifier = human[-1]
if modifier.lower() in factors:
return int(human[:-1]) * factors[modifier.lower()]
raise ValueError("Can only convert K, M, G, and T")
def dataset_size(size, page):
if not size.endswith('%'):
return human_to_bytes(size)
total_mem = human_to_bytes(get_memtotal())
sys_mem_limit = mem_limit()
if is_32bits() and total_mem > sys_mem_limit:
total_ram = sys_mem_limit
factor = int(size[:-1]) * 0.01
pctram = sys_mem_limit * factor
return int(pctram - (pctram % page))
def is_32bits():
try:
IS_32BIT_SYSTEM = sys.maxsize < 2**32.
except OverflowError:
IS_32BIT_SYSTEM = True
return IS_32BIT_SYSTEM
def mem_limit():
import platform
SYS_MEM_LIMIT = human_to_bytes(get_memtotal())
if platform.machine() in ['armv7l']:
SYS_MEM_LIMIT = human_to_bytes('2700M') # experimentally determined
elif is_32bits():
SYS_MEM_LIMIT = human_to_bytes('4G')
return SYS_MEM_LIMIT
def get_memtotal():
with open('/proc/meminfo') as meminfo_file:
for line in meminfo_file:
(key, mem) = line.split(':', 2)
if key == 'MemTotal':
(mtot, modifier) = mem.strip().split(' ')
return '%s%s' % (mtot, modifier[0].upper())
def get_db_helper():
return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',
upasswdf_template='/var/lib/mysql/mysql-{}.passwd',
delete_ondisk_passwd_file=False)
# REFACTOR factory/cache
def get_db_cursor():
import MySQLdb
# Connect to mysql
db_helper = get_db_helper()
passwd = db_helper.get_mysql_root_password()
connection = MySQLdb.connect(user="root", host="localhost", passwd=passwd)
return connection.cursor()
def create_database(name):
# REFACTOR UTF-8
# Clean databasename
cursor = get_db_cursor()
cursor.execute("show databases like '%s'" % name)
if cursor.fetchall():
return name
cursor.execute("create database `%s` character set utf8" % name)
cursor.close()
return name
def create_user():
# REFACTOR pwgen python module? maybe? yeah?
(user, password) = check_output(['pwgen', '-N 2', '15']).split('\n')[:-1]
cursor = get_db_cursor()
grant_sql = "grant replication client on *.* to `%s` identified by '%s'"
cursor.execute(grant_sql % (user, password))
cursor.close()
return (user, password)
def grant_database(database, user, password):
cursor = get_db_cursor()
cursor.execute(
"grant all on `%s`.* to `%s` identified by '%s'" % (database,
user, password))
cursor.close()
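# Hedged sketch (added): how the helpers above would typically be combined when
# a client relation joins; the service name 'wordpress' is made up for the example.
#   db_name = create_database('wordpress')
#   user, password = create_user()
#   grant_database(db_name, user, password)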
#
#relation_id = os.environ.get('JUJU_RELATION_ID')
#change_unit = os.environ.get('JUJU_REMOTE_UNIT')
#
## We'll name the database the same as the service.
#database_name_file = '.%s_database_name' % (relation_id)
## change_unit will be None on broken hooks
#database_name = ''
#if change_unit:
# database_name, _ = change_unit.split("/")
# with open(database_name_file, 'w') as dbnf:
# dbnf.write("%s\n" % database_name)
# dbnf.flush()
#elif os.path.exists(database_name_file):
# with open(database_name_file, 'r') as dbname:
# database_name = dbname.readline().strip()
#else:
# print 'No established database and no REMOTE_UNIT.'
## A user per service unit so we can deny access quickly
#lastrun_path = '/var/lib/juju/%s.%s.lastrun' % (database_name, user)
#slave_configured_path = '/var/lib/juju.slave.configured.for.%s' % database_name
#slave_configured = os.path.exists(slave_configured_path)
#slave = os.path.exists('/var/lib/juju/i.am.a.slave')
#broken_path = '/var/lib/juju/%s.mysql.broken' % database_name
#broken = os.path.exists(broken_path)
#
#
#
#
#def migrate_to_mount(new_path):
# """Invoked when new mountpoint appears. This function safely migrates
# MySQL data from local disk to persistent storage (only if needed)
# """
# old_path = '/var/lib/mysql'
# if os.path.islink(old_path):
# hookenv.log('{} is already a symlink, skipping migration'.format(
# old_path))
# return True
# # Ensure our new mountpoint is empty. Otherwise error and allow
# # users to investigate and migrate manually
# files = os.listdir(new_path)
# try:
# files.remove('lost+found')
# except ValueError:
# pass
# if files:
# raise RuntimeError('Persistent storage contains old data. '
# 'Please investigate and migrate data manually '
# 'to: {}'.format(new_path))
# os.chmod(new_path, 0o700)
# if os.path.isdir('/etc/apparmor.d/local'):
# render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',
# context={'path': os.path.join(new_path, '')})
# host.service_reload('apparmor')
# host.service_stop('mysql')
# host.rsync(os.path.join(old_path, ''), # Ensure we have trailing slashes
# os.path.join(new_path, ''),
# options=['--archive'])
# shutil.rmtree(old_path)
# os.symlink(new_path, old_path)
# host.service_start('mysql')
|
flexible
|
{
"blob_id": "083a9555f8db586fbb065d59e4e333bb16ee3d2a",
"index": 5521,
"step-1": "<mask token>\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\n<mask token>\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\n<mask token>\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n",
"step-2": "<mask token>\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\n<mask token>\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute('create database `%s` character set utf8' % name)\n cursor.close()\n return name\n\n\n<mask token>\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n",
"step-3": "<mask token>\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\ndef mem_limit():\n import platform\n SYS_MEM_LIMIT = human_to_bytes(get_memtotal())\n if platform.machine() in ['armv7l']:\n SYS_MEM_LIMIT = human_to_bytes('2700M')\n elif is_32bits():\n SYS_MEM_LIMIT = human_to_bytes('4G')\n return SYS_MEM_LIMIT\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute('create database `%s` character set utf8' % name)\n cursor.close()\n return name\n\n\n<mask token>\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n",
"step-4": "import os\nimport sys\nfrom subprocess import check_output\nfrom charmhelpers.fetch import apt_install, apt_update, add_source\nfrom charmhelpers.core.templating import render\nfrom charmhelpers.contrib.database.mysql import MySQLHelper\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n if not keys:\n keys = []\n from subprocess import Popen, PIPE\n for source in sources:\n add_source(source)\n if sources:\n apt_update()\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write('%s %s/root_password password %s\\n' % (package,\n package, root_pass))\n dconf.stdin.write('%s %s/root_password_again password %s\\n' % (package,\n package, root_pass))\n dconf.communicate()\n dconf.wait()\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n render(source='mysql/binlog.cnf', target=\n '/etc/mysql/conf.d/binlog.cnf', context={'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')})\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf', context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n raise ValueError('Can only convert K, M, G, and T')\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - pctram % page)\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2 ** 32.0\n except OverflowError:\n IS_32BIT_SYSTEM = True\n return IS_32BIT_SYSTEM\n\n\ndef mem_limit():\n import platform\n SYS_MEM_LIMIT = human_to_bytes(get_memtotal())\n if platform.machine() in ['armv7l']:\n SYS_MEM_LIMIT = human_to_bytes('2700M')\n elif is_32bits():\n SYS_MEM_LIMIT = human_to_bytes('4G')\n return SYS_MEM_LIMIT\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n key, mem = line.split(':', 2)\n if key == 'MemTotal':\n mtot, modifier = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\ndef get_db_cursor():\n import MySQLdb\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user='root', host='localhost', passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute('create database `%s` character set utf8' % name)\n cursor.close()\n return name\n\n\ndef create_user():\n user, password = check_output(['pwgen', '-N 2', '15']).split('\\n')[:-1]\n cursor = get_db_cursor()\n grant_sql = \"grant replication client on *.* to `%s` identified by '%s'\"\n cursor.execute(grant_sql % (user, password))\n cursor.close()\n return user, 
password\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\"grant all on `%s`.* to `%s` identified by '%s'\" % (\n database, user, password))\n cursor.close()\n",
"step-5": "import os\nimport sys\n\nfrom subprocess import check_output\n\nfrom charmhelpers.fetch import (\n apt_install,\n apt_update,\n add_source,\n)\n\nfrom charmhelpers.core.templating import render\nfrom charmhelpers.contrib.database.mysql import MySQLHelper\n\n\ndef install_mysql(package='mysql-server', sources=None, keys=None):\n if not sources:\n sources = []\n\n if not keys:\n keys = []\n\n from subprocess import (\n Popen,\n PIPE,\n )\n\n for source in sources:\n add_source(source)\n\n if sources:\n apt_update()\n\n with open('/var/lib/mysql/mysql.passwd', 'r') as rpw:\n root_pass = rpw.read()\n\n dconf = Popen(['debconf-set-selections'], stdin=PIPE)\n dconf.stdin.write(\"%s %s/root_password password %s\\n\" % (package, package,\n root_pass))\n dconf.stdin.write(\"%s %s/root_password_again password %s\\n\" % (package,\n package,\n root_pass))\n dconf.communicate()\n dconf.wait()\n\n apt_install(package)\n\n\ndef build_mycnf(cfg):\n i_am_a_slave = os.path.isfile('/var/lib/juju/i.am.a.slave')\n # REFACTOR add to charm helpers\n unit_id = os.environ['JUJU_UNIT_NAME'].split('/')[1]\n\n if i_am_a_slave and cfg.get('tuning-level') != 'fast':\n # On slaves, this gets overwritten\n render(\n source='mysql/binlog.cnf',\n target='/etc/mysql/conf.d/binlog.cnf',\n context={\n 'unit_id': unit_id,\n 'format': cfg.get('binlog-format', 'MIXED')\n },\n )\n\n render(source='mysql/my.cnf', target='/etc/mysql/my.cnf',\n context=cfg)\n\n\ndef human_to_bytes(human):\n if human.isdigit():\n return human\n factors = {'k': 1024, 'm': 1048576, 'g': 1073741824, 't': 1099511627776}\n modifier = human[-1]\n if modifier.lower() in factors:\n return int(human[:-1]) * factors[modifier.lower()]\n\n raise ValueError(\"Can only convert K, M, G, and T\")\n\n\ndef dataset_size(size, page):\n if not size.endswith('%'):\n return human_to_bytes(size)\n\n total_mem = human_to_bytes(get_memtotal())\n sys_mem_limit = mem_limit()\n if is_32bits() and total_mem > sys_mem_limit:\n total_ram = sys_mem_limit\n\n factor = int(size[:-1]) * 0.01\n pctram = sys_mem_limit * factor\n return int(pctram - (pctram % page))\n\n\ndef is_32bits():\n try:\n IS_32BIT_SYSTEM = sys.maxsize < 2**32.\n except OverflowError:\n IS_32BIT_SYSTEM = True\n\n return IS_32BIT_SYSTEM\n\n\ndef mem_limit():\n import platform\n\n SYS_MEM_LIMIT = human_to_bytes(get_memtotal())\n\n if platform.machine() in ['armv7l']:\n SYS_MEM_LIMIT = human_to_bytes('2700M') # experimentally determined\n elif is_32bits():\n SYS_MEM_LIMIT = human_to_bytes('4G')\n\n return SYS_MEM_LIMIT\n\n\ndef get_memtotal():\n with open('/proc/meminfo') as meminfo_file:\n for line in meminfo_file:\n (key, mem) = line.split(':', 2)\n if key == 'MemTotal':\n (mtot, modifier) = mem.strip().split(' ')\n return '%s%s' % (mtot, modifier[0].upper())\n\n\ndef get_db_helper():\n return MySQLHelper(rpasswdf_template='/var/lib/mysql/mysql.passwd',\n upasswdf_template='/var/lib/mysql/mysql-{}.passwd',\n delete_ondisk_passwd_file=False)\n\n\n# REFACTOR factory/cache\ndef get_db_cursor():\n import MySQLdb\n # Connect to mysql\n db_helper = get_db_helper()\n passwd = db_helper.get_mysql_root_password()\n connection = MySQLdb.connect(user=\"root\", host=\"localhost\", passwd=passwd)\n return connection.cursor()\n\n\ndef create_database(name):\n # REFACTOR UTF-8\n # Clean databasename\n cursor = get_db_cursor()\n cursor.execute(\"show databases like '%s'\" % name)\n if cursor.fetchall():\n return name\n cursor.execute(\"create database `%s` character set utf8\" % name)\n cursor.close()\n return 
name\n\n\ndef create_user():\n # REFACTOR pwgen python module? maybe? yeah?\n (user, password) = check_output(['pwgen', '-N 2', '15']).split('\\n')[:-1]\n cursor = get_db_cursor()\n grant_sql = \"grant replication client on *.* to `%s` identified by '%s'\"\n cursor.execute(grant_sql % (user, password))\n cursor.close()\n return (user, password)\n\n\ndef grant_database(database, user, password):\n cursor = get_db_cursor()\n cursor.execute(\n \"grant all on `%s`.* to `%s` identified by '%s'\" % (database,\n user, password))\n cursor.close()\n\n\n\n#\n#relation_id = os.environ.get('JUJU_RELATION_ID')\n#change_unit = os.environ.get('JUJU_REMOTE_UNIT')\n#\n## We'll name the database the same as the service.\n#database_name_file = '.%s_database_name' % (relation_id)\n## change_unit will be None on broken hooks\n#database_name = ''\n#if change_unit:\n# database_name, _ = change_unit.split(\"/\")\n# with open(database_name_file, 'w') as dbnf:\n# dbnf.write(\"%s\\n\" % database_name)\n# dbnf.flush()\n#elif os.path.exists(database_name_file):\n# with open(database_name_file, 'r') as dbname:\n# database_name = dbname.readline().strip()\n#else:\n# print 'No established database and no REMOTE_UNIT.'\n## A user per service unit so we can deny access quickly\n#lastrun_path = '/var/lib/juju/%s.%s.lastrun' % (database_name, user)\n#slave_configured_path = '/var/lib/juju.slave.configured.for.%s' % database_name\n#slave_configured = os.path.exists(slave_configured_path)\n#slave = os.path.exists('/var/lib/juju/i.am.a.slave')\n#broken_path = '/var/lib/juju/%s.mysql.broken' % database_name\n#broken = os.path.exists(broken_path)\n#\n#\n#\n#\n#def migrate_to_mount(new_path):\n# \"\"\"Invoked when new mountpoint appears. This function safely migrates\n# MySQL data from local disk to persistent storage (only if needed)\n# \"\"\"\n# old_path = '/var/lib/mysql'\n# if os.path.islink(old_path):\n# hookenv.log('{} is already a symlink, skipping migration'.format(\n# old_path))\n# return True\n# # Ensure our new mountpoint is empty. Otherwise error and allow\n# # users to investigate and migrate manually\n# files = os.listdir(new_path)\n# try:\n# files.remove('lost+found')\n# except ValueError:\n# pass\n# if files:\n# raise RuntimeError('Persistent storage contains old data. '\n# 'Please investigate and migrate data manually '\n# 'to: {}'.format(new_path))\n# os.chmod(new_path, 0o700)\n# if os.path.isdir('/etc/apparmor.d/local'):\n# render('apparmor.j2', '/etc/apparmor.d/local/usr.sbin.mysqld',\n# context={'path': os.path.join(new_path, '')})\n# host.service_reload('apparmor')\n# host.service_stop('mysql')\n# host.rsync(os.path.join(old_path, ''), # Ensure we have trailing slashes\n# os.path.join(new_path, ''),\n# options=['--archive'])\n# shutil.rmtree(old_path)\n# os.symlink(new_path, old_path)\n# host.service_start('mysql')\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
import sys
from Node import Node
from PriorityQueue import PriorityQueue
def Print(text):
if text is None or len(text) == 0:
print('invalid text.')
print('--------------------------------------------------------------')
return
text_set = set()
for i in text:
text_set.add(i)
if len(text_set) == 1:
print('invalid text.')
print('--------------------------------------------------------------')
return
print("The size of the data is: {}\n".format(sys.getsizeof(text)))
print("The content of the data is: {}\n".format(text))
encoded_data, tree = huffman_encoding(text)
print("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2))))
print("The content of the encoded data is: {}\n".format(encoded_data))
decoded_data = huffman_decoding(encoded_data, tree)
print("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data)))
print("The content of the encoded data is: {}\n".format(decoded_data))
print('--------------------------------------------------------------')
# this method will print huffman tree
def inorder(root):
if root is not None:
inorder(root.left)
print('Data: ', root.data, 'Freq: ', root.frequency)
if root.right is not None:
print('Right: ', root.right.data)
if root.left is not None:
print('Left: ', root.left.data)
inorder(root.right)
# end method inorder(root)
def generate_encoded_data(root):
"""
:param root: is a root of huffman tree
:return: dictionary contains all codes for each letter in the text.
"""
return generate_encoded_data2(root, {}, '')
# helper method
def generate_encoded_data2(root, dic, code):
if root is not None:
# go left of the tree if root has a left child.
if root.left is not None:
s = code + '0'
generate_encoded_data2(root.left, dic, s)
# if root is a leaf node then add this letter as a key and the code as a value.
if str(root.data).isalpha() or root.data == ' ':
dic.update({root.data: code})
# go left of the tree if root has a right child.
if root.right is not None:
s = code + '1'
generate_encoded_data2(root.right, dic, s)
return dic
else:
return None
def huffman_encoding(data):
"""
:param data: is the text that will we encode.
:return: encoded text as a binary and a root of huffman tree.
"""
    if data is None or len(data) == 0:
print('Please enter a valid data.')
return '', None
min_heap = PriorityQueue()
count_dic = {}
# count frequency of each letter and add it in count_dic as a value of the letter.
for i in range(len(data)):
if data[i] in count_dic:
count_dic[data[i]] += 1
else:
count_dic[data[i]] = 1
# add all element in count_dic to min_heap.
for i, j in count_dic.items():
new_node = Node(i, j)
min_heap.push(new_node, new_node.frequency)
count: int = 1
# create huffman tree phase 1.
while min_heap.size() >= 2:
item_1 = min_heap.pop()
item_2 = min_heap.pop()
sum_frequency = item_1.frequency + item_2.frequency
node = Node(count, sum_frequency, item_1, item_2)
min_heap.push(node, node.frequency)
count += 1
# the root of huffman tree.
root = min_heap.pop()
# generate the Encoded Data.
codes_ = generate_encoded_data(root)
# create string represent encoded data.
encoded = ''
for char in data:
if codes_.get(char) is not None:
encoded += codes_.get(char)
return encoded, root
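# Hedged mini example (added): for data = 'aab' the frequencies are {'a': 2, 'b': 1},
# so the tree has a single internal node, one letter gets code '0' and the other '1'
# (which one depends on how Node stores its left/right children), and the encoded
# string is 3 bits long.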
def huffman_decoding(data, root):
"""
:param data: is the encoded text as a binary.
:param root: is the root of huffman tree.
:return: the decoded data.
"""
    if data is None or len(data) == 0:
        print('Please enter a valid data.')
        return ''
decoded = ''
i = 0
curr = root
while i < len(data):
"""
If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if
the current bit is 1.
"""
if data[i] == '0':
curr = curr.left
else:
curr = curr.right
# go to the next cell of the encoded data.
i += 1
# if curr is leaf node then this node contain a letter.
if curr.is_leaf():
# add this letter to decoded data.
decoded += curr.data
# return and start from the root to find the next letter.
curr = root
return decoded
# Test case 1 -----------------------------------
a_great_sentence = 'The bird is the word'
Print(a_great_sentence)
# Test case 2 -----------------------------------
t1 = ''
Print(t1) # will print 'invalid text'
# Test case 3 -----------------------------------
t2 = 'AAAAAB'
Print(t2)
# Test case 4 -----------------------------------
t3 = 'AAAAA'
Print(t3) # will print 'invalid text'
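# Added sketch (not part of the original tests): a direct round-trip check, assuming
# Node.is_leaf() from the imported Node module reports childless nodes.
sample_text = 'go go gophers'
sample_encoded, sample_tree = huffman_encoding(sample_text)
print('round trip ok:', huffman_decoding(sample_encoded, sample_tree) == sample_text)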
|
normal
|
{
"blob_id": "bcdd36b534fd3551de9cb40efc11581f4d95a002",
"index": 9717,
"step-1": "<mask token>\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\n<mask token>\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\ndef huffman_encoding(data):\n \"\"\"\n :param data: is the text that will we encode.\n :return: encoded text as a binary and a root of huffman tree.\n \"\"\"\n if len(data) == 0 or data is None:\n print('Please enter a valid data.')\n return '', None\n min_heap = PriorityQueue()\n count_dic = {}\n for i in range(len(data)):\n if data[i] in count_dic:\n count_dic[data[i]] += 1\n else:\n count_dic[data[i]] = 1\n for i, j in count_dic.items():\n new_node = Node(i, j)\n min_heap.push(new_node, new_node.frequency)\n count: int = 1\n while min_heap.size() >= 2:\n item_1 = min_heap.pop()\n item_2 = min_heap.pop()\n sum_frequency = item_1.frequency + item_2.frequency\n node = Node(count, sum_frequency, item_1, item_2)\n min_heap.push(node, node.frequency)\n count += 1\n root = min_heap.pop()\n codes_ = generate_encoded_data(root)\n encoded = ''\n for char in data:\n if codes_.get(char) is not None:\n encoded += codes_.get(char)\n return encoded, root\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\n<mask token>\nPrint(a_great_sentence)\n<mask token>\nPrint(t1)\n<mask 
token>\nPrint(t2)\n<mask token>\nPrint(t3)\n",
"step-3": "<mask token>\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\ndef huffman_encoding(data):\n \"\"\"\n :param data: is the text that will we encode.\n :return: encoded text as a binary and a root of huffman tree.\n \"\"\"\n if len(data) == 0 or data is None:\n print('Please enter a valid data.')\n return '', None\n min_heap = PriorityQueue()\n count_dic = {}\n for i in range(len(data)):\n if data[i] in count_dic:\n count_dic[data[i]] += 1\n else:\n count_dic[data[i]] = 1\n for i, j in count_dic.items():\n new_node = Node(i, j)\n min_heap.push(new_node, new_node.frequency)\n count: int = 1\n while min_heap.size() >= 2:\n item_1 = min_heap.pop()\n item_2 = min_heap.pop()\n sum_frequency = item_1.frequency + item_2.frequency\n node = Node(count, sum_frequency, item_1, item_2)\n min_heap.push(node, node.frequency)\n count += 1\n root = min_heap.pop()\n codes_ = generate_encoded_data(root)\n encoded = ''\n for char in data:\n if codes_.get(char) is not None:\n encoded += codes_.get(char)\n return encoded, root\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\na_great_sentence = 'The bird is the word'\nPrint(a_great_sentence)\nt1 = ''\nPrint(t1)\nt2 = 
'AAAAAB'\nPrint(t2)\nt3 = 'AAAAA'\nPrint(t3)\n",
"step-4": "import sys\nfrom Node import Node\nfrom PriorityQueue import PriorityQueue\n\n\ndef Print(text):\n if text is None or len(text) == 0:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n text_set = set()\n for i in text:\n text_set.add(i)\n if len(text_set) == 1:\n print('invalid text.')\n print('--------------------------------------------------------------')\n return\n print('The size of the data is: {}\\n'.format(sys.getsizeof(text)))\n print('The content of the data is: {}\\n'.format(text))\n encoded_data, tree = huffman_encoding(text)\n print('The size of the encoded data is: {}\\n'.format(sys.getsizeof(int(\n encoded_data, base=2))))\n print('The content of the encoded data is: {}\\n'.format(encoded_data))\n decoded_data = huffman_decoding(encoded_data, tree)\n print('The size of the decoded data is: {}\\n'.format(sys.getsizeof(\n decoded_data)))\n print('The content of the encoded data is: {}\\n'.format(decoded_data))\n print('--------------------------------------------------------------')\n\n\ndef inorder(root):\n if root is not None:\n inorder(root.left)\n print('Data: ', root.data, 'Freq: ', root.frequency)\n if root.right is not None:\n print('Right: ', root.right.data)\n if root.left is not None:\n print('Left: ', root.left.data)\n inorder(root.right)\n\n\ndef generate_encoded_data(root):\n \"\"\"\n :param root: is a root of huffman tree\n :return: dictionary contains all codes for each letter in the text.\n \"\"\"\n return generate_encoded_data2(root, {}, '')\n\n\ndef generate_encoded_data2(root, dic, code):\n if root is not None:\n if root.left is not None:\n s = code + '0'\n generate_encoded_data2(root.left, dic, s)\n if str(root.data).isalpha() or root.data == ' ':\n dic.update({root.data: code})\n if root.right is not None:\n s = code + '1'\n generate_encoded_data2(root.right, dic, s)\n return dic\n else:\n return None\n\n\ndef huffman_encoding(data):\n \"\"\"\n :param data: is the text that will we encode.\n :return: encoded text as a binary and a root of huffman tree.\n \"\"\"\n if len(data) == 0 or data is None:\n print('Please enter a valid data.')\n return '', None\n min_heap = PriorityQueue()\n count_dic = {}\n for i in range(len(data)):\n if data[i] in count_dic:\n count_dic[data[i]] += 1\n else:\n count_dic[data[i]] = 1\n for i, j in count_dic.items():\n new_node = Node(i, j)\n min_heap.push(new_node, new_node.frequency)\n count: int = 1\n while min_heap.size() >= 2:\n item_1 = min_heap.pop()\n item_2 = min_heap.pop()\n sum_frequency = item_1.frequency + item_2.frequency\n node = Node(count, sum_frequency, item_1, item_2)\n min_heap.push(node, node.frequency)\n count += 1\n root = min_heap.pop()\n codes_ = generate_encoded_data(root)\n encoded = ''\n for char in data:\n if codes_.get(char) is not None:\n encoded += codes_.get(char)\n return encoded, root\n\n\ndef huffman_decoding(data, root):\n \"\"\"\n :param data: is the encoded text as a binary.\n :param root: is the root of huffman tree.\n :return: the decoded data.\n \"\"\"\n if len(data) == 0:\n print('Please enter a valid data.')\n return '', None\n decoded = ''\n i = 0\n curr = root\n while i < len(data):\n \"\"\"\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\n the current bit is 1.\n \"\"\"\n if data[i] == '0':\n curr = curr.left\n else:\n curr = curr.right\n i += 1\n if curr.is_leaf():\n decoded += curr.data\n curr = root\n return decoded\n\n\na_great_sentence = 'The bird 
is the word'\nPrint(a_great_sentence)\nt1 = ''\nPrint(t1)\nt2 = 'AAAAAB'\nPrint(t2)\nt3 = 'AAAAA'\nPrint(t3)\n",
"step-5": "import sys\r\nfrom Node import Node\r\nfrom PriorityQueue import PriorityQueue\r\n\r\n\r\ndef Print(text):\r\n if text is None or len(text) == 0:\r\n print('invalid text.')\r\n print('--------------------------------------------------------------')\r\n return\r\n\r\n text_set = set()\r\n for i in text:\r\n text_set.add(i)\r\n\r\n if len(text_set) == 1:\r\n print('invalid text.')\r\n print('--------------------------------------------------------------')\r\n return\r\n\r\n print(\"The size of the data is: {}\\n\".format(sys.getsizeof(text)))\r\n print(\"The content of the data is: {}\\n\".format(text))\r\n\r\n encoded_data, tree = huffman_encoding(text)\r\n\r\n print(\"The size of the encoded data is: {}\\n\".format(sys.getsizeof(int(encoded_data, base=2))))\r\n print(\"The content of the encoded data is: {}\\n\".format(encoded_data))\r\n\r\n decoded_data = huffman_decoding(encoded_data, tree)\r\n\r\n print(\"The size of the decoded data is: {}\\n\".format(sys.getsizeof(decoded_data)))\r\n print(\"The content of the encoded data is: {}\\n\".format(decoded_data))\r\n print('--------------------------------------------------------------')\r\n\r\n\r\n# this method will print huffman tree\r\ndef inorder(root):\r\n if root is not None:\r\n inorder(root.left)\r\n print('Data: ', root.data, 'Freq: ', root.frequency)\r\n if root.right is not None:\r\n print('Right: ', root.right.data)\r\n if root.left is not None:\r\n print('Left: ', root.left.data)\r\n inorder(root.right)\r\n\r\n# end method inorder(root)\r\n\r\n\r\ndef generate_encoded_data(root):\r\n \"\"\"\r\n :param root: is a root of huffman tree\r\n :return: dictionary contains all codes for each letter in the text.\r\n \"\"\"\r\n return generate_encoded_data2(root, {}, '')\r\n\r\n\r\n# helper method\r\ndef generate_encoded_data2(root, dic, code):\r\n if root is not None:\r\n # go left of the tree if root has a left child.\r\n if root.left is not None:\r\n s = code + '0'\r\n generate_encoded_data2(root.left, dic, s)\r\n\r\n # if root is a leaf node then add this letter as a key and the code as a value.\r\n if str(root.data).isalpha() or root.data == ' ':\r\n dic.update({root.data: code})\r\n\r\n # go left of the tree if root has a right child.\r\n if root.right is not None:\r\n s = code + '1'\r\n generate_encoded_data2(root.right, dic, s)\r\n\r\n return dic\r\n else:\r\n return None\r\n\r\n\r\ndef huffman_encoding(data):\r\n \"\"\"\r\n :param data: is the text that will we encode.\r\n :return: encoded text as a binary and a root of huffman tree.\r\n \"\"\"\r\n if len(data) == 0 or data is None:\r\n print('Please enter a valid data.')\r\n return '', None\r\n\r\n min_heap = PriorityQueue()\r\n count_dic = {}\r\n # count frequency of each letter and add it in count_dic as a value of the letter.\r\n for i in range(len(data)):\r\n if data[i] in count_dic:\r\n count_dic[data[i]] += 1\r\n else:\r\n count_dic[data[i]] = 1\r\n\r\n # add all element in count_dic to min_heap.\r\n for i, j in count_dic.items():\r\n new_node = Node(i, j)\r\n min_heap.push(new_node, new_node.frequency)\r\n\r\n count: int = 1\r\n\r\n # create huffman tree phase 1.\r\n while min_heap.size() >= 2:\r\n item_1 = min_heap.pop()\r\n item_2 = min_heap.pop()\r\n sum_frequency = item_1.frequency + item_2.frequency\r\n node = Node(count, sum_frequency, item_1, item_2)\r\n min_heap.push(node, node.frequency)\r\n count += 1\r\n\r\n # the root of huffman tree.\r\n root = min_heap.pop()\r\n # generate the Encoded Data.\r\n codes_ = generate_encoded_data(root)\r\n\r\n # create 
string represent encoded data.\r\n encoded = ''\r\n for char in data:\r\n if codes_.get(char) is not None:\r\n encoded += codes_.get(char)\r\n\r\n return encoded, root\r\n\r\n\r\ndef huffman_decoding(data, root):\r\n \"\"\"\r\n :param data: is the encoded text as a binary.\r\n :param root: is the root of huffman tree.\r\n :return: the decoded data.\r\n \"\"\"\r\n if len(data) == 0:\r\n print('Please enter a valid data.')\r\n return '', None\r\n\r\n decoded = ''\r\n i = 0\r\n curr = root\r\n while i < len(data):\r\n \"\"\"\r\n If the current bit of encoded data is 0, move to the left child, else move to the right child of the tree if\r\n the current bit is 1.\r\n \"\"\"\r\n if data[i] == '0':\r\n curr = curr.left\r\n else:\r\n curr = curr.right\r\n # go to the next cell of the encoded data.\r\n i += 1\r\n\r\n # if curr is leaf node then this node contain a letter.\r\n if curr.is_leaf():\r\n # add this letter to decoded data.\r\n decoded += curr.data\r\n # return and start from the root to find the next letter.\r\n curr = root\r\n\r\n return decoded\r\n\r\n\r\n# Test case 1 -----------------------------------\r\na_great_sentence = 'The bird is the word'\r\nPrint(a_great_sentence)\r\n\r\n# Test case 2 -----------------------------------\r\nt1 = ''\r\nPrint(t1) # will print 'invalid text'\r\n\r\n# Test case 3 -----------------------------------\r\nt2 = 'AAAAAB'\r\nPrint(t2)\r\n\r\n# Test case 4 -----------------------------------\r\nt3 = 'AAAAA'\r\nPrint(t3) # will print 'invalid text'\r\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
# -*- coding: utf-8 -*-
"""Test custom node separator."""
import six
from helper import assert_raises, eq_
import anytree as at
class MyNode(at.Node):
separator = "|"
def test_render():
"""Render string cast."""
root = MyNode("root")
s0 = MyNode("sub0", parent=root)
MyNode("sub0B", parent=s0)
MyNode("sub0A", parent=s0)
MyNode("sub1", parent=root)
r = at.RenderTree(root)
expected = "\n".join(
[
"MyNode('|root')",
"├── MyNode('|root|sub0')",
"│ ├── MyNode('|root|sub0|sub0B')",
"│ └── MyNode('|root|sub0|sub0A')",
"└── MyNode('|root|sub1')",
]
)
if six.PY2:
eq_(str(r).decode("utf-8"), expected)
else:
eq_(str(r), expected)
def test_get():
"""Get."""
top = MyNode("top", parent=None)
sub0 = MyNode("sub0", parent=top)
sub0sub0 = MyNode("sub0sub0", parent=sub0)
sub0sub1 = MyNode("sub0sub1", parent=sub0)
sub1 = MyNode("sub1", parent=top)
r = at.Resolver("name")
eq_(r.get(top, "sub0|sub0sub0"), sub0sub0)
eq_(r.get(sub1, ".."), top)
eq_(r.get(sub1, "..|sub0|sub0sub1"), sub0sub1)
eq_(r.get(sub1, "."), sub1)
eq_(r.get(sub1, ""), sub1)
with assert_raises(at.ChildResolverError, "MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'."):
r.get(top, "sub2")
eq_(r.get(sub0sub0, "|top"), top)
eq_(r.get(sub0sub0, "|top|sub0"), sub0)
with assert_raises(at.ResolverError, "root node missing. root is '|top'."):
r.get(sub0sub0, "|")
with assert_raises(at.ResolverError, "unknown root node '|bar'. root is '|top'."):
r.get(sub0sub0, "|bar")
def test_glob():
"""Wildcard."""
top = MyNode("top", parent=None)
sub0 = MyNode("sub0", parent=top)
sub0sub0 = MyNode("sub0", parent=sub0)
sub0sub1 = MyNode("sub1", parent=sub0)
sub0sub1sub0 = MyNode("sub0", parent=sub0sub1)
MyNode("sub1", parent=sub0sub1)
sub1 = MyNode("sub1", parent=top)
sub1sub0 = MyNode("sub0", parent=sub1)
r = at.Resolver()
eq_(r.glob(top, "*|*|sub0"), [sub0sub1sub0])
eq_(r.glob(top, "sub0|sub?"), [sub0sub0, sub0sub1])
eq_(r.glob(sub1, "..|.|*"), [sub0, sub1])
eq_(r.glob(top, "*|*"), [sub0sub0, sub0sub1, sub1sub0])
eq_(r.glob(top, "*|sub0"), [sub0sub0, sub1sub0])
with assert_raises(at.ChildResolverError, "MyNode('|top|sub1') has no child sub1. Children are: 'sub0'."):
r.glob(top, "sub1|sub1")
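# Hedged note (added): with separator = "|", node reprs in this module should render
# as "MyNode('|root|sub0')" rather than anytree's default "MyNode('/root/sub0')",
# which is what the expected strings in the assertions above rely on.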
|
normal
|
{
"blob_id": "a430b4629ee06dbfb267f839599383624e37451e",
"index": 4582,
"step-1": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\n<mask token>\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-3": "<mask token>\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0sub0', parent=sub0)\n sub0sub1 = MyNode('sub0sub1', parent=sub0)\n sub1 = MyNode('sub1', parent=top)\n r = at.Resolver('name')\n eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)\n eq_(r.get(sub1, '..'), top)\n eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)\n eq_(r.get(sub1, '.'), sub1)\n eq_(r.get(sub1, ''), sub1)\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, 'sub2')\n eq_(r.get(sub0sub0, '|top'), top)\n eq_(r.get(sub0sub0, '|top|sub0'), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, '|')\n with assert_raises(at.ResolverError,\n \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, '|bar')\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-4": "<mask token>\nimport six\nfrom helper import assert_raises, eq_\nimport anytree as at\n\n\nclass MyNode(at.Node):\n separator = '|'\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode('root')\n s0 = MyNode('sub0', parent=root)\n MyNode('sub0B', parent=s0)\n MyNode('sub0A', parent=s0)\n MyNode('sub1', parent=root)\n r = at.RenderTree(root)\n expected = '\\n'.join([\"MyNode('|root')\", \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\", \"└── MyNode('|root|sub1')\"])\n if six.PY2:\n eq_(str(r).decode('utf-8'), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0sub0', parent=sub0)\n sub0sub1 = MyNode('sub0sub1', parent=sub0)\n sub1 = MyNode('sub1', parent=top)\n r = at.Resolver('name')\n eq_(r.get(top, 'sub0|sub0sub0'), sub0sub0)\n eq_(r.get(sub1, '..'), top)\n eq_(r.get(sub1, '..|sub0|sub0sub1'), sub0sub1)\n eq_(r.get(sub1, '.'), sub1)\n eq_(r.get(sub1, ''), sub1)\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, 'sub2')\n eq_(r.get(sub0sub0, '|top'), top)\n eq_(r.get(sub0sub0, '|top|sub0'), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, '|')\n with assert_raises(at.ResolverError,\n \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, '|bar')\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode('top', parent=None)\n sub0 = MyNode('sub0', parent=top)\n sub0sub0 = MyNode('sub0', parent=sub0)\n sub0sub1 = MyNode('sub1', parent=sub0)\n sub0sub1sub0 = MyNode('sub0', parent=sub0sub1)\n MyNode('sub1', parent=sub0sub1)\n sub1 = MyNode('sub1', parent=top)\n sub1sub0 = MyNode('sub0', parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, '*|*|sub0'), [sub0sub1sub0])\n eq_(r.glob(top, 'sub0|sub?'), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, '..|.|*'), [sub0, sub1])\n eq_(r.glob(top, '*|*'), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, '*|sub0'), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError,\n \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, 'sub1|sub1')\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Test custom node separator.\"\"\"\n\nimport six\nfrom helper import assert_raises, eq_\n\nimport anytree as at\n\n\nclass MyNode(at.Node):\n\n separator = \"|\"\n\n\ndef test_render():\n \"\"\"Render string cast.\"\"\"\n root = MyNode(\"root\")\n s0 = MyNode(\"sub0\", parent=root)\n MyNode(\"sub0B\", parent=s0)\n MyNode(\"sub0A\", parent=s0)\n MyNode(\"sub1\", parent=root)\n r = at.RenderTree(root)\n\n expected = \"\\n\".join(\n [\n \"MyNode('|root')\",\n \"├── MyNode('|root|sub0')\",\n \"│ ├── MyNode('|root|sub0|sub0B')\",\n \"│ └── MyNode('|root|sub0|sub0A')\",\n \"└── MyNode('|root|sub1')\",\n ]\n )\n if six.PY2:\n eq_(str(r).decode(\"utf-8\"), expected)\n else:\n eq_(str(r), expected)\n\n\ndef test_get():\n \"\"\"Get.\"\"\"\n top = MyNode(\"top\", parent=None)\n sub0 = MyNode(\"sub0\", parent=top)\n sub0sub0 = MyNode(\"sub0sub0\", parent=sub0)\n sub0sub1 = MyNode(\"sub0sub1\", parent=sub0)\n sub1 = MyNode(\"sub1\", parent=top)\n r = at.Resolver(\"name\")\n eq_(r.get(top, \"sub0|sub0sub0\"), sub0sub0)\n eq_(r.get(sub1, \"..\"), top)\n eq_(r.get(sub1, \"..|sub0|sub0sub1\"), sub0sub1)\n eq_(r.get(sub1, \".\"), sub1)\n eq_(r.get(sub1, \"\"), sub1)\n with assert_raises(at.ChildResolverError, \"MyNode('|top') has no child sub2. Children are: 'sub0', 'sub1'.\"):\n r.get(top, \"sub2\")\n eq_(r.get(sub0sub0, \"|top\"), top)\n eq_(r.get(sub0sub0, \"|top|sub0\"), sub0)\n with assert_raises(at.ResolverError, \"root node missing. root is '|top'.\"):\n r.get(sub0sub0, \"|\")\n with assert_raises(at.ResolverError, \"unknown root node '|bar'. root is '|top'.\"):\n r.get(sub0sub0, \"|bar\")\n\n\ndef test_glob():\n \"\"\"Wildcard.\"\"\"\n top = MyNode(\"top\", parent=None)\n sub0 = MyNode(\"sub0\", parent=top)\n sub0sub0 = MyNode(\"sub0\", parent=sub0)\n sub0sub1 = MyNode(\"sub1\", parent=sub0)\n sub0sub1sub0 = MyNode(\"sub0\", parent=sub0sub1)\n MyNode(\"sub1\", parent=sub0sub1)\n sub1 = MyNode(\"sub1\", parent=top)\n sub1sub0 = MyNode(\"sub0\", parent=sub1)\n r = at.Resolver()\n eq_(r.glob(top, \"*|*|sub0\"), [sub0sub1sub0])\n\n eq_(r.glob(top, \"sub0|sub?\"), [sub0sub0, sub0sub1])\n eq_(r.glob(sub1, \"..|.|*\"), [sub0, sub1])\n eq_(r.glob(top, \"*|*\"), [sub0sub0, sub0sub1, sub1sub0])\n eq_(r.glob(top, \"*|sub0\"), [sub0sub0, sub1sub0])\n with assert_raises(at.ChildResolverError, \"MyNode('|top|sub1') has no child sub1. Children are: 'sub0'.\"):\n r.glob(top, \"sub1|sub1\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def log_utility(curr_dr):
"""
More data rate increases the utility following a log function: High initial increase, then flattens.
:param curr_dr: Current data rate
:return: Utility
"""
assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'
if curr_dr == 0:
return MIN_UTILITY
return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def step_utility(curr_dr, req_dr):
"""
Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.
:param curr_dr: Current data rate
:param req_dr: Required data rate
:return: Min or max utility depending on whether the required data rate is met
"""
if curr_dr >= req_dr:
return MAX_UTILITY
return MIN_UTILITY
def log_utility(curr_dr):
"""
More data rate increases the utility following a log function: High initial increase, then flattens.
:param curr_dr: Current data rate
:param factor: Factor to multiply the log function with
:param add: Add to current data rate before passing to log function
:return: Utility
"""
assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'
if curr_dr == 0:
return MIN_UTILITY
return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):
"""
Utility that directly equals the data rate, increasing linearly up to a given maximum.
:param max_dr: Maximum data rate at which the utility does not increase further
:return: Utility
"""
assert curr_dr >= 0 and max_dr >= 0
assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'
return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)
def step_utility(curr_dr, req_dr):
"""
Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.
:param curr_dr: Current data rate
:param req_dr: Required data rate
:return: Min or max utility depending on whether the required data rate is met
"""
if curr_dr >= req_dr:
return MAX_UTILITY
return MIN_UTILITY
def log_utility(curr_dr):
"""
More data rate increases the utility following a log function: High initial increase, then flattens.
:param curr_dr: Current data rate
:param factor: Factor to multiply the log function with
:param add: Add to current data rate before passing to log function
:return: Utility
"""
assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'
if curr_dr == 0:
return MIN_UTILITY
return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY
def linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):
"""
Utility that directly equals the data rate, increasing linearly up to a given maximum.
:param max_dr: Maximum data rate at which the utility does not increase further
:return: Utility
"""
assert curr_dr >= 0 and max_dr >= 0
assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'
return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)
def step_utility(curr_dr, req_dr):
"""
Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.
:param curr_dr: Current data rate
:param req_dr: Required data rate
:return: Min or max utility depending on whether the required data rate is met
"""
if curr_dr >= req_dr:
return MAX_UTILITY
return MIN_UTILITY
def log_utility(curr_dr):
"""
More data rate increases the utility following a log function: High initial increase, then flattens.
:param curr_dr: Current data rate
:param factor: Factor to multiply the log function with
:param add: Add to current data rate before passing to log function
:return: Utility
"""
assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'
if curr_dr == 0:
return MIN_UTILITY
return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)
<|reserved_special_token_1|>
"""
Auxiliary functions for calculating the utility of achieving a certain data rate (for a UE).
Attention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)!
"""
import numpy as np
from deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY
def linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):
"""
Utility that directly equals the data rate, increasing linearly up to a given maximum.
:param max_dr: Maximum data rate at which the utility does not increase further
:return: Utility
"""
assert curr_dr >= 0 and max_dr >= 0
assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, \
"The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!"
return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)
def step_utility(curr_dr, req_dr):
"""
Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.
:param curr_dr: Current data rate
:param req_dr: Required data rate
:return: Min or max utility depending on whether the required data rate is met
"""
if curr_dr >= req_dr:
return MAX_UTILITY
return MIN_UTILITY
def log_utility(curr_dr):
"""
More data rate increases the utility following a log function: High initial increase, then flattens.
:param curr_dr: Current data rate
:param factor: Factor to multiply the log function with
:param add: Add to current data rate before passing to log function
:return: Utility
"""
# 4*log(0.1+x) looks good: around -10 for no dr; 0 for 0.9 dr; slightly positive for more
# 10*log10(0.1+x) is even better because it's steeper, is exactly -10 for dr=0, and flatter for larger dr
# with many UEs where each UE only gets around 0.1 data rate, 100*log(0.9+x) looks good (eg, 50 UEs on medium env)
# better: 10*log10(x) --> clip to [-20, 20]; -20 for <= 0.01 dr; +20 for >= 100 dr
# ensure min/max utility are set correctly for this utility function
assert MIN_UTILITY == -20 and MAX_UTILITY == 20, "The chosen log utility requires min/max utility to be -20/+20"
if curr_dr == 0:
return MIN_UTILITY
return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)
|
flexible
|
{
"blob_id": "e3de072d6bce2ecc105306c06b9a9aa0362130ff",
"index": 6234,
"step-1": "<mask token>\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-2": "<mask token>\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-3": "<mask token>\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, 'The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!'\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, 'The chosen log utility requires min/max utility to be -20/+20'\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-5": "\"\"\"\nAuxiliary functions for calculating the utility of achieving a certain data rate (for a UE).\nAttention: The absolute reward that's achieved with different utilities cannot be compared directly (diff ranges)!\n\"\"\"\nimport numpy as np\n\nfrom deepcomp.util.constants import MIN_UTILITY, MAX_UTILITY\n\n\ndef linear_clipped_utility(curr_dr, max_dr=MAX_UTILITY):\n \"\"\"\n Utility that directly equals the data rate, increasing linearly up to a given maximum.\n\n :param max_dr: Maximum data rate at which the utility does not increase further\n :return: Utility\n \"\"\"\n assert curr_dr >= 0 and max_dr >= 0\n assert MIN_UTILITY == 0 and MAX_UTILITY == max_dr, \\\n \"The chosen linear utility requires MIN_UTILITY=0 and sensible MAX_UTILITY. Set sensible values manually!\"\n return np.clip(curr_dr, MIN_UTILITY, MAX_UTILITY)\n\n\ndef step_utility(curr_dr, req_dr):\n \"\"\"\n Flat negative utility as long as the required data rate is not met; then positive. Nothing in between.\n\n :param curr_dr: Current data rate\n :param req_dr: Required data rate\n :return: Min or max utility depending on whether the required data rate is met\n \"\"\"\n if curr_dr >= req_dr:\n return MAX_UTILITY\n return MIN_UTILITY\n\n\ndef log_utility(curr_dr):\n \"\"\"\n More data rate increases the utility following a log function: High initial increase, then flattens.\n\n :param curr_dr: Current data rate\n :param factor: Factor to multiply the log function with\n :param add: Add to current data rate before passing to log function\n :return: Utility\n \"\"\"\n # 4*log(0.1+x) looks good: around -10 for no dr; 0 for 0.9 dr; slightly positive for more\n # 10*log10(0.1+x) is even better because it's steeper, is exactly -10 for dr=0, and flatter for larger dr\n # with many UEs where each UE only gets around 0.1 data rate, 100*log(0.9+x) looks good (eg, 50 UEs on medium env)\n\n # better: 10*log10(x) --> clip to [-20, 20]; -20 for <= 0.01 dr; +20 for >= 100 dr\n # ensure min/max utility are set correctly for this utility function\n assert MIN_UTILITY == -20 and MAX_UTILITY == 20, \"The chosen log utility requires min/max utility to be -20/+20\"\n if curr_dr == 0:\n return MIN_UTILITY\n return np.clip(10 * np.log10(curr_dr), MIN_UTILITY, MAX_UTILITY)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import paho.mqtt.client as paho
import RPi.GPIO as GPIO
import json, time, math
import clearblade
from clearblade import auth
from clearblade import Client
from urlparse import urlparse
#Fill init values
systemKey = ______
secretKey = ______
userName = _______
userPW = _______
edgeIP = "http://_______:9000"
auth = auth.Auth()
userClient = Client.UserClient(systemKey, secretKey, userName, userPW, edgeIP)
auth.Authenticate(userClient)
print "Authenticated"
sequence = [
[1,1,0,0],
[0,1,1,0],
[0,0,1,1],
[1,0,0,1]
]
ccwseq = [
[1,0,0,1],
[0,0,1,1],
[0,1,1,0],
[1,1,0,0]
]
time.sleep(1)
GPIO.setmode(GPIO.BOARD)
controlPinArray = [31, 33, 35,37]
for pin in controlPinArray:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin,0)
# Stepper motor logic
def StepMotor(controlState, angle):
step = math.ceil(int(angle) * 1.4222222)
print step
stepAngle = int(step)
print "Step Angle "+str(stepAngle)
if controlState == "CW":
for i in range(stepAngle):
for fullStep in range(4):
for pin in range(4):
GPIO.output(controlPinArray[pin], sequence[fullStep][pin])
time.sleep(0.0015)
else:
for i in range(stepAngle):
for halfStep in range(4):
for pin in range(4):
GPIO.output(controlPinArray[pin], ccwseq[halfStep][pin])
time.sleep(0.002)
# Define event callbacks
def on_connect(mosq, obj, rc):
print("rc: " + str(rc))
def on_message(mosq, obj, msg):
print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
command = msg.payload
commandJson = json.loads(command)
controlState = str(commandJson['controlState'])
angle = commandJson['state']
print ("Control State :"+controlState)
print ("Angle:"+str(angle))
StepMotor(controlState, angle)
print "Done"
def on_publish(mosq, obj, mid):
print("mid: " + str(mid))
def on_subscribe(mosq, obj, mid, granted_qos):
print("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_log(mosq, obj, level, string):
print(string)
mqttc = paho.Client()
# Assign event callbacks
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Connect to clearblade
mqttc.username_pw_set(userClient.UserToken, userClient.systemKey)
msgAddr = urlparse(userClient.platform)
msgAddr = msgAddr.hostname
mqttc.connect(msgAddr,"1883", 30)
# Start subscribe, with QoS level 0
mqttc.subscribe("motor/angle", 0)
# Continue the network loop, exit when an error occurs
rc = 0
while rc == 0:
rc = mqttc.loop()
|
normal
|
{
"blob_id": "299d13fbcdb75673026db1e3a0352c8b19d453c1",
"index": 6314,
"step-1": "import paho.mqtt.client as paho\nimport RPi.GPIO as GPIO\nimport json, time, math\nimport clearblade\nfrom clearblade import auth\nfrom clearblade import Client\nfrom urlparse import urlparse\n\n#Fill init values\nsystemKey = ______\nsecretKey = ______\nuserName = _______\nuserPW = _______\nedgeIP = \"http://_______:9000\"\n\nauth = auth.Auth()\nuserClient = Client.UserClient(systemKey, secretKey, userName, userPW, edgeIP)\nauth.Authenticate(userClient)\nprint \"Authenticated\"\n\nsequence = [\n\t[1,1,0,0],\n\t[0,1,1,0],\n\t[0,0,1,1],\n\t[1,0,0,1]\n]\n\nccwseq = [\n [1,0,0,1],\n [0,0,1,1],\n\t[0,1,1,0],\n\t[1,1,0,0]\n]\t\n\ntime.sleep(1)\nGPIO.setmode(GPIO.BOARD)\n\ncontrolPinArray = [31, 33, 35,37]\n\nfor pin in controlPinArray:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin,0)\n\n# Stepper motor logic\ndef StepMotor(controlState, angle):\n step = math.ceil(int(angle) * 1.4222222)\n print step\n stepAngle = int(step)\n print \"Step Angle \"+str(stepAngle)\n\tif controlState == \"CW\":\n\t for i in range(stepAngle):\n \t for fullStep in range(4):\n\t\t\t\tfor pin in range(4):\n\t\t\t\t\tGPIO.output(controlPinArray[pin], sequence[fullStep][pin])\n\t time.sleep(0.0015)\n\telse:\n\t\t for i in range(stepAngle):\n for halfStep in range(4):\n for pin in range(4):\n GPIO.output(controlPinArray[pin], ccwseq[halfStep][pin])\n time.sleep(0.002)\n\n\n\n# Define event callbacks\ndef on_connect(mosq, obj, rc):\n\tprint(\"rc: \" + str(rc))\n\ndef on_message(mosq, obj, msg):\n\tprint(msg.topic + \" \" + str(msg.qos) + \" \" + str(msg.payload))\n\tcommand = msg.payload\n\tcommandJson = json.loads(command)\n\tcontrolState = str(commandJson['controlState'])\n\tangle = commandJson['state']\n\tprint (\"Control State :\"+controlState)\n\tprint (\"Angle:\"+str(angle))\n\tStepMotor(controlState, angle)\n\tprint \"Done\"\n\ndef on_publish(mosq, obj, mid):\n\tprint(\"mid: \" + str(mid))\n\ndef on_subscribe(mosq, obj, mid, granted_qos):\n\tprint(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos))\n\ndef on_log(mosq, obj, level, string):\n\tprint(string)\n\nmqttc = paho.Client()\n# Assign event callbacks\nmqttc.on_message = on_message\nmqttc.on_connect = on_connect\nmqttc.on_publish = on_publish\nmqttc.on_subscribe = on_subscribe\n\n# Connect to clearblade\nmqttc.username_pw_set(userClient.UserToken, userClient.systemKey)\nmsgAddr = urlparse(userClient.platform)\nmsgAddr = msgAddr.hostname\nmqttc.connect(msgAddr,\"1883\", 30)\n\n# Start subscribe, with QoS level 0\nmqttc.subscribe(\"motor/angle\", 0)\n\n# Continue the network loop, exit when an error occurs\nrc = 0\nwhile rc == 0:\n rc = mqttc.loop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:utf-8 -*-
from flask import redirect, url_for, render_template
from flask.globals import request, session
from flask_admin import BaseView, expose
from util import navigator, common
class Billings(BaseView):
@expose('/')
def index(self):
return redirect(url_for('.billingHistory'))
@expose('/billingHistory')
def billingHistory(self):
self.menuItems = session['navigator']
self.pageAuth = common.getPageAuth()
return render_template("views/pages/billings/billingHistory.html", admin_view=self)
@expose('/billingDetail')
def billingDetail(self):
self.menuItems = session['navigator']
self.pageAuth = common.getPageAuth()
        #Preliminary billing statement Seq
self.billingSeq = request.args.get("billingSeq")
        #Billing statement Seq
if request.args.get("regBillingSeq") != None :
self.regBillingSeq = request.args.get("regBillingSeq")
elif request.args.get("regBillingSeq") == None :
self.regBillingSeq = ""
        #Approval Seq
if request.args.get("apprSeq") != None :
self.apprSeq = request.args.get("apprSeq")
elif request.args.get("apprSeq") == None :
self.apprSeq = ""
        #Approval content Seq
if request.args.get("contentSeq") != None :
self.contentSeq = request.args.get("contentSeq")
elif request.args.get("contentSeq") == None :
self.contentSeq = ""
return render_template("views/pages/billings/billingDetail.html", admin_view=self)
@expose('/billings')
def billings(self):
self.menuItems = session['navigator']
self.pageAuth = common.getPageAuth()
return render_template("views/pages/billings/billings.html", admin_view=self)
@expose('/billingsDetail')
def billingsDetail(self):
self.menuItems = session['navigator']
self.pageAuth = common.getPageAuth()
self.seq = request.args.get("seq")
return render_template("views/pages/billings/billingsDetail.html", admin_view=self)
|
normal
|
{
"blob_id": "a9344151a997842972aa68c417a77b3ca80e6cfa",
"index": 3174,
"step-1": "<mask token>\n\n\nclass Billings(BaseView):\n\n @expose('/')\n def index(self):\n return redirect(url_for('.billingHistory'))\n <mask token>\n <mask token>\n <mask token>\n\n @expose('/billingsDetail')\n def billingsDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.seq = request.args.get('seq')\n return render_template('views/pages/billings/billingsDetail.html',\n admin_view=self)\n",
"step-2": "<mask token>\n\n\nclass Billings(BaseView):\n\n @expose('/')\n def index(self):\n return redirect(url_for('.billingHistory'))\n <mask token>\n\n @expose('/billingDetail')\n def billingDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.billingSeq = request.args.get('billingSeq')\n if request.args.get('regBillingSeq') != None:\n self.regBillingSeq = request.args.get('regBillingSeq')\n elif request.args.get('regBillingSeq') == None:\n self.regBillingSeq = ''\n if request.args.get('apprSeq') != None:\n self.apprSeq = request.args.get('apprSeq')\n elif request.args.get('apprSeq') == None:\n self.apprSeq = ''\n if request.args.get('contentSeq') != None:\n self.contentSeq = request.args.get('contentSeq')\n elif request.args.get('contentSeq') == None:\n self.contentSeq = ''\n return render_template('views/pages/billings/billingDetail.html',\n admin_view=self)\n\n @expose('/billings')\n def billings(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n return render_template('views/pages/billings/billings.html',\n admin_view=self)\n\n @expose('/billingsDetail')\n def billingsDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.seq = request.args.get('seq')\n return render_template('views/pages/billings/billingsDetail.html',\n admin_view=self)\n",
"step-3": "<mask token>\n\n\nclass Billings(BaseView):\n\n @expose('/')\n def index(self):\n return redirect(url_for('.billingHistory'))\n\n @expose('/billingHistory')\n def billingHistory(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n return render_template('views/pages/billings/billingHistory.html',\n admin_view=self)\n\n @expose('/billingDetail')\n def billingDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.billingSeq = request.args.get('billingSeq')\n if request.args.get('regBillingSeq') != None:\n self.regBillingSeq = request.args.get('regBillingSeq')\n elif request.args.get('regBillingSeq') == None:\n self.regBillingSeq = ''\n if request.args.get('apprSeq') != None:\n self.apprSeq = request.args.get('apprSeq')\n elif request.args.get('apprSeq') == None:\n self.apprSeq = ''\n if request.args.get('contentSeq') != None:\n self.contentSeq = request.args.get('contentSeq')\n elif request.args.get('contentSeq') == None:\n self.contentSeq = ''\n return render_template('views/pages/billings/billingDetail.html',\n admin_view=self)\n\n @expose('/billings')\n def billings(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n return render_template('views/pages/billings/billings.html',\n admin_view=self)\n\n @expose('/billingsDetail')\n def billingsDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.seq = request.args.get('seq')\n return render_template('views/pages/billings/billingsDetail.html',\n admin_view=self)\n",
"step-4": "from flask import redirect, url_for, render_template\nfrom flask.globals import request, session\nfrom flask_admin import BaseView, expose\nfrom util import navigator, common\n\n\nclass Billings(BaseView):\n\n @expose('/')\n def index(self):\n return redirect(url_for('.billingHistory'))\n\n @expose('/billingHistory')\n def billingHistory(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n return render_template('views/pages/billings/billingHistory.html',\n admin_view=self)\n\n @expose('/billingDetail')\n def billingDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.billingSeq = request.args.get('billingSeq')\n if request.args.get('regBillingSeq') != None:\n self.regBillingSeq = request.args.get('regBillingSeq')\n elif request.args.get('regBillingSeq') == None:\n self.regBillingSeq = ''\n if request.args.get('apprSeq') != None:\n self.apprSeq = request.args.get('apprSeq')\n elif request.args.get('apprSeq') == None:\n self.apprSeq = ''\n if request.args.get('contentSeq') != None:\n self.contentSeq = request.args.get('contentSeq')\n elif request.args.get('contentSeq') == None:\n self.contentSeq = ''\n return render_template('views/pages/billings/billingDetail.html',\n admin_view=self)\n\n @expose('/billings')\n def billings(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n return render_template('views/pages/billings/billings.html',\n admin_view=self)\n\n @expose('/billingsDetail')\n def billingsDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.seq = request.args.get('seq')\n return render_template('views/pages/billings/billingsDetail.html',\n admin_view=self)\n",
"step-5": "# -*- coding:utf-8 -*-\nfrom flask import redirect, url_for, render_template\nfrom flask.globals import request, session\nfrom flask_admin import BaseView, expose\n\nfrom util import navigator, common\n\n\nclass Billings(BaseView):\n\n @expose('/')\n def index(self):\n return redirect(url_for('.billingHistory'))\n \n @expose('/billingHistory')\n def billingHistory(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n \n return render_template(\"views/pages/billings/billingHistory.html\", admin_view=self)\n\n @expose('/billingDetail')\n def billingDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n \n #예비 정산명세서 Seq\n self.billingSeq = request.args.get(\"billingSeq\")\n \n #정산명세서 Seq\n if request.args.get(\"regBillingSeq\") != None :\n self.regBillingSeq = request.args.get(\"regBillingSeq\")\n elif request.args.get(\"regBillingSeq\") == None :\n self.regBillingSeq = \"\"\n \n \n #승인 Seq\n if request.args.get(\"apprSeq\") != None :\n self.apprSeq = request.args.get(\"apprSeq\")\n elif request.args.get(\"apprSeq\") == None :\n self.apprSeq = \"\"\n \n #승인Content Seq\n if request.args.get(\"contentSeq\") != None :\n self.contentSeq = request.args.get(\"contentSeq\")\n elif request.args.get(\"contentSeq\") == None :\n self.contentSeq = \"\"\n \n return render_template(\"views/pages/billings/billingDetail.html\", admin_view=self)\n\n @expose('/billings')\n def billings(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n \n return render_template(\"views/pages/billings/billings.html\", admin_view=self)\n \n @expose('/billingsDetail')\n def billingsDetail(self):\n self.menuItems = session['navigator']\n self.pageAuth = common.getPageAuth()\n self.seq = request.args.get(\"seq\")\n \n return render_template(\"views/pages/billings/billingsDetail.html\", admin_view=self)\n \n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('[')
sys.stdout.close()
for name in names:
url = ('https://newsapi.org/v2/everything?sources=' + name +
'&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e'
)
count = 0
response = requests.get(url)
sys.stdout = open('/sources/output20180401.json', 'a+')
print(json.dumps(response.json()))
print(',')
sys.stdout.close()
<|reserved_special_token_0|>
print(']')
sys.stdout.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
names = ['abc-news', 'abc-news-au', 'aftenposten', 'al-jazeera-english',
'ars-technica', 'associated-press', 'australian-financial-review',
'axios', 'bbc-news', 'bbc-sport', 'bleacher-report', 'bloomberg',
'breitbart-news', 'business-insider', 'business-insider-uk', 'buzzfeed',
'cbc-news', 'cbs-news', 'cnbc', 'cnn', 'crypto-coins-news',
'daily-mail', 'engadget', 'entertainment-weekly', 'espn', 'engadget',
'espn-cric-info', 'financial-post', 'financial-times',
'football-italia', 'fortune', 'fox-sports', 'fox-news', 'four-four-two',
'google-news', 'google-news-ca', 'google-news-uk',
'google-news-ingoogle-news-au', 'hacker-new', 'ign', 'independent',
'mashable', 'metro', 'mirror', 'mtv-news', 'medical-news-today',
'mtv-news-uk', 'national-geographic', 'msnbc', 'nbc-news', 'news24',
'new-scientist', 'newsweek', 'news-com-au', 'new-york-magazine',
'next-big-future', 'nfl-news', 'nhl-news', 'politico', 'polygon',
'recode', 'reuters', 'reddit-r-all', 'rte', 'techradar',
'the-economist', 'the-globe-and-mail', 'the-guardian-au',
'the-guardian-uk', 'techcrunch', 'the-hill', 'talksport', 'the-hindu',
'the-irish-times', 'the-lad-bible', 'the-huffington-post',
'the-new-york-times', 'the-times-of-india', 'the-telegraph',
'the-verge', 'the-wall-street-journal', 'the-washington-post', 'time',
'usa-today', 'vice-news', 'wired', 'xinhua-net', 'der-tagesspiegel']
sys.stdout = open('/sources/output20180401.json', 'a+')
print('[')
sys.stdout.close()
for name in names:
url = ('https://newsapi.org/v2/everything?sources=' + name +
'&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e'
)
count = 0
response = requests.get(url)
sys.stdout = open('/sources/output20180401.json', 'a+')
print(json.dumps(response.json()))
print(',')
sys.stdout.close()
sys.stdout = open('/sources/output20180401.json', 'a+')
print(']')
sys.stdout.close()
<|reserved_special_token_1|>
import requests
import json
import io
import sys
names = ['abc-news', 'abc-news-au', 'aftenposten', 'al-jazeera-english',
'ars-technica', 'associated-press', 'australian-financial-review',
'axios', 'bbc-news', 'bbc-sport', 'bleacher-report', 'bloomberg',
'breitbart-news', 'business-insider', 'business-insider-uk', 'buzzfeed',
'cbc-news', 'cbs-news', 'cnbc', 'cnn', 'crypto-coins-news',
'daily-mail', 'engadget', 'entertainment-weekly', 'espn', 'engadget',
'espn-cric-info', 'financial-post', 'financial-times',
'football-italia', 'fortune', 'fox-sports', 'fox-news', 'four-four-two',
'google-news', 'google-news-ca', 'google-news-uk',
'google-news-ingoogle-news-au', 'hacker-new', 'ign', 'independent',
'mashable', 'metro', 'mirror', 'mtv-news', 'medical-news-today',
'mtv-news-uk', 'national-geographic', 'msnbc', 'nbc-news', 'news24',
'new-scientist', 'newsweek', 'news-com-au', 'new-york-magazine',
'next-big-future', 'nfl-news', 'nhl-news', 'politico', 'polygon',
'recode', 'reuters', 'reddit-r-all', 'rte', 'techradar',
'the-economist', 'the-globe-and-mail', 'the-guardian-au',
'the-guardian-uk', 'techcrunch', 'the-hill', 'talksport', 'the-hindu',
'the-irish-times', 'the-lad-bible', 'the-huffington-post',
'the-new-york-times', 'the-times-of-india', 'the-telegraph',
'the-verge', 'the-wall-street-journal', 'the-washington-post', 'time',
'usa-today', 'vice-news', 'wired', 'xinhua-net', 'der-tagesspiegel']
sys.stdout = open('/sources/output20180401.json', 'a+')
print('[')
sys.stdout.close()
for name in names:
url = ('https://newsapi.org/v2/everything?sources=' + name +
'&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e'
)
count = 0
response = requests.get(url)
sys.stdout = open('/sources/output20180401.json', 'a+')
print(json.dumps(response.json()))
print(',')
sys.stdout.close()
sys.stdout = open('/sources/output20180401.json', 'a+')
print(']')
sys.stdout.close()
<|reserved_special_token_1|>
import requests
import json
import io
import sys
names = ['abc-news', 'abc-news-au', 'aftenposten','al-jazeera-english','ars-technica','associated-press','australian-financial-review','axios', 'bbc-news', 'bbc-sport','bleacher-report', 'bloomberg','breitbart-news','business-insider', 'business-insider-uk','buzzfeed','cbc-news', 'cbs-news','cnbc','cnn','crypto-coins-news','daily-mail','engadget','entertainment-weekly','espn','engadget','espn-cric-info','financial-post','financial-times','football-italia','fortune','fox-sports','fox-news','four-four-two','google-news','google-news-ca','google-news-uk','google-news-in''google-news-au','hacker-new','ign','independent','mashable','metro','mirror','mtv-news','medical-news-today','mtv-news-uk','national-geographic','msnbc','nbc-news','news24','new-scientist','newsweek','news-com-au','new-york-magazine','next-big-future','nfl-news','nhl-news','politico','polygon','recode','reuters','reddit-r-all','rte','techradar','the-economist','the-globe-and-mail','the-guardian-au','the-guardian-uk','techcrunch','the-hill','talksport','the-hindu','the-irish-times','the-lad-bible','the-huffington-post','the-new-york-times','the-times-of-india','the-telegraph','the-verge','the-wall-street-journal','the-washington-post','time','usa-today','vice-news','wired','xinhua-net','der-tagesspiegel']
sys.stdout=open("/sources/output20180401.json","a+")
print("[")
sys.stdout.close()
for name in names:
url = ('https://newsapi.org/v2/everything?sources='+name+'&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e')
count = 0
response = requests.get(url)
sys.stdout=open("/sources/output20180401.json","a+")
print(json.dumps(response.json()))
print(",")
sys.stdout.close()
sys.stdout=open("/sources/output20180401.json","a+")
print("]")
sys.stdout.close()
#&from=2018-03-28
|
flexible
|
{
"blob_id": "590baf17d9fdad9f52869fa354112d3aa5f7d5f0",
"index": 8943,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('[')\nsys.stdout.close()\nfor name in names:\n url = ('https://newsapi.org/v2/everything?sources=' + name +\n '&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e'\n )\n count = 0\n response = requests.get(url)\n sys.stdout = open('/sources/output20180401.json', 'a+')\n print(json.dumps(response.json()))\n print(',')\n sys.stdout.close()\n<mask token>\nprint(']')\nsys.stdout.close()\n",
"step-3": "<mask token>\nnames = ['abc-news', 'abc-news-au', 'aftenposten', 'al-jazeera-english',\n 'ars-technica', 'associated-press', 'australian-financial-review',\n 'axios', 'bbc-news', 'bbc-sport', 'bleacher-report', 'bloomberg',\n 'breitbart-news', 'business-insider', 'business-insider-uk', 'buzzfeed',\n 'cbc-news', 'cbs-news', 'cnbc', 'cnn', 'crypto-coins-news',\n 'daily-mail', 'engadget', 'entertainment-weekly', 'espn', 'engadget',\n 'espn-cric-info', 'financial-post', 'financial-times',\n 'football-italia', 'fortune', 'fox-sports', 'fox-news', 'four-four-two',\n 'google-news', 'google-news-ca', 'google-news-uk',\n 'google-news-ingoogle-news-au', 'hacker-new', 'ign', 'independent',\n 'mashable', 'metro', 'mirror', 'mtv-news', 'medical-news-today',\n 'mtv-news-uk', 'national-geographic', 'msnbc', 'nbc-news', 'news24',\n 'new-scientist', 'newsweek', 'news-com-au', 'new-york-magazine',\n 'next-big-future', 'nfl-news', 'nhl-news', 'politico', 'polygon',\n 'recode', 'reuters', 'reddit-r-all', 'rte', 'techradar',\n 'the-economist', 'the-globe-and-mail', 'the-guardian-au',\n 'the-guardian-uk', 'techcrunch', 'the-hill', 'talksport', 'the-hindu',\n 'the-irish-times', 'the-lad-bible', 'the-huffington-post',\n 'the-new-york-times', 'the-times-of-india', 'the-telegraph',\n 'the-verge', 'the-wall-street-journal', 'the-washington-post', 'time',\n 'usa-today', 'vice-news', 'wired', 'xinhua-net', 'der-tagesspiegel']\nsys.stdout = open('/sources/output20180401.json', 'a+')\nprint('[')\nsys.stdout.close()\nfor name in names:\n url = ('https://newsapi.org/v2/everything?sources=' + name +\n '&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e'\n )\n count = 0\n response = requests.get(url)\n sys.stdout = open('/sources/output20180401.json', 'a+')\n print(json.dumps(response.json()))\n print(',')\n sys.stdout.close()\nsys.stdout = open('/sources/output20180401.json', 'a+')\nprint(']')\nsys.stdout.close()\n",
"step-4": "import requests\nimport json\nimport io\nimport sys\nnames = ['abc-news', 'abc-news-au', 'aftenposten', 'al-jazeera-english',\n 'ars-technica', 'associated-press', 'australian-financial-review',\n 'axios', 'bbc-news', 'bbc-sport', 'bleacher-report', 'bloomberg',\n 'breitbart-news', 'business-insider', 'business-insider-uk', 'buzzfeed',\n 'cbc-news', 'cbs-news', 'cnbc', 'cnn', 'crypto-coins-news',\n 'daily-mail', 'engadget', 'entertainment-weekly', 'espn', 'engadget',\n 'espn-cric-info', 'financial-post', 'financial-times',\n 'football-italia', 'fortune', 'fox-sports', 'fox-news', 'four-four-two',\n 'google-news', 'google-news-ca', 'google-news-uk',\n 'google-news-ingoogle-news-au', 'hacker-new', 'ign', 'independent',\n 'mashable', 'metro', 'mirror', 'mtv-news', 'medical-news-today',\n 'mtv-news-uk', 'national-geographic', 'msnbc', 'nbc-news', 'news24',\n 'new-scientist', 'newsweek', 'news-com-au', 'new-york-magazine',\n 'next-big-future', 'nfl-news', 'nhl-news', 'politico', 'polygon',\n 'recode', 'reuters', 'reddit-r-all', 'rte', 'techradar',\n 'the-economist', 'the-globe-and-mail', 'the-guardian-au',\n 'the-guardian-uk', 'techcrunch', 'the-hill', 'talksport', 'the-hindu',\n 'the-irish-times', 'the-lad-bible', 'the-huffington-post',\n 'the-new-york-times', 'the-times-of-india', 'the-telegraph',\n 'the-verge', 'the-wall-street-journal', 'the-washington-post', 'time',\n 'usa-today', 'vice-news', 'wired', 'xinhua-net', 'der-tagesspiegel']\nsys.stdout = open('/sources/output20180401.json', 'a+')\nprint('[')\nsys.stdout.close()\nfor name in names:\n url = ('https://newsapi.org/v2/everything?sources=' + name +\n '&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e'\n )\n count = 0\n response = requests.get(url)\n sys.stdout = open('/sources/output20180401.json', 'a+')\n print(json.dumps(response.json()))\n print(',')\n sys.stdout.close()\nsys.stdout = open('/sources/output20180401.json', 'a+')\nprint(']')\nsys.stdout.close()\n",
"step-5": "import requests\nimport json\nimport io\nimport sys\n\nnames = ['abc-news', 'abc-news-au', 'aftenposten','al-jazeera-english','ars-technica','associated-press','australian-financial-review','axios', 'bbc-news', 'bbc-sport','bleacher-report', 'bloomberg','breitbart-news','business-insider', 'business-insider-uk','buzzfeed','cbc-news', 'cbs-news','cnbc','cnn','crypto-coins-news','daily-mail','engadget','entertainment-weekly','espn','engadget','espn-cric-info','financial-post','financial-times','football-italia','fortune','fox-sports','fox-news','four-four-two','google-news','google-news-ca','google-news-uk','google-news-in''google-news-au','hacker-new','ign','independent','mashable','metro','mirror','mtv-news','medical-news-today','mtv-news-uk','national-geographic','msnbc','nbc-news','news24','new-scientist','newsweek','news-com-au','new-york-magazine','next-big-future','nfl-news','nhl-news','politico','polygon','recode','reuters','reddit-r-all','rte','techradar','the-economist','the-globe-and-mail','the-guardian-au','the-guardian-uk','techcrunch','the-hill','talksport','the-hindu','the-irish-times','the-lad-bible','the-huffington-post','the-new-york-times','the-times-of-india','the-telegraph','the-verge','the-wall-street-journal','the-washington-post','time','usa-today','vice-news','wired','xinhua-net','der-tagesspiegel']\nsys.stdout=open(\"/sources/output20180401.json\",\"a+\")\nprint(\"[\")\nsys.stdout.close()\nfor name in names:\n\turl = ('https://newsapi.org/v2/everything?sources='+name+'&pageSize=100&language=en&from=2018-04-01&to=2018-04-01&apiKey=c0456841cb6a4dc794e3ec64e86b7e6e')\n\tcount = 0\n\tresponse = requests.get(url) \n\tsys.stdout=open(\"/sources/output20180401.json\",\"a+\")\n\tprint(json.dumps(response.json()))\n\tprint(\",\")\n\tsys.stdout.close()\n\nsys.stdout=open(\"/sources/output20180401.json\",\"a+\")\nprint(\"]\")\nsys.stdout.close()\n#&from=2018-03-28\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
###############################################################################
# \file
#
# $Id:$
#
# Copyright (C) Brno University of Technology
#
# This file is part of software developed by Robo@FIT group.
#
# Author: Tomas Lokaj
# Supervised by: Michal Spanel (spanel@fit.vutbr.cz)
# Date: 12/09/2012
#
# This file is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
import roslib; roslib.load_manifest('srs_interaction_primitives')
import rospy
import actionlib
from std_msgs.msg import *
from visualization_msgs.msg import *
from geometry_msgs.msg import *
from srs_interaction_primitives.msg import *
import random
import time
from srs_interaction_primitives.srv import ClickablePositions
if __name__ == '__main__':
rospy.init_node('clickable_positions_action_client', anonymous=True)
#===========================================================================
# rospy.wait_for_service('interaction_primitives/clickable_positions')
# click_positions = rospy.ServiceProxy('interaction_primitives/clickable_positions', ClickablePositions)
#
# color = ColorRGBA()
# color.r = random.uniform(0, 1)
# color.g = random.uniform(0, 1)
# color.b = random.uniform(0, 1)
# color.a = 1;
#
# radius = random.uniform(0, 1)
#
# positions = []
# for i in range(0, random.randint(2, 10)):
# positions.append(Point(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0)))
#
# frame_id = "/world"
#
# topic = str(random.randint(0, 10000))
#
# resp = click_positions(frame_id, topic, radius, color, positions)
#
#===========================================================================
client = actionlib.SimpleActionClient("clickable_positions_server", ClickablePositionsAction)
client.wait_for_server()
rospy.loginfo("Server ready")
goal = ClickablePositionsGoal()
color = ColorRGBA()
color.r = random.uniform(0, 1)
color.g = random.uniform(0, 1)
color.b = random.uniform(0, 1)
color.a = 1;
goal.topic_suffix = str(random.randint(0, 10000))
goal.color = color
goal.radius = random.uniform(0, 1)
for i in range(0, random.randint(2, 10)):
goal.positions.append(Point(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0)))
goal.frame_id = "/world"
# Fill in the goal here
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(50.0))
if client.get_state() == 3:
rospy.loginfo("Goal completed:")
print client.get_result()
else:
rospy.logwarn("Action was preempted")
|
normal
|
{
"blob_id": "3bf1b4cfce55820605653d9dc57bab839f2dea55",
"index": 5864,
"step-1": "#!/usr/bin/env python\n###############################################################################\n# \\file\n#\n# $Id:$\n#\n# Copyright (C) Brno University of Technology\n#\n# This file is part of software developed by Robo@FIT group.\n# \n# Author: Tomas Lokaj\n# Supervised by: Michal Spanel (spanel@fit.vutbr.cz)\n# Date: 12/09/2012\n#\n# This file is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This file is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n# \n# You should have received a copy of the GNU Lesser General Public License\n# along with this file. If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport roslib; roslib.load_manifest('srs_interaction_primitives')\nimport rospy\nimport actionlib\nfrom std_msgs.msg import *\nfrom visualization_msgs.msg import *\nfrom geometry_msgs.msg import *\nfrom srs_interaction_primitives.msg import *\nimport random\nimport time\n\nfrom srs_interaction_primitives.srv import ClickablePositions \n\n\nif __name__ == '__main__':\n rospy.init_node('clickable_positions_action_client', anonymous=True)\n \n #===========================================================================\n # rospy.wait_for_service('interaction_primitives/clickable_positions')\n # click_positions = rospy.ServiceProxy('interaction_primitives/clickable_positions', ClickablePositions)\n # \n # color = ColorRGBA()\n # color.r = random.uniform(0, 1)\n # color.g = random.uniform(0, 1)\n # color.b = random.uniform(0, 1)\n # color.a = 1; \n # \n # radius = random.uniform(0, 1)\n # \n # positions = []\n # for i in range(0, random.randint(2, 10)):\n # positions.append(Point(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0)))\n # \n # frame_id = \"/world\"\n # \n # topic = str(random.randint(0, 10000))\n # \n # resp = click_positions(frame_id, topic, radius, color, positions)\n # \n #===========================================================================\n \n client = actionlib.SimpleActionClient(\"clickable_positions_server\", ClickablePositionsAction)\n client.wait_for_server()\n rospy.loginfo(\"Server ready\")\n\n goal = ClickablePositionsGoal()\n color = ColorRGBA()\n color.r = random.uniform(0, 1)\n color.g = random.uniform(0, 1)\n color.b = random.uniform(0, 1)\n color.a = 1; \n goal.topic_suffix = str(random.randint(0, 10000))\n goal.color = color\n goal.radius = random.uniform(0, 1)\n for i in range(0, random.randint(2, 10)):\n goal.positions.append(Point(random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0), random.uniform(-10.0, 10.0)))\n goal.frame_id = \"/world\"\n # Fill in the goal here\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(50.0))\n if client.get_state() == 3:\n rospy.loginfo(\"Goal completed:\")\n print client.get_result()\n else:\n rospy.logwarn(\"Action was preempted\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('login', '0014_auto_20210529_1637')]
operations = [migrations.CreateModel(name='Location', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('street', models.CharField(blank=True,
max_length=255, null=True)), ('postal_code', models.IntegerField(
blank=True, null=True)), ('city', models.CharField(blank=True,
max_length=255, null=True)), ('country', models.CharField(blank=
True, max_length=255, null=True)), ('facility', models.CharField(
blank=True, max_length=255, null=True))]), migrations.CreateModel(
name='StudySession', fields=[('id', models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('is_active', models.BooleanField(default=True)), (
'start_time', models.TimeField()), ('end_time', models.TimeField()),
('date', models.DateField()), ('available_spots', models.
IntegerField(default=1)), ('taken_spots', models.IntegerField(
default=0)), ('description', models.CharField(blank=True,
max_length=500, null=True)), ('location', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=
'study_sessions.location')), ('subject', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='login.subject'))]),
migrations.CreateModel(name='Participant', fields=[('id', models.
BigAutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('study_session', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, to='study_sessions.studysession'
)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, to=settings.AUTH_USER_MODEL))])]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('login', '0014_auto_20210529_1637')]
operations = [migrations.CreateModel(name='Location', fields=[('id',
models.BigAutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('street', models.CharField(blank=True,
max_length=255, null=True)), ('postal_code', models.IntegerField(
blank=True, null=True)), ('city', models.CharField(blank=True,
max_length=255, null=True)), ('country', models.CharField(blank=
True, max_length=255, null=True)), ('facility', models.CharField(
blank=True, max_length=255, null=True))]), migrations.CreateModel(
name='StudySession', fields=[('id', models.BigAutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name=
'ID')), ('is_active', models.BooleanField(default=True)), (
'start_time', models.TimeField()), ('end_time', models.TimeField()),
('date', models.DateField()), ('available_spots', models.
IntegerField(default=1)), ('taken_spots', models.IntegerField(
default=0)), ('description', models.CharField(blank=True,
max_length=500, null=True)), ('location', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=
'study_sessions.location')), ('subject', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to='login.subject'))]),
migrations.CreateModel(name='Participant', fields=[('id', models.
BigAutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('study_session', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, to='study_sessions.studysession'
)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, to=settings.AUTH_USER_MODEL))])]
<|reserved_special_token_1|>
# Generated by Django 3.2.3 on 2021-05-29 16:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('login', '0014_auto_20210529_1637'),
]
operations = [
migrations.CreateModel(
name='Location',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street', models.CharField(blank=True, max_length=255, null=True)),
('postal_code', models.IntegerField(blank=True, null=True)),
('city', models.CharField(blank=True, max_length=255, null=True)),
('country', models.CharField(blank=True, max_length=255, null=True)),
('facility', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='StudySession',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_active', models.BooleanField(default=True)),
('start_time', models.TimeField()),
('end_time', models.TimeField()),
('date', models.DateField()),
('available_spots', models.IntegerField(default=1)),
('taken_spots', models.IntegerField(default=0)),
('description', models.CharField(blank=True, max_length=500, null=True)),
('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.location')),
('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='login.subject')),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('study_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.studysession')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
flexible
|
{
"blob_id": "6285d1665bacbff746f44f42ce65981f937fff64",
"index": 4189,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('login', '0014_auto_20210529_1637')]\n operations = [migrations.CreateModel(name='Location', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('street', models.CharField(blank=True,\n max_length=255, null=True)), ('postal_code', models.IntegerField(\n blank=True, null=True)), ('city', models.CharField(blank=True,\n max_length=255, null=True)), ('country', models.CharField(blank=\n True, max_length=255, null=True)), ('facility', models.CharField(\n blank=True, max_length=255, null=True))]), migrations.CreateModel(\n name='StudySession', fields=[('id', models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('is_active', models.BooleanField(default=True)), (\n 'start_time', models.TimeField()), ('end_time', models.TimeField()),\n ('date', models.DateField()), ('available_spots', models.\n IntegerField(default=1)), ('taken_spots', models.IntegerField(\n default=0)), ('description', models.CharField(blank=True,\n max_length=500, null=True)), ('location', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'study_sessions.location')), ('subject', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='login.subject'))]),\n migrations.CreateModel(name='Participant', fields=[('id', models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('study_session', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='study_sessions.studysession'\n )), ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('login', '0014_auto_20210529_1637')]\n operations = [migrations.CreateModel(name='Location', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('street', models.CharField(blank=True,\n max_length=255, null=True)), ('postal_code', models.IntegerField(\n blank=True, null=True)), ('city', models.CharField(blank=True,\n max_length=255, null=True)), ('country', models.CharField(blank=\n True, max_length=255, null=True)), ('facility', models.CharField(\n blank=True, max_length=255, null=True))]), migrations.CreateModel(\n name='StudySession', fields=[('id', models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('is_active', models.BooleanField(default=True)), (\n 'start_time', models.TimeField()), ('end_time', models.TimeField()),\n ('date', models.DateField()), ('available_spots', models.\n IntegerField(default=1)), ('taken_spots', models.IntegerField(\n default=0)), ('description', models.CharField(blank=True,\n max_length=500, null=True)), ('location', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'study_sessions.location')), ('subject', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='login.subject'))]),\n migrations.CreateModel(name='Participant', fields=[('id', models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('study_session', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='study_sessions.studysession'\n )), ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-05-29 16:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('login', '0014_auto_20210529_1637'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Location',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('street', models.CharField(blank=True, max_length=255, null=True)),\n ('postal_code', models.IntegerField(blank=True, null=True)),\n ('city', models.CharField(blank=True, max_length=255, null=True)),\n ('country', models.CharField(blank=True, max_length=255, null=True)),\n ('facility', models.CharField(blank=True, max_length=255, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='StudySession',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_active', models.BooleanField(default=True)),\n ('start_time', models.TimeField()),\n ('end_time', models.TimeField()),\n ('date', models.DateField()),\n ('available_spots', models.IntegerField(default=1)),\n ('taken_spots', models.IntegerField(default=0)),\n ('description', models.CharField(blank=True, max_length=500, null=True)),\n ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.location')),\n ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='login.subject')),\n ],\n ),\n migrations.CreateModel(\n name='Participant',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('study_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.studysession')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
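For orientation, the record above is an initial Django migration; below is a minimal sketch of the models.py that would produce those CreateModel operations. Field names and options are read directly off the migration; the module layout and the 'login.Subject' import path are assumptions, not part of the record.

from django.conf import settings
from django.db import models


class Location(models.Model):
    # Address fields mirror the migration's Location CreateModel
    street = models.CharField(max_length=255, blank=True, null=True)
    postal_code = models.IntegerField(blank=True, null=True)
    city = models.CharField(max_length=255, blank=True, null=True)
    country = models.CharField(max_length=255, blank=True, null=True)
    facility = models.CharField(max_length=255, blank=True, null=True)


class StudySession(models.Model):
    is_active = models.BooleanField(default=True)
    start_time = models.TimeField()
    end_time = models.TimeField()
    date = models.DateField()
    available_spots = models.IntegerField(default=1)
    taken_spots = models.IntegerField(default=0)
    description = models.CharField(max_length=500, blank=True, null=True)
    location = models.ForeignKey(Location, on_delete=models.CASCADE)
    subject = models.ForeignKey('login.Subject', on_delete=models.CASCADE)


class Participant(models.Model):
    study_session = models.ForeignKey(StudySession, on_delete=models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

The migration would then be applied with python manage.py migrate, with the swappable user dependency resolved from settings.AUTH_USER_MODEL.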
<|reserved_special_token_0|>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
<|reserved_special_token_0|>
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders) - 2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +
'.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t'
)
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir + '/' + f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE
.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
summary_data = pd.merge(summary_data,
match_collapsed_data, on='SEQUENCE', sort=False,
how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE': []})
time_end = time.time()
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.
SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data = pd.read_csv(manifest_file, header='infer', sep=',')
<|reserved_special_token_0|>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders) - 2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +
'.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t'
)
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir + '/' + f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE
.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
summary_data = pd.merge(summary_data,
match_collapsed_data, on='SEQUENCE', sort=False,
how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE': []})
time_end = time.time()
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.
SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))
<|reserved_special_token_1|>
import os
import os.path
import numpy as np
import pandas as pd
import collections
import subprocess
from pathlib import Path
import time
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data = pd.read_csv(manifest_file, header='infer', sep=',')
<|reserved_special_token_0|>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders) - 2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +
'.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t'
)
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir + '/' + f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE
.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
summary_data = pd.merge(summary_data,
match_collapsed_data, on='SEQUENCE', sort=False,
how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE': []})
time_end = time.time()
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.
SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))
<|reserved_special_token_1|>
## PURPOSE: get reads for certain motifs across certain tumors
## INPUT: manifest data all-tumor-manifest.csv
## collapsed fastq files sample.converted.unpaired.fastq.collapsed
## OUTPUT: table containing reads for specific motif across samples motif.tumor.common.reads.fastq.collapsed.summary.tsv
import os
import os.path
import numpy as np
import pandas as pd
import collections
import subprocess
from pathlib import Path
import time
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data =pd.read_csv(manifest_file, header='infer', sep=',')
'''
file = base_path + 'TARGET/TARGET-manifest.csv'
data = pd.read_csv(file, header='infer', sep=',')
data['DISEASE.ABBV'] = 'TARGET'
manifest_data = pd.concat([manifest_data, data])
print(manifest_data.shape)
manifest_data.to_csv(manifest_file, sep=',', index=False)
'''
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample)-1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV']==tumor) & (manifest_data['NAME']==name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders)-2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = base_path + 'motif_reads/' + mirna + '/' + motif + '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv'
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE':[]})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE':[]})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
#matched_seq = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir+'/'+f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
#print(collapsed_data.shape[0])
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
#print(collapsed_data.shape[0])
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data #pd.DataFrame(columns = ['READS', 'SEQUENCE'])
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
summary_data = pd.merge(summary_data, match_collapsed_data, on='SEQUENCE', sort=False, how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE':[]})
time_end = time.time()
#print('TUMOR: ' + tumor + ' SAMPLE: ' + str(patient) + ' TOTAL TIME: ' + str((time_end-time_start)/60) + ' ROWS: ' + str(num_rows))
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end-total_time_start))
|
flexible
|
{
"blob_id": "ddabceb223f4e457a0f69af5abf793ae72e5f432",
"index": 1465,
"step-1": "<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\n<mask token>\nfor subdir, dirs, files in os.walk(base_path):\n if '/collapsed_fastq' in subdir:\n folders = subdir.split('/')\n tumor = folders[len(folders) - 2]\n if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n continue\n print(tumor)\n summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +\n '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t'\n )\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n total_time_start = time.time()\n for f in os.listdir(subdir):\n time_start = time.time()\n if f[0] == '.':\n break\n patient = f.split('.')[0]\n id = getManifestID(patient, tumor)\n if id not in matched_ids:\n matched_ids.append(id)\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n summary_data = None\n collapsed_file = subdir + '/' + f\n collapsed_data = getCollapsedFastqDataframe(collapsed_file)\n if len(common_seqs) > 0:\n collapsed_data = collapsed_data[collapsed_data.SEQUENCE\n .isin(common_seqs)]\n num_rows = collapsed_data.shape[0]\n collapsed_data.columns = [str(id), 'SEQUENCE']\n match_collapsed_data = collapsed_data\n match_collapsed_data.columns = [str(id), 'SEQUENCE']\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n summary_data = pd.merge(summary_data,\n match_collapsed_data, on='SEQUENCE', sort=False,\n how='inner')\n else:\n summary_data = match_collapsed_data\n summary_data.to_csv(summary_file, sep='\\t', index=False)\n summary_data = pd.DataFrame({'SEQUENCE': []})\n time_end = time.time()\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n match_summary_data = summary_data.copy()\n for index, row in summary_data.iterrows():\n sequence = str(row['SEQUENCE'])\n if motif not in sequence:\n match_summary_data = match_summary_data[match_summary_data.\n SEQUENCE != sequence]\n match_summary_data.to_csv(summary_file, sep='\\t', index=False)\n total_time_end = time.time()\n print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))\n",
"step-3": "<mask token>\nbase_path = '/media/user/2TB (MAC)/Susanna/'\ncollapsed_ext = '.converted.unpaired.fastq.collapsed'\nmanifest_file = base_path + 'all-tumor-manifest.csv'\nmanifest_data = pd.read_csv(manifest_file, header='infer', sep=',')\n<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\nmotifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']\nmirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']\ni = 1\nmotif = motifs[i]\nmirna = mirnas[i]\nfor subdir, dirs, files in os.walk(base_path):\n if '/collapsed_fastq' in subdir:\n folders = subdir.split('/')\n tumor = folders[len(folders) - 2]\n if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n continue\n print(tumor)\n summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +\n '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t'\n )\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n total_time_start = time.time()\n for f in os.listdir(subdir):\n time_start = time.time()\n if f[0] == '.':\n break\n patient = f.split('.')[0]\n id = getManifestID(patient, tumor)\n if id not in matched_ids:\n matched_ids.append(id)\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n summary_data = None\n collapsed_file = subdir + '/' + f\n collapsed_data = getCollapsedFastqDataframe(collapsed_file)\n if len(common_seqs) > 0:\n collapsed_data = collapsed_data[collapsed_data.SEQUENCE\n .isin(common_seqs)]\n num_rows = collapsed_data.shape[0]\n collapsed_data.columns = [str(id), 'SEQUENCE']\n match_collapsed_data = collapsed_data\n match_collapsed_data.columns = [str(id), 'SEQUENCE']\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n summary_data = pd.merge(summary_data,\n match_collapsed_data, on='SEQUENCE', sort=False,\n how='inner')\n else:\n summary_data = match_collapsed_data\n summary_data.to_csv(summary_file, sep='\\t', index=False)\n summary_data = pd.DataFrame({'SEQUENCE': []})\n time_end = time.time()\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n match_summary_data = summary_data.copy()\n for index, row in summary_data.iterrows():\n sequence = str(row['SEQUENCE'])\n if motif not in sequence:\n match_summary_data = match_summary_data[match_summary_data.\n SEQUENCE != sequence]\n match_summary_data.to_csv(summary_file, sep='\\t', index=False)\n total_time_end = time.time()\n print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))\n",
"step-4": "import os\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport collections\nimport subprocess\nfrom pathlib import Path\nimport time\nbase_path = '/media/user/2TB (MAC)/Susanna/'\ncollapsed_ext = '.converted.unpaired.fastq.collapsed'\nmanifest_file = base_path + 'all-tumor-manifest.csv'\nmanifest_data = pd.read_csv(manifest_file, header='infer', sep=',')\n<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\nmotifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']\nmirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']\ni = 1\nmotif = motifs[i]\nmirna = mirnas[i]\nfor subdir, dirs, files in os.walk(base_path):\n if '/collapsed_fastq' in subdir:\n folders = subdir.split('/')\n tumor = folders[len(folders) - 2]\n if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n continue\n print(tumor)\n summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +\n '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t'\n )\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n total_time_start = time.time()\n for f in os.listdir(subdir):\n time_start = time.time()\n if f[0] == '.':\n break\n patient = f.split('.')[0]\n id = getManifestID(patient, tumor)\n if id not in matched_ids:\n matched_ids.append(id)\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n summary_data = None\n collapsed_file = subdir + '/' + f\n collapsed_data = getCollapsedFastqDataframe(collapsed_file)\n if len(common_seqs) > 0:\n collapsed_data = collapsed_data[collapsed_data.SEQUENCE\n .isin(common_seqs)]\n num_rows = collapsed_data.shape[0]\n collapsed_data.columns = [str(id), 'SEQUENCE']\n match_collapsed_data = collapsed_data\n match_collapsed_data.columns = [str(id), 'SEQUENCE']\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n summary_data = pd.merge(summary_data,\n match_collapsed_data, on='SEQUENCE', sort=False,\n how='inner')\n else:\n summary_data = match_collapsed_data\n summary_data.to_csv(summary_file, sep='\\t', index=False)\n summary_data = pd.DataFrame({'SEQUENCE': []})\n time_end = time.time()\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n match_summary_data = summary_data.copy()\n for index, row in summary_data.iterrows():\n sequence = str(row['SEQUENCE'])\n if motif not in sequence:\n match_summary_data = match_summary_data[match_summary_data.\n SEQUENCE != sequence]\n match_summary_data.to_csv(summary_file, sep='\\t', index=False)\n total_time_end = time.time()\n print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))\n",
"step-5": "## PURPOSE: get reads for certain motifs across certain tumors\n## INPUT: manifest data \t\t\t\t\t\t\t all-tumor-manifest.csv\n## \t\t collapsed fastq files \tsample.converted.unpaired.fastq.collapsed\n## OUTPUT: table containing reads for specific motif across samples \tmotif.tumor.common.reads.fastq.collapsed.summary.tsv\nimport os\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport collections\nimport subprocess\nfrom pathlib import Path\nimport time\n\nbase_path = '/media/user/2TB (MAC)/Susanna/'\ncollapsed_ext = '.converted.unpaired.fastq.collapsed'\n\nmanifest_file = base_path + 'all-tumor-manifest.csv'\nmanifest_data =pd.read_csv(manifest_file, header='infer', sep=',')\n'''\nfile = base_path + 'TARGET/TARGET-manifest.csv'\ndata = pd.read_csv(file, header='infer', sep=',')\ndata['DISEASE.ABBV'] = 'TARGET'\n\nmanifest_data = pd.concat([manifest_data, data])\nprint(manifest_data.shape)\nmanifest_data.to_csv(manifest_file, sep=',', index=False)\n'''\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample)-1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df \n\ndef getManifestID(name, tumor):\n\tid = manifest_data.loc[(manifest_data['DISEASE.ABBV']==tumor) & (manifest_data['NAME']==name)]['ID']\n\tid = id.tolist()[0]\n\tid = str(id)\n\treturn str(id)\n\n\nmotifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']\nmirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']\n\ni = 1\nmotif = motifs[i]\nmirna = mirnas[i]\n\n\nfor subdir, dirs, files in os.walk(base_path):\n\tif '/collapsed_fastq' in subdir:\n\n\t\tfolders = subdir.split('/')\n\t\ttumor = folders[len(folders)-2]\n\t\tif tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n\t\t\tcontinue\n\t\tprint(tumor)\n\t\t\n\t\tsummary_file = base_path + 'motif_reads/' + mirna + '/' + motif + '.' 
+ tumor + '.common.reads.fastq.collapsed.summary.tsv'\n\t\tif Path(summary_file).exists():\n\t\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\telse:\n\t\t\tprint('SUMMARY FILE NOT FOUND')\n\t\t\tsummary_data = pd.DataFrame({'SEQUENCE':[]})\n\t\tmatched_ids = list(summary_data)\n\t\tcommon_seqs = list(summary_data['SEQUENCE'])\n\n\t\ttotal_time_start = time.time()\n\n\t\tfor f in os.listdir(subdir):\n\n\t\t\ttime_start = time.time()\n\t\t\tif f[0] == '.':\n\t\t\t\tbreak\n\n\t\t\tpatient = f.split('.')[0]\n\t\t\tid = getManifestID(patient, tumor)\n\t\t\tif id not in matched_ids:\n\n\t\t\t\tmatched_ids.append(id)\n\t\t\t\t\n\t\t\t\tif Path(summary_file).exists():\n\t\t\t\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\t\t\telse:\n\t\t\t\t\tprint('SUMMARY FILE NOT FOUND')\n\t\t\t\t\tsummary_data = pd.DataFrame({'SEQUENCE':[]})\n\t\t\t\tmatched_ids = list(summary_data)\n\t\t\t\tcommon_seqs = list(summary_data['SEQUENCE'])\n\t\t\t\t#matched_seq = list(summary_data['SEQUENCE'])\n\t\t\t\tsummary_data = None\n\n\t\t\t\tcollapsed_file = subdir+'/'+f\t\t\n\t\t\t\tcollapsed_data = getCollapsedFastqDataframe(collapsed_file)\n\t\t\t\t#print(collapsed_data.shape[0])\n\t\t\t\tif len(common_seqs) > 0:\n\t\t\t\t\tcollapsed_data = collapsed_data[collapsed_data.SEQUENCE.isin(common_seqs)]\n\t\t\t\tnum_rows = collapsed_data.shape[0]\n\t\t\t\t#print(collapsed_data.shape[0])\n\t\t\t\tcollapsed_data.columns = [str(id), 'SEQUENCE']\n\t\t\t\tmatch_collapsed_data = collapsed_data #pd.DataFrame(columns = ['READS', 'SEQUENCE'])\n\n\t\t\t\tmatch_collapsed_data.columns = [str(id), 'SEQUENCE']\n\n\t\t\t\tif Path(summary_file).exists():\n\t\t\t\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\t\t\t\tsummary_data = pd.merge(summary_data, match_collapsed_data, on='SEQUENCE', sort=False, how='inner')\n\t\t\t\telse:\n\t\t\t\t\tsummary_data = match_collapsed_data\n\n\n\t\t\t\tsummary_data.to_csv(summary_file, sep='\\t', index=False) \n\t\t\t\tsummary_data = pd.DataFrame({'SEQUENCE':[]})\n\n\t\t\t\ttime_end = time.time()\n\n\t\t\t\t#print('TUMOR: ' + tumor + ' SAMPLE: ' + str(patient) + ' TOTAL TIME: ' + str((time_end-time_start)/60) + ' ROWS: ' + str(num_rows))\n\t\t\n\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\tmatch_summary_data = summary_data.copy()\n\t\tfor index, row in summary_data.iterrows():\n\t\t\tsequence = str(row['SEQUENCE'])\n\t\t\tif motif not in sequence:\n\t\t\t\tmatch_summary_data = match_summary_data[match_summary_data.SEQUENCE != sequence]\n\t\tmatch_summary_data.to_csv(summary_file, sep='\\t', index=False) \n\t\ttotal_time_end = time.time()\n\t\tprint('TOTAl TUMOR TIME: ' + str(total_time_end-total_time_start)) \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
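The script in the record above boils down to one pandas pattern: load each sample's collapsed fastq as a two-column table, rename the read-count column to the sample's manifest ID, and inner-join everything on SEQUENCE so that only sequences shared by all samples survive, keeping rows that carry the motif. A minimal, self-contained sketch of that pattern (file names and sample IDs below are placeholders) could look like:

import pandas as pd

def load_collapsed(path, sample_id):
    # collapsed fastq: whitespace-separated "read_count sequence" rows
    df = pd.read_table(path, header=None, delim_whitespace=True)
    df = df.dropna(axis=1, how='all')
    df.columns = [sample_id, 'SEQUENCE']
    return df

motif = 'TTATCAGACTGAT'
samples = [('100001', 'sampleA.fastq.collapsed'), ('100002', 'sampleB.fastq.collapsed')]

summary = None
for sample_id, path in samples:
    df = load_collapsed(path, sample_id)
    df = df[df.SEQUENCE.str.contains(motif)]  # keep motif-carrying reads only
    summary = df if summary is None else pd.merge(summary, df, on='SEQUENCE', how='inner')

summary.to_csv('motif.summary.tsv', sep='\t', index=False)

Filtering for the motif per file before the inner join yields the same rows as the original script's post-merge filter, since the join already restricts the table to sequences present in every sample.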
import time
import json
from threading import Thread
try:
with open('file.json') as f:
name = json.load(f)
except:
f = open("file.json", "w+")
name = {}
def create(k, v, t='0'):
if k in name:
print("ERROR:The data already exists")
else:
if k.isalpha():
if v.isnumeric() and t.isnumeric():
v = int(v)
t = int(t)
if len(name) < (1024 * 1020 * 1024) and v <= (16 * 1024 * 1024):
if t == 0:
p = [v, t]
else:
p = [v, time.time() + t]
if len(k) <= 32:
name[k] = p
print("Key is created")
with open('file.json', 'w') as json_file:
json.dump(name, json_file)
else:
print("ERROR:Key length Exceeded")
else:
print("ERROR:MEMORY Exceeded!!!")
else:
print("ERROR:INVALID INPUT (NUMERIC ONLY)")
else:
print("ERROR:INVALID KEY INPUT (ALPHABETS ONLY)")
def read(k):
if k not in name:
print("ERROR:Key does not exists Enter a valid key!!")
else:
m = name[k]
if m[1] != 0:
if time.time() < m[1]:
print ( k + "-" + str(m[0]))
else:
print("ERROR: " + k + " Time expired")
else:
print(k + "-" + str(m[0]))
with open('file.json', 'w') as js:
json.dump(name, js)
def delete(k):
if k not in name:
print("ERROR:Key does not exists Enter a valid key!!")
else:
m = name[k]
if m[1] != 0:
if time.time() < m[1]:
del name[k]
print("Key (" + k + ") is deleted")
with open('file.json', 'w') as js:
json.dump(name, js)
else:
print("ERROR:ERROR: " + k + " Time expired")
else:
del name[k]
print("Key (" + k + ") is deleted")
with open('file.json', 'w') as js:
json.dump(name, js)
|
normal
|
{
"blob_id": "430dff54da986df4e3a68018d930735c757d49d0",
"index": 6794,
"step-1": "<mask token>\n\n\ndef create(k, v, t='0'):\n if k in name:\n print('ERROR:The data already exists')\n elif k.isalpha():\n if v.isnumeric() and t.isnumeric():\n v = int(v)\n t = int(t)\n if len(name) < 1024 * 1020 * 1024 and v <= 16 * 1024 * 1024:\n if t == 0:\n p = [v, t]\n else:\n p = [v, time.time() + t]\n if len(k) <= 32:\n name[k] = p\n print('Key is created')\n with open('file.json', 'w') as json_file:\n json.dump(name, json_file)\n else:\n print('ERROR:Key length Exceeded')\n else:\n print('ERROR:MEMORY Exceeded!!!')\n else:\n print('ERROR:INVALID INPUT (NUMERIC ONLY)')\n else:\n print('ERROR:INVALID KEY INPUT (ALPHABETS ONLY)')\n\n\ndef read(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n print(k + '-' + str(m[0]))\n else:\n print('ERROR: ' + k + ' Time expired')\n else:\n print(k + '-' + str(m[0]))\n with open('file.json', 'w') as js:\n json.dump(name, js)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create(k, v, t='0'):\n if k in name:\n print('ERROR:The data already exists')\n elif k.isalpha():\n if v.isnumeric() and t.isnumeric():\n v = int(v)\n t = int(t)\n if len(name) < 1024 * 1020 * 1024 and v <= 16 * 1024 * 1024:\n if t == 0:\n p = [v, t]\n else:\n p = [v, time.time() + t]\n if len(k) <= 32:\n name[k] = p\n print('Key is created')\n with open('file.json', 'w') as json_file:\n json.dump(name, json_file)\n else:\n print('ERROR:Key length Exceeded')\n else:\n print('ERROR:MEMORY Exceeded!!!')\n else:\n print('ERROR:INVALID INPUT (NUMERIC ONLY)')\n else:\n print('ERROR:INVALID KEY INPUT (ALPHABETS ONLY)')\n\n\ndef read(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n print(k + '-' + str(m[0]))\n else:\n print('ERROR: ' + k + ' Time expired')\n else:\n print(k + '-' + str(m[0]))\n with open('file.json', 'w') as js:\n json.dump(name, js)\n\n\ndef delete(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n del name[k]\n print('Key (' + k + ') is deleted')\n with open('file.json', 'w') as js:\n json.dump(name, js)\n else:\n print('ERROR:ERROR: ' + k + ' Time expired')\n else:\n del name[k]\n print('Key (' + k + ') is deleted')\n with open('file.json', 'w') as js:\n json.dump(name, js)\n",
"step-3": "<mask token>\ntry:\n with open('file.json') as f:\n name = json.load(f)\nexcept:\n f = open('file.json', 'w+')\n name = {}\n\n\ndef create(k, v, t='0'):\n if k in name:\n print('ERROR:The data already exists')\n elif k.isalpha():\n if v.isnumeric() and t.isnumeric():\n v = int(v)\n t = int(t)\n if len(name) < 1024 * 1020 * 1024 and v <= 16 * 1024 * 1024:\n if t == 0:\n p = [v, t]\n else:\n p = [v, time.time() + t]\n if len(k) <= 32:\n name[k] = p\n print('Key is created')\n with open('file.json', 'w') as json_file:\n json.dump(name, json_file)\n else:\n print('ERROR:Key length Exceeded')\n else:\n print('ERROR:MEMORY Exceeded!!!')\n else:\n print('ERROR:INVALID INPUT (NUMERIC ONLY)')\n else:\n print('ERROR:INVALID KEY INPUT (ALPHABETS ONLY)')\n\n\ndef read(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n print(k + '-' + str(m[0]))\n else:\n print('ERROR: ' + k + ' Time expired')\n else:\n print(k + '-' + str(m[0]))\n with open('file.json', 'w') as js:\n json.dump(name, js)\n\n\ndef delete(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n del name[k]\n print('Key (' + k + ') is deleted')\n with open('file.json', 'w') as js:\n json.dump(name, js)\n else:\n print('ERROR:ERROR: ' + k + ' Time expired')\n else:\n del name[k]\n print('Key (' + k + ') is deleted')\n with open('file.json', 'w') as js:\n json.dump(name, js)\n",
"step-4": "import time\nimport json\nfrom threading import Thread\ntry:\n with open('file.json') as f:\n name = json.load(f)\nexcept:\n f = open('file.json', 'w+')\n name = {}\n\n\ndef create(k, v, t='0'):\n if k in name:\n print('ERROR:The data already exists')\n elif k.isalpha():\n if v.isnumeric() and t.isnumeric():\n v = int(v)\n t = int(t)\n if len(name) < 1024 * 1020 * 1024 and v <= 16 * 1024 * 1024:\n if t == 0:\n p = [v, t]\n else:\n p = [v, time.time() + t]\n if len(k) <= 32:\n name[k] = p\n print('Key is created')\n with open('file.json', 'w') as json_file:\n json.dump(name, json_file)\n else:\n print('ERROR:Key length Exceeded')\n else:\n print('ERROR:MEMORY Exceeded!!!')\n else:\n print('ERROR:INVALID INPUT (NUMERIC ONLY)')\n else:\n print('ERROR:INVALID KEY INPUT (ALPHABETS ONLY)')\n\n\ndef read(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n print(k + '-' + str(m[0]))\n else:\n print('ERROR: ' + k + ' Time expired')\n else:\n print(k + '-' + str(m[0]))\n with open('file.json', 'w') as js:\n json.dump(name, js)\n\n\ndef delete(k):\n if k not in name:\n print('ERROR:Key does not exists Enter a valid key!!')\n else:\n m = name[k]\n if m[1] != 0:\n if time.time() < m[1]:\n del name[k]\n print('Key (' + k + ') is deleted')\n with open('file.json', 'w') as js:\n json.dump(name, js)\n else:\n print('ERROR:ERROR: ' + k + ' Time expired')\n else:\n del name[k]\n print('Key (' + k + ') is deleted')\n with open('file.json', 'w') as js:\n json.dump(name, js)\n",
"step-5": "import time\r\nimport json\r\nfrom threading import Thread\r\n\r\n\r\ntry:\r\n with open('file.json') as f:\r\n name = json.load(f)\r\nexcept:\r\n f = open(\"file.json\", \"w+\")\r\n name = {}\r\n\r\n\r\ndef create(k, v, t='0'):\r\n if k in name:\r\n print(\"ERROR:The data already exists\")\r\n else:\r\n if k.isalpha():\r\n if v.isnumeric() and t.isnumeric():\r\n v = int(v)\r\n t = int(t)\r\n if len(name) < (1024 * 1020 * 1024) and v <= (16 * 1024 * 1024):\r\n if t == 0:\r\n p = [v, t]\r\n else:\r\n p = [v, time.time() + t]\r\n if len(k) <= 32:\r\n name[k] = p\r\n print(\"Key is created\")\r\n with open('file.json', 'w') as json_file:\r\n json.dump(name, json_file)\r\n else:\r\n print(\"ERROR:Key length Exceeded\")\r\n else:\r\n print(\"ERROR:MEMORY Exceeded!!!\")\r\n else:\r\n print(\"ERROR:INVALID INPUT (NUMERIC ONLY)\")\r\n else:\r\n print(\"ERROR:INVALID KEY INPUT (ALPHABETS ONLY)\")\r\n\r\n\r\ndef read(k):\r\n if k not in name:\r\n print(\"ERROR:Key does not exists Enter a valid key!!\")\r\n else:\r\n m = name[k]\r\n if m[1] != 0:\r\n if time.time() < m[1]:\r\n print ( k + \"-\" + str(m[0]))\r\n else:\r\n print(\"ERROR: \" + k + \" Time expired\")\r\n else:\r\n print(k + \"-\" + str(m[0]))\r\n with open('file.json', 'w') as js:\r\n json.dump(name, js)\r\n\r\n\r\ndef delete(k):\r\n if k not in name:\r\n print(\"ERROR:Key does not exists Enter a valid key!!\")\r\n else:\r\n m = name[k]\r\n if m[1] != 0:\r\n if time.time() < m[1]:\r\n del name[k]\r\n print(\"Key (\" + k + \") is deleted\")\r\n with open('file.json', 'w') as js:\r\n json.dump(name, js)\r\n else:\r\n print(\"ERROR:ERROR: \" + k + \" Time expired\")\r\n else:\r\n del name[k]\r\n print(\"Key (\" + k + \") is deleted\")\r\n with open('file.json', 'w') as js:\r\n json.dump(name, js)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
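The record above implements a small file-backed key-value store with optional per-key TTLs: create() validates an alphabetic key (at most 32 characters) and a numeric value, read() and delete() refuse keys whose expiry timestamp has passed, and every mutation is persisted to file.json. A short usage sketch, assuming the functions are importable from a module (the module name kvstore is hypothetical):

import time
import kvstore  # hypothetical module holding create/read/delete from above

kvstore.create('counter', '42')        # no TTL; prints "Key is created"
kvstore.create('session', '7', '2')    # expires 2 seconds after creation
kvstore.read('counter')                # prints "counter-42"
time.sleep(3)
kvstore.read('session')                # prints "ERROR: session Time expired"
kvstore.delete('counter')              # prints "Key (counter) is deleted"

Values and TTLs are passed as strings because the validation relies on str.isnumeric() before converting to int.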
"""
util - other functions
"""
import torch
import numpy as np
from common_labelme import Config
from torch.autograd import Variable
I = torch.FloatTensor(np.eye(Config.batch_size),)
E = torch.FloatTensor(np.ones((Config.batch_size, Config.batch_size)))
normalize_1 = Config.batch_size
normalize_2 = Config.batch_size * Config.batch_size - Config.batch_size
def mig_loss_function(output1, output2, p):
new_output = output1 / p
m = (new_output @ output2.transpose(1,0))
noise = torch.rand(1)*0.0001
m1 = torch.log(m*I+ I*noise + E - I)
m2 = m*(E-I)
return -(sum(sum(m1)) + Config.batch_size)/normalize_1 + sum(sum(m2)) / normalize_2
def tvd_loss_function(output1, output2, p):
new_output = output1 / p
m = (new_output @ output2.transpose(1,0))
noise = torch.rand(1)*0.0001
m1 = torch.log(m*I + I * noise + E - I)
m2 = torch.log(m*(E-I) + I )
return -(sum(sum(torch.sign(m1))))/normalize_1 + sum(sum(torch.sign(m2))) / normalize_2
def pearson_loss_function(output1, output2, p):
new_output = output1 / p
m = (new_output @ output2.transpose(1,0))
m1 = m*I
m2 = m*(E-I)
m2 = m2*m2
return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum(sum(m2)) - normalize_2) / normalize_2
def reverse_kl_loss_function(output1, output2, p):
new_output = output1 / p
m = (new_output @ output2.transpose(1,0))
m1 = m*I
m1 = -I/(m1.float() + E - I)
m2 = torch.log(m*(E-I) + I)
return -(sum(sum(m1)))/normalize_1 + (-sum(sum(m2)) - normalize_2) / normalize_2
def sh_loss_function(output1, output2, p):
new_output = output1 / p
m = (new_output @ output2.transpose(1,0))
m1 = m*I
m1 = torch.sqrt(I/(m1.float() + E - I))
m2 = torch.sqrt(m*(E-I))
return -(-sum(sum(m1)) + Config.batch_size)/normalize_1 + sum(sum(m2)) / normalize_2
def entropy_loss(outputs):
num = outputs.size()[0]
temp = -outputs * torch.log(outputs+0.0001)
loss = torch.sum(temp)
loss /= num
return loss
def M_step(expert_label,mu):
#---------------------------------------------------------------#
# #
# expert_label size : batch_size * expert_num #
# mu : batch_size * num_classes #
# expert_parameters = expert_num * num_classes * num_classes #
# #
#---------------------------------------------------------------#
if not Config.missing:
normalize = torch.sum(mu, 0).float()
expert_label = expert_label.long()
expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))
for i in range(mu.size()[0]):
for R in range(Config.expert_num):
expert_parameters[R, :, expert_label[i, R]] += mu[i].float()
expert_parameters = expert_parameters / normalize.unsqueeze(1)
else:
normalize = torch.zeros(Config.expert_num,Config.num_classes)
expert_label = expert_label.long()
expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))
for i in range(mu.size()[0]):
for R in range(Config.expert_num):
if expert_label[i,R] < 0:
continue
expert_parameters[R, :, expert_label[i, R]] += mu[i].float()
normalize[R] += mu[i].float()
normalize = normalize + 1 * (normalize == 0).float()
for R in range(Config.expert_num):
expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)
expert_parameters = expert_parameters.cuda()
return expert_parameters
def M_step_p_mbem(t):
p = torch.zeros(Config.num_classes)
t = t.long()
for i in range(t.size(0)):
p[t[i]] += 1
p /= t.size()[0]
return p
def M_step_mbem(expert_label,t):
#---------------------------------------------------------------#
# #
# expert_label size : batch_size * expert_num #
# t : batch_size #
# expert_parameters = expert_num * num_classes * num_classes #
# #
#---------------------------------------------------------------#
normalize = torch.zeros(Config.expert_num, Config.num_classes)
expert_label = expert_label.long()
t = t.long()
expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))
for i in range(t.size()[0]):
for R in range(Config.expert_num):
if expert_label[i, R] < 0:
continue
expert_parameters[R, t[i], expert_label[i, R]] += 1
normalize[R,t[i]] += 1
normalize = normalize + 1 * (normalize == 0).float()
for R in range(Config.expert_num):
expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)
expert_parameters = expert_parameters.cuda()
return expert_parameters
def print_recons_result(right_model, confusion_matrix):
confusion_loss = 0
for i in range(1,len(list(right_model.parameters()))):
para = list(right_model.parameters())[i].detach().cpu()
#print("Expert %d" %i)
local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)
#print(local_confusion_matrix)
residual_matrix = local_confusion_matrix - confusion_matrix[i-1, :, :]
residual = torch.sum(abs(residual_matrix))
confusion_loss += residual
print("Total variation:", confusion_loss.item())
def initial_priori(train_loader):
p = torch.zeros((Config.num_classes))
total = 0
for batch_idx, (left_data, right_data, label) in enumerate(train_loader):
linear_sum = torch.sum(right_data, dim=1)
_, majority = torch.max(linear_sum, 1)
majority = Variable(majority).long()
total += label.size()[0]
for i in range(Config.num_classes):
p[i] += torch.sum(majority == i).float()
p = p/float(total)
return p
def update_priori(model, train_loader):
# waiting for solution
p = torch.zeros((Config.num_classes))
# updating priori by posteri
total = 0
for batch_idx, (left_data, right_data, label) in enumerate(train_loader):
ep = Variable(right_data).float().cuda()
images = Variable(left_data).float().cuda()
outputs = model(images)
_, predicts = torch.max(outputs.data, 1)
total += ep.size()[0]
predicts = predicts.detach().cpu()
for i in range(Config.num_classes):
p[i] += torch.sum(predicts == i).float()
p = p/float(total)
'''
# updating priori by loss
pri = priori
pri = Variable(pri, requires_grad=True)
loss = mig_loss_function(left_outputs.detach(),right_outputs.detach(),p)
loss.backward()
grad = pri.grad
pri = pri.detach() - Config.alpha * grad
pri = torch.exp(pri)
pri = pri / torch.sum(pri)
'''
'''
# true priori
p[0] = 0.5
p[1] = 0.5
'''
return p
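The loss functions at the top of this module all follow the same co-training recipe: scale one classifier's posterior by the prior p, pair it against the second classifier's posterior to form a score matrix M = (output1 / p) @ output2.T, then reward the diagonal (genuinely paired samples) and penalize the off-diagonal (shuffled pairs) under different transforms. A tiny structural sketch of the MIG variant follows; the values are made up, and it does not call the module's functions, which depend on the global Config/I/E tensors.

import torch

p = torch.tensor([0.5, 0.5])                     # class prior
out1 = torch.tensor([[0.9, 0.1], [0.2, 0.8]])    # classifier posteriors
out2 = torch.tensor([[1.0, 0.0], [0.0, 1.0]])    # aggregated crowd posteriors
M = (out1 / p) @ out2.t()

diag_reward = torch.log(torch.diagonal(M)).mean()      # paired rows: pushed up
offdiag_penalty = (M * (1 - torch.eye(2))).sum() / 2   # mismatched rows: pushed down
objective = diag_reward - offdiag_penalty              # mig_loss_function returns roughly the negative of this, plus a constant

The M_step* helpers are the EM counterpart: given posteriors or hard labels, they re-estimate each expert's num_classes x num_classes confusion matrix by accumulating soft counts and row-normalizing.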
|
normal
|
{
"blob_id": "be9179b33991ba743e6e6b7d5dd4dc85ffc09fc3",
"index": 6331,
"step-1": "<mask token>\n\n\ndef mig_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = m * (E - I)\n return -(sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef tvd_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(torch.sign(m1))) / normalize_1 + sum(sum(torch.sign(m2))\n ) / normalize_2\n\n\ndef pearson_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m2 = m * (E - I)\n m2 = m2 * m2\n return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum\n (sum(m2)) - normalize_2) / normalize_2\n\n\ndef reverse_kl_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = -I / (m1.float() + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(m1)) / normalize_1 + (-sum(sum(m2)) - normalize_2\n ) / normalize_2\n\n\ndef sh_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = torch.sqrt(I / (m1.float() + E - I))\n m2 = torch.sqrt(m * (E - I))\n return -(-sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef entropy_loss(outputs):\n num = outputs.size()[0]\n temp = -outputs * torch.log(outputs + 0.0001)\n loss = torch.sum(temp)\n loss /= num\n return loss\n\n\ndef M_step(expert_label, mu):\n if not Config.missing:\n normalize = torch.sum(mu, 0).float()\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n expert_parameters = expert_parameters / normalize.unsqueeze(1)\n else:\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n normalize[R] += mu[i].float()\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R\n ].unsqueeze(1)\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\n\ndef M_step_p_mbem(t):\n p = torch.zeros(Config.num_classes)\n t = t.long()\n for i in range(t.size(0)):\n p[t[i]] += 1\n p /= t.size()[0]\n return p\n\n\ndef M_step_mbem(expert_label, t):\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n t = t.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes,\n Config.num_classes))\n for i in range(t.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, t[i], expert_label[i, R]] += 1\n normalize[R, t[i]] += 1\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)\n expert_parameters 
= expert_parameters.cuda()\n return expert_parameters\n\n\ndef print_recons_result(right_model, confusion_matrix):\n confusion_loss = 0\n for i in range(1, len(list(right_model.parameters()))):\n para = list(right_model.parameters())[i].detach().cpu()\n local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)\n residual_matrix = local_confusion_matrix - confusion_matrix[i - 1, :, :\n ]\n residual = torch.sum(abs(residual_matrix))\n confusion_loss += residual\n print('Total variation:', confusion_loss.item())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mig_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = m * (E - I)\n return -(sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef tvd_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(torch.sign(m1))) / normalize_1 + sum(sum(torch.sign(m2))\n ) / normalize_2\n\n\ndef pearson_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m2 = m * (E - I)\n m2 = m2 * m2\n return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum\n (sum(m2)) - normalize_2) / normalize_2\n\n\ndef reverse_kl_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = -I / (m1.float() + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(m1)) / normalize_1 + (-sum(sum(m2)) - normalize_2\n ) / normalize_2\n\n\ndef sh_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = torch.sqrt(I / (m1.float() + E - I))\n m2 = torch.sqrt(m * (E - I))\n return -(-sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef entropy_loss(outputs):\n num = outputs.size()[0]\n temp = -outputs * torch.log(outputs + 0.0001)\n loss = torch.sum(temp)\n loss /= num\n return loss\n\n\ndef M_step(expert_label, mu):\n if not Config.missing:\n normalize = torch.sum(mu, 0).float()\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n expert_parameters = expert_parameters / normalize.unsqueeze(1)\n else:\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n normalize[R] += mu[i].float()\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R\n ].unsqueeze(1)\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\n\ndef M_step_p_mbem(t):\n p = torch.zeros(Config.num_classes)\n t = t.long()\n for i in range(t.size(0)):\n p[t[i]] += 1\n p /= t.size()[0]\n return p\n\n\ndef M_step_mbem(expert_label, t):\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n t = t.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes,\n Config.num_classes))\n for i in range(t.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, t[i], expert_label[i, R]] += 1\n normalize[R, t[i]] += 1\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)\n expert_parameters 
= expert_parameters.cuda()\n return expert_parameters\n\n\ndef print_recons_result(right_model, confusion_matrix):\n confusion_loss = 0\n for i in range(1, len(list(right_model.parameters()))):\n para = list(right_model.parameters())[i].detach().cpu()\n local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)\n residual_matrix = local_confusion_matrix - confusion_matrix[i - 1, :, :\n ]\n residual = torch.sum(abs(residual_matrix))\n confusion_loss += residual\n print('Total variation:', confusion_loss.item())\n\n\ndef initial_priori(train_loader):\n p = torch.zeros(Config.num_classes)\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n linear_sum = torch.sum(right_data, dim=1)\n _, majority = torch.max(linear_sum, 1)\n majority = Variable(majority).long()\n total += label.size()[0]\n for i in range(Config.num_classes):\n p[i] += torch.sum(majority == i).float()\n p = p / float(total)\n return p\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mig_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = m * (E - I)\n return -(sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef tvd_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(torch.sign(m1))) / normalize_1 + sum(sum(torch.sign(m2))\n ) / normalize_2\n\n\ndef pearson_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m2 = m * (E - I)\n m2 = m2 * m2\n return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum\n (sum(m2)) - normalize_2) / normalize_2\n\n\ndef reverse_kl_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = -I / (m1.float() + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(m1)) / normalize_1 + (-sum(sum(m2)) - normalize_2\n ) / normalize_2\n\n\ndef sh_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = torch.sqrt(I / (m1.float() + E - I))\n m2 = torch.sqrt(m * (E - I))\n return -(-sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef entropy_loss(outputs):\n num = outputs.size()[0]\n temp = -outputs * torch.log(outputs + 0.0001)\n loss = torch.sum(temp)\n loss /= num\n return loss\n\n\ndef M_step(expert_label, mu):\n if not Config.missing:\n normalize = torch.sum(mu, 0).float()\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n expert_parameters = expert_parameters / normalize.unsqueeze(1)\n else:\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n normalize[R] += mu[i].float()\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R\n ].unsqueeze(1)\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\n\ndef M_step_p_mbem(t):\n p = torch.zeros(Config.num_classes)\n t = t.long()\n for i in range(t.size(0)):\n p[t[i]] += 1\n p /= t.size()[0]\n return p\n\n\ndef M_step_mbem(expert_label, t):\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n t = t.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes,\n Config.num_classes))\n for i in range(t.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, t[i], expert_label[i, R]] += 1\n normalize[R, t[i]] += 1\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)\n expert_parameters 
= expert_parameters.cuda()\n return expert_parameters\n\n\ndef print_recons_result(right_model, confusion_matrix):\n confusion_loss = 0\n for i in range(1, len(list(right_model.parameters()))):\n para = list(right_model.parameters())[i].detach().cpu()\n local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)\n residual_matrix = local_confusion_matrix - confusion_matrix[i - 1, :, :\n ]\n residual = torch.sum(abs(residual_matrix))\n confusion_loss += residual\n print('Total variation:', confusion_loss.item())\n\n\ndef initial_priori(train_loader):\n p = torch.zeros(Config.num_classes)\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n linear_sum = torch.sum(right_data, dim=1)\n _, majority = torch.max(linear_sum, 1)\n majority = Variable(majority).long()\n total += label.size()[0]\n for i in range(Config.num_classes):\n p[i] += torch.sum(majority == i).float()\n p = p / float(total)\n return p\n\n\ndef update_priori(model, train_loader):\n p = torch.zeros(Config.num_classes)\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n ep = Variable(right_data).float().cuda()\n images = Variable(left_data).float().cuda()\n outputs = model(images)\n _, predicts = torch.max(outputs.data, 1)\n total += ep.size()[0]\n predicts = predicts.detach().cpu()\n for i in range(Config.num_classes):\n p[i] += torch.sum(predicts == i).float()\n p = p / float(total)\n \"\"\"\n # updating priori by loss\n pri = priori\n pri = Variable(pri, requires_grad=True)\n loss = mig_loss_function(left_outputs.detach(),right_outputs.detach(),p)\n loss.backward()\n grad = pri.grad\n pri = pri.detach() - Config.alpha * grad\n pri = torch.exp(pri)\n pri = pri / torch.sum(pri)\n \n \"\"\"\n \"\"\"\n # true priori\n p[0] = 0.5\n p[1] = 0.5\n \"\"\"\n return p\n",
"step-4": "<mask token>\nimport torch\nimport numpy as np\nfrom common_labelme import Config\nfrom torch.autograd import Variable\nI = torch.FloatTensor(np.eye(Config.batch_size))\nE = torch.FloatTensor(np.ones((Config.batch_size, Config.batch_size)))\nnormalize_1 = Config.batch_size\nnormalize_2 = Config.batch_size * Config.batch_size - Config.batch_size\n\n\ndef mig_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = m * (E - I)\n return -(sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef tvd_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n noise = torch.rand(1) * 0.0001\n m1 = torch.log(m * I + I * noise + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(torch.sign(m1))) / normalize_1 + sum(sum(torch.sign(m2))\n ) / normalize_2\n\n\ndef pearson_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m2 = m * (E - I)\n m2 = m2 * m2\n return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum\n (sum(m2)) - normalize_2) / normalize_2\n\n\ndef reverse_kl_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = -I / (m1.float() + E - I)\n m2 = torch.log(m * (E - I) + I)\n return -sum(sum(m1)) / normalize_1 + (-sum(sum(m2)) - normalize_2\n ) / normalize_2\n\n\ndef sh_loss_function(output1, output2, p):\n new_output = output1 / p\n m = new_output @ output2.transpose(1, 0)\n m1 = m * I\n m1 = torch.sqrt(I / (m1.float() + E - I))\n m2 = torch.sqrt(m * (E - I))\n return -(-sum(sum(m1)) + Config.batch_size) / normalize_1 + sum(sum(m2)\n ) / normalize_2\n\n\ndef entropy_loss(outputs):\n num = outputs.size()[0]\n temp = -outputs * torch.log(outputs + 0.0001)\n loss = torch.sum(temp)\n loss /= num\n return loss\n\n\ndef M_step(expert_label, mu):\n if not Config.missing:\n normalize = torch.sum(mu, 0).float()\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n expert_parameters = expert_parameters / normalize.unsqueeze(1)\n else:\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.\n num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n normalize[R] += mu[i].float()\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R\n ].unsqueeze(1)\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\n\ndef M_step_p_mbem(t):\n p = torch.zeros(Config.num_classes)\n t = t.long()\n for i in range(t.size(0)):\n p[t[i]] += 1\n p /= t.size()[0]\n return p\n\n\ndef M_step_mbem(expert_label, t):\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n t = t.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes,\n Config.num_classes))\n for i in range(t.size()[0]):\n for R 
in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, t[i], expert_label[i, R]] += 1\n normalize[R, t[i]] += 1\n normalize = normalize + 1 * (normalize == 0).float()\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\n\ndef print_recons_result(right_model, confusion_matrix):\n confusion_loss = 0\n for i in range(1, len(list(right_model.parameters()))):\n para = list(right_model.parameters())[i].detach().cpu()\n local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)\n residual_matrix = local_confusion_matrix - confusion_matrix[i - 1, :, :\n ]\n residual = torch.sum(abs(residual_matrix))\n confusion_loss += residual\n print('Total variation:', confusion_loss.item())\n\n\ndef initial_priori(train_loader):\n p = torch.zeros(Config.num_classes)\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n linear_sum = torch.sum(right_data, dim=1)\n _, majority = torch.max(linear_sum, 1)\n majority = Variable(majority).long()\n total += label.size()[0]\n for i in range(Config.num_classes):\n p[i] += torch.sum(majority == i).float()\n p = p / float(total)\n return p\n\n\ndef update_priori(model, train_loader):\n p = torch.zeros(Config.num_classes)\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n ep = Variable(right_data).float().cuda()\n images = Variable(left_data).float().cuda()\n outputs = model(images)\n _, predicts = torch.max(outputs.data, 1)\n total += ep.size()[0]\n predicts = predicts.detach().cpu()\n for i in range(Config.num_classes):\n p[i] += torch.sum(predicts == i).float()\n p = p / float(total)\n \"\"\"\n # updating priori by loss\n pri = priori\n pri = Variable(pri, requires_grad=True)\n loss = mig_loss_function(left_outputs.detach(),right_outputs.detach(),p)\n loss.backward()\n grad = pri.grad\n pri = pri.detach() - Config.alpha * grad\n pri = torch.exp(pri)\n pri = pri / torch.sum(pri)\n \n \"\"\"\n \"\"\"\n # true priori\n p[0] = 0.5\n p[1] = 0.5\n \"\"\"\n return p\n",
"step-5": "\"\"\"\nutil - other functions\n\"\"\"\nimport torch\nimport numpy as np\nfrom common_labelme import Config\nfrom torch.autograd import Variable\n\nI = torch.FloatTensor(np.eye(Config.batch_size),)\nE = torch.FloatTensor(np.ones((Config.batch_size, Config.batch_size)))\nnormalize_1 = Config.batch_size\nnormalize_2 = Config.batch_size * Config.batch_size - Config.batch_size\n\ndef mig_loss_function(output1, output2, p):\n new_output = output1 / p\n m = (new_output @ output2.transpose(1,0))\n noise = torch.rand(1)*0.0001\n m1 = torch.log(m*I+ I*noise + E - I)\n m2 = m*(E-I)\n return -(sum(sum(m1)) + Config.batch_size)/normalize_1 + sum(sum(m2)) / normalize_2\n\ndef tvd_loss_function(output1, output2, p):\n new_output = output1 / p\n m = (new_output @ output2.transpose(1,0))\n noise = torch.rand(1)*0.0001\n m1 = torch.log(m*I + I * noise + E - I)\n m2 = torch.log(m*(E-I) + I )\n\n return -(sum(sum(torch.sign(m1))))/normalize_1 + sum(sum(torch.sign(m2))) / normalize_2\n\ndef pearson_loss_function(output1, output2, p):\n new_output = output1 / p\n m = (new_output @ output2.transpose(1,0))\n\n m1 = m*I\n m2 = m*(E-I)\n m2 = m2*m2\n return -(2 * sum(sum(m1)) - 2 * Config.batch_size) / normalize_1 + (sum(sum(m2)) - normalize_2) / normalize_2\n\ndef reverse_kl_loss_function(output1, output2, p):\n new_output = output1 / p\n m = (new_output @ output2.transpose(1,0))\n m1 = m*I\n m1 = -I/(m1.float() + E - I)\n m2 = torch.log(m*(E-I) + I)\n return -(sum(sum(m1)))/normalize_1 + (-sum(sum(m2)) - normalize_2) / normalize_2\n\ndef sh_loss_function(output1, output2, p):\n new_output = output1 / p\n m = (new_output @ output2.transpose(1,0))\n m1 = m*I\n m1 = torch.sqrt(I/(m1.float() + E - I))\n m2 = torch.sqrt(m*(E-I))\n return -(-sum(sum(m1)) + Config.batch_size)/normalize_1 + sum(sum(m2)) / normalize_2\n\ndef entropy_loss(outputs):\n num = outputs.size()[0]\n temp = -outputs * torch.log(outputs+0.0001)\n loss = torch.sum(temp)\n loss /= num\n return loss\n\ndef M_step(expert_label,mu):\n\n #---------------------------------------------------------------#\n # #\n # expert_label size : batch_size * expert_num #\n # mu : batch_size * num_classes #\n # expert_parameters = expert_num * num_classes * num_classes #\n # #\n #---------------------------------------------------------------#\n\n if not Config.missing:\n normalize = torch.sum(mu, 0).float()\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n\n expert_parameters = expert_parameters / normalize.unsqueeze(1)\n else:\n normalize = torch.zeros(Config.expert_num,Config.num_classes)\n expert_label = expert_label.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))\n for i in range(mu.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i,R] < 0:\n continue\n expert_parameters[R, :, expert_label[i, R]] += mu[i].float()\n normalize[R] += mu[i].float()\n\n normalize = normalize + 1 * (normalize == 0).float()\n\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)\n\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\ndef M_step_p_mbem(t):\n\n p = torch.zeros(Config.num_classes)\n t = t.long()\n for i in range(t.size(0)):\n p[t[i]] += 1\n p /= t.size()[0]\n return p\n\n\ndef 
M_step_mbem(expert_label,t):\n\n #---------------------------------------------------------------#\n # #\n # expert_label size : batch_size * expert_num #\n # t : batch_size #\n # expert_parameters = expert_num * num_classes * num_classes #\n # #\n #---------------------------------------------------------------#\n\n normalize = torch.zeros(Config.expert_num, Config.num_classes)\n expert_label = expert_label.long()\n t = t.long()\n expert_parameters = torch.zeros((Config.expert_num, Config.num_classes, Config.num_classes))\n\n\n for i in range(t.size()[0]):\n for R in range(Config.expert_num):\n if expert_label[i, R] < 0:\n continue\n expert_parameters[R, t[i], expert_label[i, R]] += 1\n normalize[R,t[i]] += 1\n normalize = normalize + 1 * (normalize == 0).float()\n\n for R in range(Config.expert_num):\n expert_parameters[R] = expert_parameters[R] / normalize[R].unsqueeze(1)\n\n expert_parameters = expert_parameters.cuda()\n return expert_parameters\n\n\ndef print_recons_result(right_model, confusion_matrix):\n\n confusion_loss = 0\n for i in range(1,len(list(right_model.parameters()))):\n para = list(right_model.parameters())[i].detach().cpu()\n #print(\"Expert %d\" %i)\n local_confusion_matrix = torch.nn.functional.softmax(para, dim=1)\n #print(local_confusion_matrix)\n residual_matrix = local_confusion_matrix - confusion_matrix[i-1, :, :]\n residual = torch.sum(abs(residual_matrix))\n confusion_loss += residual\n\n print(\"Total variation:\", confusion_loss.item())\n\ndef initial_priori(train_loader):\n p = torch.zeros((Config.num_classes))\n\n\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n linear_sum = torch.sum(right_data, dim=1)\n _, majority = torch.max(linear_sum, 1)\n majority = Variable(majority).long()\n total += label.size()[0]\n for i in range(Config.num_classes):\n p[i] += torch.sum(majority == i).float()\n p = p/float(total)\n return p\n\ndef update_priori(model, train_loader):\n # waiting for solution\n p = torch.zeros((Config.num_classes))\n\n # updating priori by posteri\n\n total = 0\n for batch_idx, (left_data, right_data, label) in enumerate(train_loader):\n ep = Variable(right_data).float().cuda()\n images = Variable(left_data).float().cuda()\n outputs = model(images)\n _, predicts = torch.max(outputs.data, 1)\n total += ep.size()[0]\n predicts = predicts.detach().cpu()\n for i in range(Config.num_classes):\n p[i] += torch.sum(predicts == i).float()\n\n p = p/float(total)\n '''\n # updating priori by loss\n pri = priori\n pri = Variable(pri, requires_grad=True)\n loss = mig_loss_function(left_outputs.detach(),right_outputs.detach(),p)\n loss.backward()\n grad = pri.grad\n pri = pri.detach() - Config.alpha * grad\n pri = torch.exp(pri)\n pri = pri / torch.sum(pri)\n \n '''\n\n '''\n # true priori\n p[0] = 0.5\n p[1] = 0.5\n '''\n return p",
"step-ids": [
10,
11,
12,
14,
15
]
}
|
[
10,
11,
12,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in doc_set:
raw = i.lower()
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
texts.append(stemmed_tokens)
<|reserved_special_token_0|>
print('LDA............')
<|reserved_special_token_0|>
for topic in topics:
print(type(topic))
print(topic)
print('LSA.................')
<|reserved_special_token_0|>
for topic in topics:
print(topic[1])
print(swn.senti_synsets(topic[1]))
print('----------------------------------------')
<|reserved_special_token_0|>
print(happy.neg_score())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tokenizer = RegexpTokenizer('\\w+')
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
doc_a = (
'Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.'
)
doc_b = (
'My mother spends a lot of time driving my brother around to baseball practice.'
)
doc_c = (
'Some health experts suggest that driving may cause increased tension and blood pressure.'
)
doc_d = (
'I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.'
)
doc_e = 'Health professionals say that brocolli is good for your health.'
rev1 = (
"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great"
)
rev2 = (
"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits."
)
rev3 = (
"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly."
)
doc_set = [rev1, rev2, rev3]
texts = []
for i in doc_set:
raw = i.lower()
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
texts.append(stemmed_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=
dictionary, passes=20)
print('LDA............')
topics = ldamodel.print_topics(num_topics=3, num_words=5)
for topic in topics:
print(type(topic))
print(topic)
print('LSA.................')
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)
<|reserved_special_token_0|>
topics = lsi.print_topics(5)
for topic in topics:
print(topic[1])
print(swn.senti_synsets(topic[1]))
print('----------------------------------------')
happy = swn.senti_synsets('happy')
print(happy.neg_score())
all = swn.all_senti_synsets()
<|reserved_special_token_1|>
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import gensim
tokenizer = RegexpTokenizer('\\w+')
en_stop = get_stop_words('en')
p_stemmer = PorterStemmer()
doc_a = (
'Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.'
)
doc_b = (
'My mother spends a lot of time driving my brother around to baseball practice.'
)
doc_c = (
'Some health experts suggest that driving may cause increased tension and blood pressure.'
)
doc_d = (
'I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.'
)
doc_e = 'Health professionals say that brocolli is good for your health.'
rev1 = (
"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great"
)
rev2 = (
"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits."
)
rev3 = (
"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly."
)
doc_set = [rev1, rev2, rev3]
texts = []
for i in doc_set:
raw = i.lower()
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
texts.append(stemmed_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=
dictionary, passes=20)
print('LDA............')
topics = ldamodel.print_topics(num_topics=3, num_words=5)
for topic in topics:
print(type(topic))
print(topic)
print('LSA.................')
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)
from nltk.corpus import sentiwordnet as swn
topics = lsi.print_topics(5)
for topic in topics:
print(topic[1])
print(swn.senti_synsets(topic[1]))
print('----------------------------------------')
happy = swn.senti_synsets('happy')
print(happy.neg_score())
all = swn.all_senti_synsets()
<|reserved_special_token_1|>
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import gensim
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = get_stop_words('en')
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
# create sample documents
doc_a = "Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother."
doc_b = "My mother spends a lot of time driving my brother around to baseball practice."
doc_c = "Some health experts suggest that driving may cause increased tension and blood pressure."
doc_d = "I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better."
doc_e = "Health professionals say that brocolli is good for your health."
rev1 = "I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great"
rev2 = "UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits."
rev3 ="The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly."
# compile sample documents into a list
#doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e]
doc_set = [rev1,rev2,rev3]
# list for tokenized documents in loop
texts = []
# loop through document list
for i in doc_set:
# clean and tokenize document string
raw = i.lower()
tokens = tokenizer.tokenize(raw)
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
# stem tokens
stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]
# add tokens to list
texts.append(stemmed_tokens)
# turn our tokenized documents into a id <-> term dictionary
dictionary = corpora.Dictionary(texts)
# convert tokenized documents into a document-term matrix
corpus = [dictionary.doc2bow(text) for text in texts]
# generate LDA model
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=dictionary, passes=20)
print("LDA............")
topics = ldamodel.print_topics(num_topics=3, num_words=5)
for topic in topics:
print(type(topic))
print(topic)
print("LSA.................")
#id2word = gensim.corpora.Dictionary.load_from_text("c:\lda_test.txt")
lsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)
from nltk.corpus import sentiwordnet as swn
topics = lsi.print_topics(5)
for topic in topics:
print(topic[1])
print(swn.senti_synsets(topic[1]))
print("----------------------------------------")
#print(list(swn.senti_synsets('slow')))
happy = swn.senti_synsets('happy')
print(happy.neg_score())
all = swn.all_senti_synsets()
#print(all)
|
flexible
|
{
"blob_id": "3035ac8044b5629d0b5de7934e46890ad36ed551",
"index": 7798,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in doc_set:\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n stopped_tokens = [i for i in tokens if not i in en_stop]\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n texts.append(stemmed_tokens)\n<mask token>\nprint('LDA............')\n<mask token>\nfor topic in topics:\n print(type(topic))\n print(topic)\nprint('LSA.................')\n<mask token>\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print('----------------------------------------')\n<mask token>\nprint(happy.neg_score())\n<mask token>\n",
"step-3": "<mask token>\ntokenizer = RegexpTokenizer('\\\\w+')\nen_stop = get_stop_words('en')\np_stemmer = PorterStemmer()\ndoc_a = (\n 'Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.'\n )\ndoc_b = (\n 'My mother spends a lot of time driving my brother around to baseball practice.'\n )\ndoc_c = (\n 'Some health experts suggest that driving may cause increased tension and blood pressure.'\n )\ndoc_d = (\n 'I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.'\n )\ndoc_e = 'Health professionals say that brocolli is good for your health.'\nrev1 = (\n \"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great\"\n )\nrev2 = (\n \"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! 
This puts it into reach of schools and non-profits.\"\n )\nrev3 = (\n \"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly.\"\n )\ndoc_set = [rev1, rev2, rev3]\ntexts = []\nfor i in doc_set:\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n stopped_tokens = [i for i in tokens if not i in en_stop]\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n texts.append(stemmed_tokens)\ndictionary = corpora.Dictionary(texts)\ncorpus = [dictionary.doc2bow(text) for text in texts]\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=\n dictionary, passes=20)\nprint('LDA............')\ntopics = ldamodel.print_topics(num_topics=3, num_words=5)\nfor topic in topics:\n print(type(topic))\n print(topic)\nprint('LSA.................')\nlsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)\n<mask token>\ntopics = lsi.print_topics(5)\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print('----------------------------------------')\nhappy = swn.senti_synsets('happy')\nprint(happy.neg_score())\nall = swn.all_senti_synsets()\n",
"step-4": "from nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\nimport gensim\ntokenizer = RegexpTokenizer('\\\\w+')\nen_stop = get_stop_words('en')\np_stemmer = PorterStemmer()\ndoc_a = (\n 'Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.'\n )\ndoc_b = (\n 'My mother spends a lot of time driving my brother around to baseball practice.'\n )\ndoc_c = (\n 'Some health experts suggest that driving may cause increased tension and blood pressure.'\n )\ndoc_d = (\n 'I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.'\n )\ndoc_e = 'Health professionals say that brocolli is good for your health.'\nrev1 = (\n \"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great\"\n )\nrev2 = (\n \"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. 
You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits.\"\n )\nrev3 = (\n \"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly.\"\n )\ndoc_set = [rev1, rev2, rev3]\ntexts = []\nfor i in doc_set:\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n stopped_tokens = [i for i in tokens if not i in en_stop]\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n texts.append(stemmed_tokens)\ndictionary = corpora.Dictionary(texts)\ncorpus = [dictionary.doc2bow(text) for text in texts]\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=\n dictionary, passes=20)\nprint('LDA............')\ntopics = ldamodel.print_topics(num_topics=3, num_words=5)\nfor topic in topics:\n print(type(topic))\n print(topic)\nprint('LSA.................')\nlsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)\nfrom nltk.corpus import sentiwordnet as swn\ntopics = lsi.print_topics(5)\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print('----------------------------------------')\nhappy = swn.senti_synsets('happy')\nprint(happy.neg_score())\nall = swn.all_senti_synsets()\n",
"step-5": "from nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem.porter import PorterStemmer\nfrom gensim import corpora, models\nimport gensim\n\ntokenizer = RegexpTokenizer(r'\\w+')\n\n# create English stop words list\nen_stop = get_stop_words('en')\n\n# Create p_stemmer of class PorterStemmer\np_stemmer = PorterStemmer()\n\n# create sample documents\ndoc_a = \"Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother.\"\ndoc_b = \"My mother spends a lot of time driving my brother around to baseball practice.\"\ndoc_c = \"Some health experts suggest that driving may cause increased tension and blood pressure.\"\ndoc_d = \"I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better.\"\ndoc_e = \"Health professionals say that brocolli is good for your health.\"\n\nrev1 = \"I pre-ordered this for my wife mostly to use as a Kindle E-reader as I figured the tablet would be slow and the display would be less than impressive. I was wrong. What a bargain this little beauty is! This model cost $49.00 but it comes with ad's displayed on the lock screen when your tablet is dormant. Once your screen times out, they disappear. You can pay $15.00 up front to get an ad free version so I assumed to unlock the tablet I'd have to spend 15 to 30 seconds looking at an ad for Amazon Prime, or a product from the daily specials section of Amazon.com I abstained from paying for Ad removal and was pleasantly surprised to find that the ads are only on the lock screen and that as soon as I unlock the tablet they disappear immediately. Here are my pros and cons thus far. PRO: Perfect size for Ebooks, and web surfing to alleviate strain on the eyes from my 5 phone display nice sturdy casing that gives it a nice heft but still weighs in as one of the lighter tablets on the market Child Accounts- Amazon allows you to set up this tablet with age restricted access for kids making this a low cost piece of tech that is perfect for school kids and allows mom and dad to ration the amount of time lil Johnny can play Clash of Clans and how much he can hit the ol' Visa card for. Battery life thus far; wife was on it for about 5 hours last night and battery was at about 46% Kindle Integration -this goes without saying but having my ebooks and audible books synced to the tablet is awesome and my Kindle books look great\"\nrev2 = \"UPDATED - After spending quite a bit more time with the device, I would give it a 4.5 due to a few specific gaps that are a bit annoying. However, you are still getting an amazing 7” tablet, with front and rear facing cameras, a gorgeous interface, fairly snappy performance and durability, all for under 50 bucks! I can’t imagine not buying these for myself and my whole family, but not a primary tablet for a techie adult by any means. For background, I have every Kindle, a couple Fires, and multiple tablets from Apple, Microsoft and Samsung. Note that my review with 5 stars considers the value equation, not just performance and how that may or may not compare to other tablets - if you are expecting this to compare to a tablet costing several times more, don't bother. But if you are looking for a great entry level tablet that does most of the things people want, this little tablet definitely delivers the value! PRICING/CONFIG: I prefer this tablet with ads and no accessories to keep the costs down. You have the option to spend more money, but I recommend against it. 
You can easily see the specs online, so I won’t do you the discourtesy of simply cutting and pasting those here. Here is the price breakdown: 9.99 base price – what an incredible price point! Or buy 5 and get a sixth one free! This puts it into reach of schools and non-profits.\"\nrev3 =\"The short/summed up version: it's the new budget king in the 6-8 size. It's screen is a little lower in resolution but still pleasant to look at, it has enough power for most of the typical tablet tasks, and it shares many of the same features as its higher priced brothers such as front and back cameras, b/g/n wifi, and good overall battery life (minus an hour) My favorite size tablet is 8, so if you're looking at the amazon fire lineup, i would take this over the 6 for sure, and would have a hard time picking the 8 fire at 3x the price. If you re not a prime member, it s still a good tablet, if you are a prime member: it s a great tablet. Possible quality control issue: Mine had two dead pixels (not very noticeable, but still will exchange) You can load APKs(enable unknown sources), i loaded antutu and panasonic image app, both work properly.\"\n\n# compile sample documents into a list\n#doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e]\ndoc_set = [rev1,rev2,rev3]\n# list for tokenized documents in loop\ntexts = []\n\n# loop through document list\nfor i in doc_set:\n # clean and tokenize document string\n raw = i.lower()\n tokens = tokenizer.tokenize(raw)\n\n # remove stop words from tokens\n stopped_tokens = [i for i in tokens if not i in en_stop]\n\n # stem tokens\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens]\n\n # add tokens to list\n texts.append(stemmed_tokens)\n\n# turn our tokenized documents into a id <-> term dictionary\ndictionary = corpora.Dictionary(texts)\n\n# convert tokenized documents into a document-term matrix\ncorpus = [dictionary.doc2bow(text) for text in texts]\n\n# generate LDA model\nldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=4, id2word=dictionary, passes=20)\nprint(\"LDA............\")\ntopics = ldamodel.print_topics(num_topics=3, num_words=5)\nfor topic in topics:\n print(type(topic))\n print(topic)\n\nprint(\"LSA.................\")\n#id2word = gensim.corpora.Dictionary.load_from_text(\"c:\\lda_test.txt\")\nlsi = gensim.models.lsimodel.LsiModel(corpus, id2word=dictionary)\n\nfrom nltk.corpus import sentiwordnet as swn\n\ntopics = lsi.print_topics(5)\nfor topic in topics:\n print(topic[1])\n print(swn.senti_synsets(topic[1]))\n print(\"----------------------------------------\")\n\n\n\n#print(list(swn.senti_synsets('slow')))\n\nhappy = swn.senti_synsets('happy')\n\nprint(happy.neg_score())\n\nall = swn.all_senti_synsets()\n#print(all)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@pytest.fixture
def home_page(getBrowser):
aHome = HomePage(getBrowser)
return aHome
@pytest.fixture
def overall_page(getBrowser):
aOverall = OverallPage(getBrowser)
return aOverall
@pytest.fixture
def page_component(getBrowser):
aPage = PageComponent(getBrowser)
return aPage
@given('Go to overall page')
def goTo_overall(getBrowser):
getBrowser.get(Constants.get_overall_url())
time.sleep(1)
@when('the user sort the list by rank (click it)')
def sort_by_rank(overall_page):
overall_page.sort_by_rank()
time.sleep(1)
@when('the user click the viewmore button to see the <num> model')
def viewmore(overall_page, num):
overall_page.use_viewMore(num)
@when('the user check the overall list')
def view_overall_list():
pass
@when('the user click right arrow to see next page')
def goTo_next_page(page_component):
page_component.click_right_arrow()
<|reserved_special_token_0|>
@then('rating list showed correctly')
def check_list_successfully(overall_page):
make = overall_page.get_value_make(1)
assert make == 'Lamborghini'
@then('rating show according the rank ascending')
def check_rank_sort(overall_page):
first_rank = overall_page.get_value_rank(1)
second_rank = overall_page.get_value_rank(2)
assert first_rank == '1'
assert second_rank == '2'
@then('go to a model page')
def check_goTo_model_page(getBrowser):
assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +
'model/')
@then('go to the next page')
def check_goTo_next_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 2 of 5'
@then('go to the previous page')
def check_goTo_prev_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 1 of 5'
@then('go to the <num> page')
def check_goTo_num_page(page_component, num):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page ' + num + ' of 5'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def home_page(getBrowser):
aHome = HomePage(getBrowser)
return aHome
@pytest.fixture
def overall_page(getBrowser):
aOverall = OverallPage(getBrowser)
return aOverall
@pytest.fixture
def page_component(getBrowser):
aPage = PageComponent(getBrowser)
return aPage
@given('Go to overall page')
def goTo_overall(getBrowser):
getBrowser.get(Constants.get_overall_url())
time.sleep(1)
@when('the user sort the list by rank (click it)')
def sort_by_rank(overall_page):
overall_page.sort_by_rank()
time.sleep(1)
@when('the user click the viewmore button to see the <num> model')
def viewmore(overall_page, num):
overall_page.use_viewMore(num)
@when('the user check the overall list')
def view_overall_list():
pass
@when('the user click right arrow to see next page')
def goTo_next_page(page_component):
page_component.click_right_arrow()
<|reserved_special_token_0|>
@when('the user input <num> in the edit control')
def input_page_num(page_component, num):
page_component.edit_page(num)
@then('rating list showed correctly')
def check_list_successfully(overall_page):
make = overall_page.get_value_make(1)
assert make == 'Lamborghini'
@then('rating show according the rank ascending')
def check_rank_sort(overall_page):
first_rank = overall_page.get_value_rank(1)
second_rank = overall_page.get_value_rank(2)
assert first_rank == '1'
assert second_rank == '2'
@then('go to a model page')
def check_goTo_model_page(getBrowser):
assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +
'model/')
@then('go to the next page')
def check_goTo_next_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 2 of 5'
@then('go to the previous page')
def check_goTo_prev_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 1 of 5'
@then('go to the <num> page')
def check_goTo_num_page(page_component, num):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page ' + num + ' of 5'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
scenarios('overall_rating.feature', features_base_dir=Constants.
FEATURE_FILES_BASE_DIR)
@pytest.fixture
def home_page(getBrowser):
aHome = HomePage(getBrowser)
return aHome
@pytest.fixture
def overall_page(getBrowser):
aOverall = OverallPage(getBrowser)
return aOverall
@pytest.fixture
def page_component(getBrowser):
aPage = PageComponent(getBrowser)
return aPage
@given('Go to overall page')
def goTo_overall(getBrowser):
getBrowser.get(Constants.get_overall_url())
time.sleep(1)
@when('the user sort the list by rank (click it)')
def sort_by_rank(overall_page):
overall_page.sort_by_rank()
time.sleep(1)
@when('the user click the viewmore button to see the <num> model')
def viewmore(overall_page, num):
overall_page.use_viewMore(num)
@when('the user check the overall list')
def view_overall_list():
pass
@when('the user click right arrow to see next page')
def goTo_next_page(page_component):
page_component.click_right_arrow()
@when('the user click left arrow to previous page')
def goTo_prev_page(page_component):
page_component.click_left_arrow()
@when('the user input <num> in the edit control')
def input_page_num(page_component, num):
page_component.edit_page(num)
@then('rating list showed correctly')
def check_list_successfully(overall_page):
make = overall_page.get_value_make(1)
assert make == 'Lamborghini'
@then('rating show according the rank ascending')
def check_rank_sort(overall_page):
first_rank = overall_page.get_value_rank(1)
second_rank = overall_page.get_value_rank(2)
assert first_rank == '1'
assert second_rank == '2'
@then('go to a model page')
def check_goTo_model_page(getBrowser):
assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +
'model/')
@then('go to the next page')
def check_goTo_next_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 2 of 5'
@then('go to the previous page')
def check_goTo_prev_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 1 of 5'
@then('go to the <num> page')
def check_goTo_num_page(page_component, num):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page ' + num + ' of 5'
<|reserved_special_token_1|>
import time
import pytest
from pytest_bdd import scenarios, given, when, then
from conf import Constants
from page_components.page import PageComponent
from page_components.overall import OverallPage
scenarios('overall_rating.feature', features_base_dir=Constants.
FEATURE_FILES_BASE_DIR)
@pytest.fixture
def home_page(getBrowser):
aHome = HomePage(getBrowser)
return aHome
@pytest.fixture
def overall_page(getBrowser):
aOverall = OverallPage(getBrowser)
return aOverall
@pytest.fixture
def page_component(getBrowser):
aPage = PageComponent(getBrowser)
return aPage
@given('Go to overall page')
def goTo_overall(getBrowser):
getBrowser.get(Constants.get_overall_url())
time.sleep(1)
@when('the user sort the list by rank (click it)')
def sort_by_rank(overall_page):
overall_page.sort_by_rank()
time.sleep(1)
@when('the user click the viewmore button to see the <num> model')
def viewmore(overall_page, num):
overall_page.use_viewMore(num)
@when('the user check the overall list')
def view_overall_list():
pass
@when('the user click right arrow to see next page')
def goTo_next_page(page_component):
page_component.click_right_arrow()
@when('the user click left arrow to previous page')
def goTo_prev_page(page_component):
page_component.click_left_arrow()
@when('the user input <num> in the edit control')
def input_page_num(page_component, num):
page_component.edit_page(num)
@then('rating list showed correctly')
def check_list_successfully(overall_page):
make = overall_page.get_value_make(1)
assert make == 'Lamborghini'
@then('rating show according the rank ascending')
def check_rank_sort(overall_page):
first_rank = overall_page.get_value_rank(1)
second_rank = overall_page.get_value_rank(2)
assert first_rank == '1'
assert second_rank == '2'
@then('go to a model page')
def check_goTo_model_page(getBrowser):
assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +
'model/')
@then('go to the next page')
def check_goTo_next_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 2 of 5'
@then('go to the previous page')
def check_goTo_prev_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 1 of 5'
@then('go to the <num> page')
def check_goTo_num_page(page_component, num):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page ' + num + ' of 5'
<|reserved_special_token_1|>
import time
import pytest
from pytest_bdd import scenarios, given, when, then
from conf import Constants
from page_components.page import PageComponent
from page_components.overall import OverallPage
# Scenarios
scenarios('overall_rating.feature', features_base_dir=Constants.FEATURE_FILES_BASE_DIR)
# Fixtures
@pytest.fixture
def home_page(getBrowser):
aHome = HomePage(getBrowser)
return aHome
@pytest.fixture
def overall_page(getBrowser):
aOverall = OverallPage(getBrowser)
return aOverall
@pytest.fixture
def page_component(getBrowser):
aPage = PageComponent(getBrowser)
return aPage
# private method
# Given Steps
@given('Go to overall page')
def goTo_overall(getBrowser):
getBrowser.get(Constants.get_overall_url())
time.sleep(1)
# When Steps
@when('the user sort the list by rank (click it)')
def sort_by_rank(overall_page):
overall_page.sort_by_rank()
time.sleep(1)
@when('the user click the viewmore button to see the <num> model')
def viewmore(overall_page, num):
overall_page.use_viewMore(num)
@when('the user check the overall list')
def view_overall_list():
# dummy code
pass
@when('the user click right arrow to see next page')
def goTo_next_page(page_component):
page_component.click_right_arrow()
@when('the user click left arrow to previous page')
def goTo_prev_page(page_component):
page_component.click_left_arrow()
@when('the user input <num> in the edit control')
def input_page_num(page_component, num):
page_component.edit_page(num)
# Then Steps
@then('rating list showed correctly')
def check_list_successfully(overall_page):
make = overall_page.get_value_make(1)
assert make == 'Lamborghini'
@then('rating show according the rank ascending')
def check_rank_sort(overall_page):
first_rank = overall_page.get_value_rank(1)
second_rank = overall_page.get_value_rank(2)
assert first_rank == '1'
assert second_rank == '2'
@then('go to a model page')
def check_goTo_model_page(getBrowser):
assert getBrowser.current_url.startswith(Constants.get_buggycar_host() + "model/")
@then('go to the next page')
def check_goTo_next_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 2 of 5'
@then('go to the previous page')
def check_goTo_prev_page(page_component):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page 1 of 5'
@then('go to the <num> page')
def check_goTo_num_page(page_component, num):
actual_msg = page_component.get_pag_msg()
assert actual_msg == '« » page ' + num + ' of 5'
|
flexible
|
{
"blob_id": "2809ed3a5ea1e527609e169bca1440e0db2761b9",
"index": 8408,
"step-1": "<mask token>\n\n\n@pytest.fixture\ndef home_page(getBrowser):\n aHome = HomePage(getBrowser)\n return aHome\n\n\n@pytest.fixture\ndef overall_page(getBrowser):\n aOverall = OverallPage(getBrowser)\n return aOverall\n\n\n@pytest.fixture\ndef page_component(getBrowser):\n aPage = PageComponent(getBrowser)\n return aPage\n\n\n@given('Go to overall page')\ndef goTo_overall(getBrowser):\n getBrowser.get(Constants.get_overall_url())\n time.sleep(1)\n\n\n@when('the user sort the list by rank (click it)')\ndef sort_by_rank(overall_page):\n overall_page.sort_by_rank()\n time.sleep(1)\n\n\n@when('the user click the viewmore button to see the <num> model')\ndef viewmore(overall_page, num):\n overall_page.use_viewMore(num)\n\n\n@when('the user check the overall list')\ndef view_overall_list():\n pass\n\n\n@when('the user click right arrow to see next page')\ndef goTo_next_page(page_component):\n page_component.click_right_arrow()\n\n\n<mask token>\n\n\n@then('rating list showed correctly')\ndef check_list_successfully(overall_page):\n make = overall_page.get_value_make(1)\n assert make == 'Lamborghini'\n\n\n@then('rating show according the rank ascending')\ndef check_rank_sort(overall_page):\n first_rank = overall_page.get_value_rank(1)\n second_rank = overall_page.get_value_rank(2)\n assert first_rank == '1'\n assert second_rank == '2'\n\n\n@then('go to a model page')\ndef check_goTo_model_page(getBrowser):\n assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +\n 'model/')\n\n\n@then('go to the next page')\ndef check_goTo_next_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 2 of 5'\n\n\n@then('go to the previous page')\ndef check_goTo_prev_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 1 of 5'\n\n\n@then('go to the <num> page')\ndef check_goTo_num_page(page_component, num):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page ' + num + ' of 5'\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef home_page(getBrowser):\n aHome = HomePage(getBrowser)\n return aHome\n\n\n@pytest.fixture\ndef overall_page(getBrowser):\n aOverall = OverallPage(getBrowser)\n return aOverall\n\n\n@pytest.fixture\ndef page_component(getBrowser):\n aPage = PageComponent(getBrowser)\n return aPage\n\n\n@given('Go to overall page')\ndef goTo_overall(getBrowser):\n getBrowser.get(Constants.get_overall_url())\n time.sleep(1)\n\n\n@when('the user sort the list by rank (click it)')\ndef sort_by_rank(overall_page):\n overall_page.sort_by_rank()\n time.sleep(1)\n\n\n@when('the user click the viewmore button to see the <num> model')\ndef viewmore(overall_page, num):\n overall_page.use_viewMore(num)\n\n\n@when('the user check the overall list')\ndef view_overall_list():\n pass\n\n\n@when('the user click right arrow to see next page')\ndef goTo_next_page(page_component):\n page_component.click_right_arrow()\n\n\n<mask token>\n\n\n@when('the user input <num> in the edit control')\ndef input_page_num(page_component, num):\n page_component.edit_page(num)\n\n\n@then('rating list showed correctly')\ndef check_list_successfully(overall_page):\n make = overall_page.get_value_make(1)\n assert make == 'Lamborghini'\n\n\n@then('rating show according the rank ascending')\ndef check_rank_sort(overall_page):\n first_rank = overall_page.get_value_rank(1)\n second_rank = overall_page.get_value_rank(2)\n assert first_rank == '1'\n assert second_rank == '2'\n\n\n@then('go to a model page')\ndef check_goTo_model_page(getBrowser):\n assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +\n 'model/')\n\n\n@then('go to the next page')\ndef check_goTo_next_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 2 of 5'\n\n\n@then('go to the previous page')\ndef check_goTo_prev_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 1 of 5'\n\n\n@then('go to the <num> page')\ndef check_goTo_num_page(page_component, num):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page ' + num + ' of 5'\n",
"step-3": "<mask token>\nscenarios('overall_rating.feature', features_base_dir=Constants.\n FEATURE_FILES_BASE_DIR)\n\n\n@pytest.fixture\ndef home_page(getBrowser):\n aHome = HomePage(getBrowser)\n return aHome\n\n\n@pytest.fixture\ndef overall_page(getBrowser):\n aOverall = OverallPage(getBrowser)\n return aOverall\n\n\n@pytest.fixture\ndef page_component(getBrowser):\n aPage = PageComponent(getBrowser)\n return aPage\n\n\n@given('Go to overall page')\ndef goTo_overall(getBrowser):\n getBrowser.get(Constants.get_overall_url())\n time.sleep(1)\n\n\n@when('the user sort the list by rank (click it)')\ndef sort_by_rank(overall_page):\n overall_page.sort_by_rank()\n time.sleep(1)\n\n\n@when('the user click the viewmore button to see the <num> model')\ndef viewmore(overall_page, num):\n overall_page.use_viewMore(num)\n\n\n@when('the user check the overall list')\ndef view_overall_list():\n pass\n\n\n@when('the user click right arrow to see next page')\ndef goTo_next_page(page_component):\n page_component.click_right_arrow()\n\n\n@when('the user click left arrow to previous page')\ndef goTo_prev_page(page_component):\n page_component.click_left_arrow()\n\n\n@when('the user input <num> in the edit control')\ndef input_page_num(page_component, num):\n page_component.edit_page(num)\n\n\n@then('rating list showed correctly')\ndef check_list_successfully(overall_page):\n make = overall_page.get_value_make(1)\n assert make == 'Lamborghini'\n\n\n@then('rating show according the rank ascending')\ndef check_rank_sort(overall_page):\n first_rank = overall_page.get_value_rank(1)\n second_rank = overall_page.get_value_rank(2)\n assert first_rank == '1'\n assert second_rank == '2'\n\n\n@then('go to a model page')\ndef check_goTo_model_page(getBrowser):\n assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +\n 'model/')\n\n\n@then('go to the next page')\ndef check_goTo_next_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 2 of 5'\n\n\n@then('go to the previous page')\ndef check_goTo_prev_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 1 of 5'\n\n\n@then('go to the <num> page')\ndef check_goTo_num_page(page_component, num):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page ' + num + ' of 5'\n",
"step-4": "import time\nimport pytest\nfrom pytest_bdd import scenarios, given, when, then\nfrom conf import Constants\nfrom page_components.page import PageComponent\nfrom page_components.overall import OverallPage\nscenarios('overall_rating.feature', features_base_dir=Constants.\n FEATURE_FILES_BASE_DIR)\n\n\n@pytest.fixture\ndef home_page(getBrowser):\n aHome = HomePage(getBrowser)\n return aHome\n\n\n@pytest.fixture\ndef overall_page(getBrowser):\n aOverall = OverallPage(getBrowser)\n return aOverall\n\n\n@pytest.fixture\ndef page_component(getBrowser):\n aPage = PageComponent(getBrowser)\n return aPage\n\n\n@given('Go to overall page')\ndef goTo_overall(getBrowser):\n getBrowser.get(Constants.get_overall_url())\n time.sleep(1)\n\n\n@when('the user sort the list by rank (click it)')\ndef sort_by_rank(overall_page):\n overall_page.sort_by_rank()\n time.sleep(1)\n\n\n@when('the user click the viewmore button to see the <num> model')\ndef viewmore(overall_page, num):\n overall_page.use_viewMore(num)\n\n\n@when('the user check the overall list')\ndef view_overall_list():\n pass\n\n\n@when('the user click right arrow to see next page')\ndef goTo_next_page(page_component):\n page_component.click_right_arrow()\n\n\n@when('the user click left arrow to previous page')\ndef goTo_prev_page(page_component):\n page_component.click_left_arrow()\n\n\n@when('the user input <num> in the edit control')\ndef input_page_num(page_component, num):\n page_component.edit_page(num)\n\n\n@then('rating list showed correctly')\ndef check_list_successfully(overall_page):\n make = overall_page.get_value_make(1)\n assert make == 'Lamborghini'\n\n\n@then('rating show according the rank ascending')\ndef check_rank_sort(overall_page):\n first_rank = overall_page.get_value_rank(1)\n second_rank = overall_page.get_value_rank(2)\n assert first_rank == '1'\n assert second_rank == '2'\n\n\n@then('go to a model page')\ndef check_goTo_model_page(getBrowser):\n assert getBrowser.current_url.startswith(Constants.get_buggycar_host() +\n 'model/')\n\n\n@then('go to the next page')\ndef check_goTo_next_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 2 of 5'\n\n\n@then('go to the previous page')\ndef check_goTo_prev_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 1 of 5'\n\n\n@then('go to the <num> page')\ndef check_goTo_num_page(page_component, num):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page ' + num + ' of 5'\n",
"step-5": "import time\nimport pytest\n\nfrom pytest_bdd import scenarios, given, when, then\n\nfrom conf import Constants\n\nfrom page_components.page import PageComponent\nfrom page_components.overall import OverallPage\n\n\n\n\n# Scenarios\n\nscenarios('overall_rating.feature', features_base_dir=Constants.FEATURE_FILES_BASE_DIR)\n\n# Fixtures\n\n@pytest.fixture\ndef home_page(getBrowser):\n aHome = HomePage(getBrowser)\n return aHome\n\n@pytest.fixture\ndef overall_page(getBrowser):\n aOverall = OverallPage(getBrowser)\n return aOverall\n\n@pytest.fixture\ndef page_component(getBrowser):\n aPage = PageComponent(getBrowser)\n return aPage\n\n# private method\n\n# Given Steps\n\n@given('Go to overall page')\ndef goTo_overall(getBrowser):\n getBrowser.get(Constants.get_overall_url())\n time.sleep(1)\n\n# When Steps\n\n@when('the user sort the list by rank (click it)')\ndef sort_by_rank(overall_page):\n overall_page.sort_by_rank()\n time.sleep(1)\n\n@when('the user click the viewmore button to see the <num> model')\ndef viewmore(overall_page, num):\n overall_page.use_viewMore(num)\n \n@when('the user check the overall list')\ndef view_overall_list():\n # dummy code\n pass\n\n@when('the user click right arrow to see next page')\ndef goTo_next_page(page_component):\n page_component.click_right_arrow()\n\n@when('the user click left arrow to previous page')\ndef goTo_prev_page(page_component):\n page_component.click_left_arrow()\n\n@when('the user input <num> in the edit control')\ndef input_page_num(page_component, num):\n page_component.edit_page(num)\n\n# Then Steps\n\n@then('rating list showed correctly')\ndef check_list_successfully(overall_page):\n make = overall_page.get_value_make(1)\n assert make == 'Lamborghini'\n\n@then('rating show according the rank ascending')\ndef check_rank_sort(overall_page):\n first_rank = overall_page.get_value_rank(1)\n second_rank = overall_page.get_value_rank(2)\n assert first_rank == '1'\n assert second_rank == '2'\n\n@then('go to a model page')\ndef check_goTo_model_page(getBrowser):\n assert getBrowser.current_url.startswith(Constants.get_buggycar_host() + \"model/\")\n\n@then('go to the next page')\ndef check_goTo_next_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 2 of 5'\n\n@then('go to the previous page')\ndef check_goTo_prev_page(page_component):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page 1 of 5'\n\n@then('go to the <num> page')\ndef check_goTo_num_page(page_component, num):\n actual_msg = page_component.get_pag_msg()\n assert actual_msg == '« » page ' + num + ' of 5'\n",
"step-ids": [
14,
15,
17,
18,
19
]
}
|
[
14,
15,
17,
18,
19
] |
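The pytest-bdd record above drives its assertions through page objects (OverallPage, PageComponent) whose implementations live outside this record; only their method names can be inferred from the step functions. A minimal, hypothetical sketch of the PageComponent interface those steps assume — assuming a Selenium WebDriver underneath, with all selectors as placeholders rather than values from the original page_components package — could look like:

# Hypothetical page-object sketch; only the method names mirror the record above.
# The CSS selectors are illustrative placeholders, not taken from the source repo.
from selenium.webdriver.common.by import By


class PageComponent:
    def __init__(self, browser):
        self.browser = browser

    def get_pag_msg(self):
        # Pagination caption such as '« » page 2 of 5'
        return self.browser.find_element(By.CSS_SELECTOR, '.pagination').text

    def click_right_arrow(self):
        self.browser.find_element(By.CSS_SELECTOR, '.pagination .right').click()

    def click_left_arrow(self):
        self.browser.find_element(By.CSS_SELECTOR, '.pagination .left').click()

    def edit_page(self, num):
        # Type a page number into the pagination edit control
        field = self.browser.find_element(By.CSS_SELECTOR, '.pagination input')
        field.clear()
        field.send_keys(num)

OverallPage would follow the same pattern, exposing sort_by_rank(), use_viewMore(), get_value_make() and get_value_rank() over the ratings table.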
<|reserved_special_token_0|>
class Transcoder:
<|reserved_special_token_0|>
def __init__(self):
"""Constructor."""
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
def start(self, token):
"""Start recognition."""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self._process).start()
<|reserved_special_token_0|>
def _process(self):
logger.info('_process:Enter')
token = self._authenticate()['token']
uuid = self._start_recognition(token)['uuid']
logger.info('start transcode')
i = 1
while True:
arr = self._stream_generator()
if arr is None:
break
inline = np.hstack(arr)
arr_bytes = inline.tobytes('C')
header = {'Content-Type': 'multipart/form-data', 'X-Token': token}
files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,
'application/octet-stream')}
resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,
files=files)
if resp.status_code == 200:
logger.debug(resp.json())
result = resp.json()[0]
if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':
self._write_result(result[1])
i = i + 1
self._flush_recognition(uuid, token, i)
while True:
if self._get_result(uuid, token) is None:
break
time.sleep(0.1)
self._end_recognition(uuid, token)
logger.info('end transcode')
def _authenticate(self):
speechrecog_jajp_id = model.key.RECAIUS_ID
speechrecog_jajp_password = model.key.RECAIUS_PASSWORD
param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,
'password': speechrecog_jajp_password}}
return requests.post(AUTH_URL, json=param).json()
def _flush_recognition(self, uuid, token, i):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'voice_id': i}
resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,
json=param)
if resp.status_code == 200:
logger.debug(f'frush result:{resp.json()}')
return resp.json()
else:
logger.debug(f'flush result(status:{resp.status_code})')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _start_recognition(self, token):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'model_id': 1}
return requests.post(VOICE_URL, headers=header, json=param).json()
def _end_recognition(self, uuid, token):
header = {'X-Token': token}
resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)
if resp.status_code == 204:
logger.debug(f'delete result(status:{resp.status_code})')
def _write_result(self, transcipt):
self.transcript = transcipt
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Transcoder:
<|reserved_special_token_0|>
def __init__(self):
"""Constructor."""
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
def start(self, token):
"""Start recognition."""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self._process).start()
def write_stream(self, buf):
"""Write audio stream."""
self._queue.put(buf)
def _process(self):
logger.info('_process:Enter')
token = self._authenticate()['token']
uuid = self._start_recognition(token)['uuid']
logger.info('start transcode')
i = 1
while True:
arr = self._stream_generator()
if arr is None:
break
inline = np.hstack(arr)
arr_bytes = inline.tobytes('C')
header = {'Content-Type': 'multipart/form-data', 'X-Token': token}
files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,
'application/octet-stream')}
resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,
files=files)
if resp.status_code == 200:
logger.debug(resp.json())
result = resp.json()[0]
if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':
self._write_result(result[1])
i = i + 1
self._flush_recognition(uuid, token, i)
while True:
if self._get_result(uuid, token) is None:
break
time.sleep(0.1)
self._end_recognition(uuid, token)
logger.info('end transcode')
def _authenticate(self):
speechrecog_jajp_id = model.key.RECAIUS_ID
speechrecog_jajp_password = model.key.RECAIUS_PASSWORD
param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,
'password': speechrecog_jajp_password}}
return requests.post(AUTH_URL, json=param).json()
def _flush_recognition(self, uuid, token, i):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'voice_id': i}
resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,
json=param)
if resp.status_code == 200:
logger.debug(f'frush result:{resp.json()}')
return resp.json()
else:
logger.debug(f'flush result(status:{resp.status_code})')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _start_recognition(self, token):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'model_id': 1}
return requests.post(VOICE_URL, headers=header, json=param).json()
def _end_recognition(self, uuid, token):
header = {'X-Token': token}
resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)
if resp.status_code == 204:
logger.debug(f'delete result(status:{resp.status_code})')
def _write_result(self, transcipt):
self.transcript = transcipt
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Transcoder:
<|reserved_special_token_0|>
def __init__(self):
"""Constructor."""
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
def start(self, token):
"""Start recognition."""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self._process).start()
def write_stream(self, buf):
"""Write audio stream."""
self._queue.put(buf)
def _process(self):
logger.info('_process:Enter')
token = self._authenticate()['token']
uuid = self._start_recognition(token)['uuid']
logger.info('start transcode')
i = 1
while True:
arr = self._stream_generator()
if arr is None:
break
inline = np.hstack(arr)
arr_bytes = inline.tobytes('C')
header = {'Content-Type': 'multipart/form-data', 'X-Token': token}
files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,
'application/octet-stream')}
resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,
files=files)
if resp.status_code == 200:
logger.debug(resp.json())
result = resp.json()[0]
if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':
self._write_result(result[1])
i = i + 1
self._flush_recognition(uuid, token, i)
while True:
if self._get_result(uuid, token) is None:
break
time.sleep(0.1)
self._end_recognition(uuid, token)
logger.info('end transcode')
def _authenticate(self):
speechrecog_jajp_id = model.key.RECAIUS_ID
speechrecog_jajp_password = model.key.RECAIUS_PASSWORD
param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,
'password': speechrecog_jajp_password}}
return requests.post(AUTH_URL, json=param).json()
def _flush_recognition(self, uuid, token, i):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'voice_id': i}
resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,
json=param)
if resp.status_code == 200:
logger.debug(f'frush result:{resp.json()}')
return resp.json()
else:
logger.debug(f'flush result(status:{resp.status_code})')
def _get_result(self, uuid, token):
header = {'X-Token': token}
resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)
if resp.status_code == 200:
logger.debug(f'get result:{resp.json()}')
return resp.json()
else:
logger.debug(f'get result(status:{resp.status_code})')
def _stream_generator(self):
arr = []
while True:
try:
v = self._queue.get_nowait()
if v is None:
return None
arr.append((v * 32767).astype(np.int16))
except queue.Empty:
if len(arr) != 0:
break
else:
time.sleep(0.1)
return arr
def _start_recognition(self, token):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'model_id': 1}
return requests.post(VOICE_URL, headers=header, json=param).json()
def _end_recognition(self, uuid, token):
header = {'X-Token': token}
resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)
if resp.status_code == 204:
logger.debug(f'delete result(status:{resp.status_code})')
def _write_result(self, transcipt):
self.transcript = transcipt
<|reserved_special_token_1|>
<|reserved_special_token_0|>
AUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'
VOICE_URL = 'https://api.recaius.jp/asr/v2/voices'
class Transcoder:
"""Transcoder Class."""
def __init__(self):
"""Constructor."""
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
def start(self, token):
"""Start recognition."""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self._process).start()
def write_stream(self, buf):
"""Write audio stream."""
self._queue.put(buf)
def _process(self):
logger.info('_process:Enter')
token = self._authenticate()['token']
uuid = self._start_recognition(token)['uuid']
logger.info('start transcode')
i = 1
while True:
arr = self._stream_generator()
if arr is None:
break
inline = np.hstack(arr)
arr_bytes = inline.tobytes('C')
header = {'Content-Type': 'multipart/form-data', 'X-Token': token}
files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,
'application/octet-stream')}
resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,
files=files)
if resp.status_code == 200:
logger.debug(resp.json())
result = resp.json()[0]
if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':
self._write_result(result[1])
i = i + 1
self._flush_recognition(uuid, token, i)
while True:
if self._get_result(uuid, token) is None:
break
time.sleep(0.1)
self._end_recognition(uuid, token)
logger.info('end transcode')
def _authenticate(self):
speechrecog_jajp_id = model.key.RECAIUS_ID
speechrecog_jajp_password = model.key.RECAIUS_PASSWORD
param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,
'password': speechrecog_jajp_password}}
return requests.post(AUTH_URL, json=param).json()
def _flush_recognition(self, uuid, token, i):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'voice_id': i}
resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,
json=param)
if resp.status_code == 200:
logger.debug(f'frush result:{resp.json()}')
return resp.json()
else:
logger.debug(f'flush result(status:{resp.status_code})')
def _get_result(self, uuid, token):
header = {'X-Token': token}
resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)
if resp.status_code == 200:
logger.debug(f'get result:{resp.json()}')
return resp.json()
else:
logger.debug(f'get result(status:{resp.status_code})')
def _stream_generator(self):
arr = []
while True:
try:
v = self._queue.get_nowait()
if v is None:
return None
arr.append((v * 32767).astype(np.int16))
except queue.Empty:
if len(arr) != 0:
break
else:
time.sleep(0.1)
return arr
def _start_recognition(self, token):
header = {'Content-Type': 'application/json', 'X-Token': token}
param = {'model_id': 1}
return requests.post(VOICE_URL, headers=header, json=param).json()
def _end_recognition(self, uuid, token):
header = {'X-Token': token}
resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)
if resp.status_code == 204:
logger.debug(f'delete result(status:{resp.status_code})')
def _write_result(self, transcipt):
self.transcript = transcipt
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Transcoder with TOSHIBA RECAIUS API."""
import threading
import queue
import time
import numpy as np
from logzero import logger
import requests
import model.key
AUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'
VOICE_URL = 'https://api.recaius.jp/asr/v2/voices'
class Transcoder:
"""Transcoder Class."""
def __init__(self):
"""Constructor."""
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
def start(self, token):
"""Start recognition."""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self._process).start()
def write_stream(self, buf):
"""Write audio stream."""
self._queue.put(buf)
def _process(self):
logger.info('_process:Enter')
token = self._authenticate()['token']
uuid = self._start_recognition(token)['uuid']
logger.info('start transcode')
i = 1
while True:
arr = self._stream_generator()
if(arr is None):
break
# logger.debug(f'{len(arr)} , {self._queue.qsize()}')
inline = np.hstack(arr)
arr_bytes = inline.tobytes('C')
header = {
'Content-Type': 'multipart/form-data',
'X-Token': token
}
files = {
'voice_id': ('', i, ''),
'voice': ('', arr_bytes, 'application/octet-stream')
}
resp = requests.put(
f'{VOICE_URL}/{uuid}', headers=header, files=files)
if(resp.status_code == 200):
logger.debug(resp.json())
result = resp.json()[0]
if(result[0] == 'TMP_RESULT' or result[0] == 'RESULT'):
self._write_result(result[1])
i = i + 1
self._flush_recognition(uuid, token, i)
while True:
if(self._get_result(uuid, token) is None):
break
time.sleep(0.1)
self._end_recognition(uuid, token)
logger.info('end transcode')
def _authenticate(self):
speechrecog_jajp_id = model.key.RECAIUS_ID
speechrecog_jajp_password = model.key.RECAIUS_PASSWORD
param = {
"speech_recog_jaJP": {
'service_id': speechrecog_jajp_id,
'password': speechrecog_jajp_password
}
}
return requests.post(AUTH_URL, json=param).json()
def _flush_recognition(self, uuid, token, i):
header = {
'Content-Type': 'application/json',
'X-Token': token
}
param = {
'voice_id': i,
}
resp = requests.put(
f'{VOICE_URL}/{uuid}/flush', headers=header, json=param)
if(resp.status_code == 200):
logger.debug(f'frush result:{resp.json()}')
return resp.json()
else:
logger.debug(f'flush result(status:{resp.status_code})')
def _get_result(self, uuid, token):
header = {
'X-Token': token
}
resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)
if(resp.status_code == 200):
logger.debug(f'get result:{resp.json()}')
return resp.json()
else:
logger.debug(f'get result(status:{resp.status_code})')
def _stream_generator(self):
arr = []
while True:
try:
v = self._queue.get_nowait()
# print(v)
if v is None:
return None
arr.append((v * 32767).astype(np.int16))
except queue.Empty:
if(len(arr) != 0):
break
else:
time.sleep(0.1)
return arr
def _start_recognition(self, token):
header = {
'Content-Type': 'application/json',
'X-Token': token
}
param = {
'model_id': 1
}
return requests.post(VOICE_URL, headers=header, json=param).json()
def _end_recognition(self, uuid, token):
header = {
'X-Token': token
}
resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)
if(resp.status_code == 204):
logger.debug(f'delete result(status:{resp.status_code})')
def _write_result(self, transcipt):
self.transcript = transcipt
|
flexible
|
{
"blob_id": "421b0c1871350ff541b4e56d1e18d77016884552",
"index": 5199,
"step-1": "<mask token>\n\n\nclass Transcoder:\n <mask token>\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n <mask token>\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n <mask token>\n <mask token>\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-2": "<mask token>\n\n\nclass Transcoder:\n <mask token>\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n <mask token>\n <mask token>\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-3": "<mask token>\n\n\nclass Transcoder:\n <mask token>\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n\n def _get_result(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)\n if resp.status_code == 200:\n logger.debug(f'get result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'get result(status:{resp.status_code})')\n\n def _stream_generator(self):\n arr = []\n while True:\n try:\n v = self._queue.get_nowait()\n if v is None:\n return None\n arr.append((v * 32767).astype(np.int16))\n except queue.Empty:\n if len(arr) != 0:\n break\n else:\n time.sleep(0.1)\n return arr\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-4": "<mask token>\nAUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'\nVOICE_URL = 'https://api.recaius.jp/asr/v2/voices'\n\n\nclass Transcoder:\n \"\"\"Transcoder Class.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n\n def _get_result(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)\n if resp.status_code == 200:\n logger.debug(f'get result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'get result(status:{resp.status_code})')\n\n def _stream_generator(self):\n arr = []\n while True:\n try:\n v = self._queue.get_nowait()\n if v is None:\n return None\n arr.append((v * 32767).astype(np.int16))\n except queue.Empty:\n if len(arr) != 0:\n break\n else:\n time.sleep(0.1)\n return arr\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Transcoder with TOSHIBA RECAIUS API.\"\"\"\nimport threading\nimport queue\nimport time\n\nimport numpy as np\nfrom logzero import logger\nimport requests\n\nimport model.key\n\nAUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'\nVOICE_URL = 'https://api.recaius.jp/asr/v2/voices'\n\n\nclass Transcoder:\n \"\"\"Transcoder Class.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if(arr is None):\n break\n # logger.debug(f'{len(arr)} , {self._queue.qsize()}')\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {\n 'Content-Type': 'multipart/form-data',\n 'X-Token': token\n }\n files = {\n 'voice_id': ('', i, ''),\n 'voice': ('', arr_bytes, 'application/octet-stream')\n }\n resp = requests.put(\n f'{VOICE_URL}/{uuid}', headers=header, files=files)\n if(resp.status_code == 200):\n logger.debug(resp.json())\n result = resp.json()[0]\n if(result[0] == 'TMP_RESULT' or result[0] == 'RESULT'):\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if(self._get_result(uuid, token) is None):\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {\n \"speech_recog_jaJP\": {\n 'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password\n }\n }\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {\n 'Content-Type': 'application/json',\n 'X-Token': token\n }\n param = {\n 'voice_id': i,\n }\n resp = requests.put(\n f'{VOICE_URL}/{uuid}/flush', headers=header, json=param)\n if(resp.status_code == 200):\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n\n def _get_result(self, uuid, token):\n header = {\n 'X-Token': token\n }\n resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)\n if(resp.status_code == 200):\n logger.debug(f'get result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'get result(status:{resp.status_code})')\n\n def _stream_generator(self):\n arr = []\n while True:\n try:\n v = self._queue.get_nowait()\n # print(v)\n if v is None:\n return None\n arr.append((v * 32767).astype(np.int16))\n except queue.Empty:\n if(len(arr) != 0):\n break\n else:\n time.sleep(0.1)\n return arr\n\n def _start_recognition(self, token):\n header = {\n 'Content-Type': 'application/json',\n 'X-Token': token\n }\n param = {\n 'model_id': 1\n }\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {\n 'X-Token': token\n }\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if(resp.status_code == 204):\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def 
_write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-ids": [
9,
10,
12,
14,
16
]
}
|
[
9,
10,
12,
14,
16
] |
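The Transcoder record above talks to its worker thread purely through the queue: write_stream() enqueues float PCM frames, _stream_generator() scales them by 32767 into int16 samples, and a None value acts as the end-of-stream sentinel that lets _process() flush and finish. A minimal driver for that protocol — the frame size, capture rate and silence payload below are assumptions for illustration, and actually running it would need valid RECAIUS credentials in model.key — might look like:

import time
import numpy as np

# Hypothetical driver for the queue protocol of the Transcoder class above.
# Frames are float arrays in [-1.0, 1.0]; a None sentinel ends recognition.
transcoder = Transcoder()
transcoder.start(token=None)  # _process() authenticates via model.key, so this token argument is unused

for _ in range(50):  # roughly five seconds of an assumed 16 kHz mono capture
    frame = np.zeros(1600, dtype=np.float32)  # placeholder silence frame
    transcoder.write_stream(frame)
    time.sleep(0.1)

transcoder.write_stream(None)  # sentinel: _stream_generator() returns None and the loop ends
time.sleep(1.0)                # give the worker thread time to flush and fetch the final result
print(transcoder.transcript)   # latest TMP_RESULT / RESULT text, if any was received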
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: mas
short_description: Manage Mac App Store applications with mas-cli
description:
- Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
version_added: '0.2.0'
author:
- Michael Heap (@mheap)
- Lukas Bestle (@lukasbestle)
options:
id:
description:
- The Mac App Store identifier of the app(s) you want to manage.
- This can be found by running C(mas search APP_NAME) on your machine.
type: list
elements: int
state:
description:
- Desired state of the app installation.
- The C(absent) value requires root permissions, also see the examples.
type: str
choices:
- absent
- latest
- present
default: present
upgrade_all:
description:
- Upgrade all installed Mac App Store apps.
type: bool
default: "no"
aliases: ["upgrade"]
requirements:
- macOS 10.11+
- "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
- The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
notes:
- This module supports C(check_mode).
'''
EXAMPLES = '''
- name: Install Keynote
community.general.mas:
id: 409183694
state: present
- name: Install Divvy with command mas installed in /usr/local/bin
community.general.mas:
id: 413857545
state: present
environment:
PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
- name: Install a list of apps
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
- name: Ensure the latest Keynote version is installed
community.general.mas:
id: 409183694
state: latest
- name: Upgrade all installed Mac App Store apps
community.general.mas:
upgrade_all: yes
- name: Install specific apps and also upgrade all others
community.general.mas:
id:
- 409183694 # Keynote
- 413857545 # Divvy
state: present
upgrade_all: yes
- name: Uninstall Divvy
community.general.mas:
id: 413857545
state: absent
become: yes # Uninstallation requires root permissions
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from distutils.version import StrictVersion
import os
class Mas(object):
def __init__(self, module):
self.module = module
# Initialize data properties
self.mas_path = self.module.get_bin_path('mas')
self._checked_signin = False
self._installed = None # Populated only if needed
self._outdated = None # Populated only if needed
self.count_install = 0
self.count_upgrade = 0
self.count_uninstall = 0
self.result = {
'changed': False
}
self.check_mas_tool()
def app_command(self, command, id):
''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
if not self.module.check_mode:
if command != 'uninstall':
self.check_signin()
rc, out, err = self.run([command, str(id)])
if rc != 0:
self.module.fail_json(
msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
)
# No error or dry run
self.__dict__['count_' + command] += 1
def check_mas_tool(self):
''' Verifies that the `mas` tool is available in a recent version '''
# Is the `mas` tool available at all?
if not self.mas_path:
self.module.fail_json(msg='Required `mas` tool is not installed')
# Is the version recent enough?
rc, out, err = self.run(['version'])
if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):
self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
def check_signin(self):
''' Verifies that the user is signed in to the Mac App Store '''
# Only check this once per execution
if self._checked_signin:
return
rc, out, err = self.run(['account'])
if out.split("\n", 1)[0].rstrip() == 'Not signed in':
self.module.fail_json(msg='You must be signed in to the Mac App Store')
self._checked_signin = True
def exit(self):
''' Exit with the data we have collected over time '''
msgs = []
if self.count_install > 0:
msgs.append('Installed {0} app(s)'.format(self.count_install))
if self.count_upgrade > 0:
msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
if self.count_uninstall > 0:
msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
if msgs:
self.result['changed'] = True
self.result['msg'] = ', '.join(msgs)
self.module.exit_json(**self.result)
def get_current_state(self, command):
''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
rc, raw_apps, err = self.run([command])
rows = raw_apps.split("\n")
if rows[0] == "No installed apps found":
rows = []
apps = []
for r in rows:
# Format: "123456789 App Name"
r = r.split(' ', 1)
if len(r) == 2:
apps.append(int(r[0]))
return apps
def installed(self):
''' Returns the list of installed apps '''
# Populate cache if not already done
if self._installed is None:
self._installed = self.get_current_state('list')
return self._installed
def is_installed(self, id):
''' Checks whether the given app is installed '''
return int(id) in self.installed()
def is_outdated(self, id):
''' Checks whether the given app is installed, but outdated '''
return int(id) in self.outdated()
def outdated(self):
''' Returns the list of installed, but outdated apps '''
# Populate cache if not already done
if self._outdated is None:
self._outdated = self.get_current_state('outdated')
return self._outdated
def run(self, cmd):
''' Runs a command of the `mas` tool '''
cmd.insert(0, self.mas_path)
return self.module.run_command(cmd, False)
def upgrade_all(self):
''' Upgrades all installed apps and sets the correct result data '''
outdated = self.outdated()
if not self.module.check_mode:
self.check_signin()
rc, out, err = self.run(['upgrade'])
if rc != 0:
self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
self.count_upgrade += len(outdated)
def main():
module = AnsibleModule(
argument_spec=dict(
id=dict(type='list', elements='int'),
state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
),
supports_check_mode=True
)
mas = Mas(module)
if module.params['id']:
apps = module.params['id']
else:
apps = []
state = module.params['state']
upgrade = module.params['upgrade_all']
# Run operations on the given app IDs
for app in sorted(set(apps)):
if state == 'present':
if not mas.is_installed(app):
mas.app_command('install', app)
elif state == 'absent':
if mas.is_installed(app):
# Ensure we are root
if os.getuid() != 0:
module.fail_json(msg="Uninstalling apps requires root permissions ('become: yes')")
mas.app_command('uninstall', app)
elif state == 'latest':
if not mas.is_installed(app):
mas.app_command('install', app)
elif mas.is_outdated(app):
mas.app_command('upgrade', app)
# Upgrade all apps if requested
mas._outdated = None # Clear cache
if upgrade and mas.outdated():
mas.upgrade_all()
# Exit with the collected data
mas.exit()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "8b965fd91396735e0153390b4eff540d3aac3aff",
"index": 4916,
"step-1": "<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n 
out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n 
out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n__metaclass__ = type\nDOCUMENTATION = \"\"\"\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n\"\"\"\nEXAMPLES = \"\"\"\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n\"\"\"\nRETURN = ' # '\n<mask token>\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = 
self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nDOCUMENTATION = \"\"\"\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n\"\"\"\nEXAMPLES = \"\"\"\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n\"\"\"\nRETURN = ' # '\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom distutils.version import StrictVersion\nimport os\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None\n self._outdated = None\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {'changed': False}\n self.check_mas_tool()\n\n def app_command(self, command, id):\n \"\"\" Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' \"\"\"\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(msg=\n \"Error running command '{0}' on app '{1}': {2}\".format(\n command, str(id), out.rstrip()))\n self.__dict__['count_' + command] += 1\n\n def check_mas_tool(self):\n \"\"\" Verifies that the `mas` tool is available in a recent version \"\"\"\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()\n ) < StrictVersion('1.5.0'):\n self.module.fail_json(msg=\n '`mas` tool in 
version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n \"\"\" Verifies that the user is signed in to the Mac App Store \"\"\"\n if self._checked_signin:\n return\n rc, out, err = self.run(['account'])\n if out.split('\\n', 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg=\n 'You must be signed in to the Mac App Store')\n self._checked_signin = True\n\n def exit(self):\n \"\"\" Exit with the data we have collected over time \"\"\"\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n \"\"\" Returns the list of all app IDs; command can either be 'list' or 'outdated' \"\"\"\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split('\\n')\n if rows[0] == 'No installed apps found':\n rows = []\n apps = []\n for r in rows:\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n return apps\n\n def installed(self):\n \"\"\" Returns the list of installed apps \"\"\"\n if self._installed is None:\n self._installed = self.get_current_state('list')\n return self._installed\n\n def is_installed(self, id):\n \"\"\" Checks whether the given app is installed \"\"\"\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n \"\"\" Checks whether the given app is installed, but outdated \"\"\"\n return int(id) in self.outdated()\n\n def outdated(self):\n \"\"\" Returns the list of installed, but outdated apps \"\"\"\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n return self._outdated\n\n def run(self, cmd):\n \"\"\" Runs a command of the `mas` tool \"\"\"\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n \"\"\" Upgrades all installed apps and sets the correct result data \"\"\"\n outdated = self.outdated()\n if not self.module.check_mode:\n self.check_signin()\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' +\n out.rstrip())\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(argument_spec=dict(id=dict(type='list', elements\n ='int'), state=dict(type='str', default='present', choices=[\n 'absent', 'latest', 'present']), upgrade_all=dict(type='bool',\n default=False, aliases=['upgrade'])), supports_check_mode=True)\n mas = Mas(module)\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n for app in sorted(set(apps)):\n if state == 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif state == 'absent':\n if mas.is_installed(app):\n if os.getuid() != 0:\n module.fail_json(msg=\n \"Uninstalling apps requires root permissions ('become: yes')\"\n )\n mas.app_command('uninstall', app)\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n mas._outdated = None\n if upgrade and mas.outdated():\n mas.upgrade_all()\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>\n# Copyright: (c) 2017, Michael Heap <m@michaelheap.com>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\nmodule: mas\nshort_description: Manage Mac App Store applications with mas-cli\ndescription:\n - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).\nversion_added: '0.2.0'\nauthor:\n - Michael Heap (@mheap)\n - Lukas Bestle (@lukasbestle)\noptions:\n id:\n description:\n - The Mac App Store identifier of the app(s) you want to manage.\n - This can be found by running C(mas search APP_NAME) on your machine.\n type: list\n elements: int\n state:\n description:\n - Desired state of the app installation.\n - The C(absent) value requires root permissions, also see the examples.\n type: str\n choices:\n - absent\n - latest\n - present\n default: present\n upgrade_all:\n description:\n - Upgrade all installed Mac App Store apps.\n type: bool\n default: \"no\"\n aliases: [\"upgrade\"]\nrequirements:\n - macOS 10.11+\n - \"mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path\"\n - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).\nnotes:\n - This module supports C(check_mode).\n'''\n\nEXAMPLES = '''\n- name: Install Keynote\n community.general.mas:\n id: 409183694\n state: present\n\n- name: Install Divvy with command mas installed in /usr/local/bin\n community.general.mas:\n id: 413857545\n state: present\n environment:\n PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}\n\n- name: Install a list of apps\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n\n- name: Ensure the latest Keynote version is installed\n community.general.mas:\n id: 409183694\n state: latest\n\n- name: Upgrade all installed Mac App Store apps\n community.general.mas:\n upgrade_all: yes\n\n- name: Install specific apps and also upgrade all others\n community.general.mas:\n id:\n - 409183694 # Keynote\n - 413857545 # Divvy\n state: present\n upgrade_all: yes\n\n- name: Uninstall Divvy\n community.general.mas:\n id: 413857545\n state: absent\n become: yes # Uninstallation requires root permissions\n'''\n\nRETURN = r''' # '''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_native\nfrom distutils.version import StrictVersion\nimport os\n\n\nclass Mas(object):\n\n def __init__(self, module):\n self.module = module\n\n # Initialize data properties\n self.mas_path = self.module.get_bin_path('mas')\n self._checked_signin = False\n self._installed = None # Populated only if needed\n self._outdated = None # Populated only if needed\n self.count_install = 0\n self.count_upgrade = 0\n self.count_uninstall = 0\n self.result = {\n 'changed': False\n }\n\n self.check_mas_tool()\n\n def app_command(self, command, id):\n ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''\n\n if not self.module.check_mode:\n if command != 'uninstall':\n self.check_signin()\n\n rc, out, err = self.run([command, str(id)])\n if rc != 0:\n self.module.fail_json(\n msg=\"Error running command '{0}' on app '{1}': {2}\".format(command, str(id), out.rstrip())\n )\n\n # No error or dry run\n self.__dict__['count_' + 
command] += 1\n\n def check_mas_tool(self):\n ''' Verifies that the `mas` tool is available in a recent version '''\n\n # Is the `mas` tool available at all?\n if not self.mas_path:\n self.module.fail_json(msg='Required `mas` tool is not installed')\n\n # Is the version recent enough?\n rc, out, err = self.run(['version'])\n if rc != 0 or not out.strip() or StrictVersion(out.strip()) < StrictVersion('1.5.0'):\n self.module.fail_json(msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())\n\n def check_signin(self):\n ''' Verifies that the user is signed in to the Mac App Store '''\n\n # Only check this once per execution\n if self._checked_signin:\n return\n\n rc, out, err = self.run(['account'])\n if out.split(\"\\n\", 1)[0].rstrip() == 'Not signed in':\n self.module.fail_json(msg='You must be signed in to the Mac App Store')\n\n self._checked_signin = True\n\n def exit(self):\n ''' Exit with the data we have collected over time '''\n\n msgs = []\n if self.count_install > 0:\n msgs.append('Installed {0} app(s)'.format(self.count_install))\n if self.count_upgrade > 0:\n msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))\n if self.count_uninstall > 0:\n msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))\n\n if msgs:\n self.result['changed'] = True\n self.result['msg'] = ', '.join(msgs)\n\n self.module.exit_json(**self.result)\n\n def get_current_state(self, command):\n ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''\n\n rc, raw_apps, err = self.run([command])\n rows = raw_apps.split(\"\\n\")\n if rows[0] == \"No installed apps found\":\n rows = []\n apps = []\n for r in rows:\n # Format: \"123456789 App Name\"\n r = r.split(' ', 1)\n if len(r) == 2:\n apps.append(int(r[0]))\n\n return apps\n\n def installed(self):\n ''' Returns the list of installed apps '''\n\n # Populate cache if not already done\n if self._installed is None:\n self._installed = self.get_current_state('list')\n\n return self._installed\n\n def is_installed(self, id):\n ''' Checks whether the given app is installed '''\n\n return int(id) in self.installed()\n\n def is_outdated(self, id):\n ''' Checks whether the given app is installed, but outdated '''\n\n return int(id) in self.outdated()\n\n def outdated(self):\n ''' Returns the list of installed, but outdated apps '''\n\n # Populate cache if not already done\n if self._outdated is None:\n self._outdated = self.get_current_state('outdated')\n\n return self._outdated\n\n def run(self, cmd):\n ''' Runs a command of the `mas` tool '''\n\n cmd.insert(0, self.mas_path)\n return self.module.run_command(cmd, False)\n\n def upgrade_all(self):\n ''' Upgrades all installed apps and sets the correct result data '''\n\n outdated = self.outdated()\n\n if not self.module.check_mode:\n self.check_signin()\n\n rc, out, err = self.run(['upgrade'])\n if rc != 0:\n self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())\n\n self.count_upgrade += len(outdated)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n id=dict(type='list', elements='int'),\n state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),\n upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),\n ),\n supports_check_mode=True\n )\n mas = Mas(module)\n\n if module.params['id']:\n apps = module.params['id']\n else:\n apps = []\n\n state = module.params['state']\n upgrade = module.params['upgrade_all']\n\n # Run operations on the given app IDs\n for app in sorted(set(apps)):\n if state 
== 'present':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n\n elif state == 'absent':\n if mas.is_installed(app):\n # Ensure we are root\n if os.getuid() != 0:\n module.fail_json(msg=\"Uninstalling apps requires root permissions ('become: yes')\")\n\n mas.app_command('uninstall', app)\n\n elif state == 'latest':\n if not mas.is_installed(app):\n mas.app_command('install', app)\n elif mas.is_outdated(app):\n mas.app_command('upgrade', app)\n\n # Upgrade all apps if requested\n mas._outdated = None # Clear cache\n if upgrade and mas.outdated():\n mas.upgrade_all()\n\n # Exit with the collected data\n mas.exit()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
14,
15,
16,
17,
18
]
}
|
[
14,
15,
16,
17,
18
] |
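
The mas record above centres on the Mas helper class, whose get_current_state method turns raw output from the mas list / mas outdated commands into integer app IDs by splitting each row once on the first space. Below is a minimal standalone sketch of just that parsing step; the sample output string is an assumption built from the app IDs in the record's EXAMPLES block, and the helper name parse_mas_output is illustrative rather than part of the module.

# Sketch of the row-parsing logic in Mas.get_current_state (illustrative only).
# Each row is assumed to look like "<numeric id> <app name>".
def parse_mas_output(raw_apps):
    rows = raw_apps.split('\n')
    if rows and rows[0] == 'No installed apps found':
        rows = []
    apps = []
    for r in rows:
        parts = r.split(' ', 1)
        if len(parts) == 2:
            apps.append(int(parts[0]))  # keep only the numeric identifier
    return apps

# Hypothetical `mas list` output, reusing the IDs from the EXAMPLES block
print(parse_mas_output('409183694 Keynote\n413857545 Divvy'))  # [409183694, 413857545]
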
# https://www.acmicpc.net/problem/2751
# n numbers are given
# goal: sort them in ascending order
# first line: n
# from the second line, n lines each hold one number (listed vertically)
# output: print the ascending-sorted result, one value per line?
n=int(input())
n_list=[int(input()) for _ in range(n)]
# print(n_list)
nn_list = []
# keep track of two indices
mid_idx = len(n_list) //2
left_idx = 0
right_idx = mid_idx +1
while left_idx <= mid_idx and right_idx <= n-1:
# nn_list = []
if n_list[left_idx] < n_list[right_idx]:
nn_list.append(n_list[left_idx])
left_idx += 1
elif n_list[left_idx] > n_list[right_idx]:
nn_list.append(n_list[right_idx])
right_idx+=1
else:
break
print(nn_list, end='\n')
# problem solving: sorting holds once there is a single value = when start_idx == end_idx, remember that a one-element list gets returned
# during the merge_sort step
# combined_list=[]
# while f[fidx] <=len(f) - or? and? b[bidx]<=len(b) - neither and nor or works here
# f<b
# the smaller value gets taken
# append it
# advance the index of the list holding the smaller value by 1
# the opposite case works the same way
# once the front list is fully consumed, values may still be left in the back list
# whether it is the front or the back, one of them finishes first and ends up as an empty list
# attach the one remaining list that is left over
# print(combined_list = combined_list +f[fidx:] +b[bidx:]) lets you watch the behaviour
# return combined_list = combined_list +f[fidx:] +b[bidx:]
# top-down approach
# (a complete top-down merge sort is sketched right after this record)
|
normal
|
{
"blob_id": "fb5508b1b5aa36c4921358d6ca7f96fc7d565241",
"index": 5104,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile left_idx <= mid_idx and right_idx <= n - 1:\n if n_list[left_idx] < n_list[right_idx]:\n nn_list.append(n_list[left_idx])\n left_idx += 1\n elif n_list[left_idx] > n_list[right_idx]:\n nn_list.append(n_list[right_idx])\n right_idx += 1\n else:\n break\nprint(nn_list, end='\\n')\n",
"step-3": "n = int(input())\nn_list = [int(input()) for _ in range(n)]\nnn_list = []\nmid_idx = len(n_list) // 2\nleft_idx = 0\nright_idx = mid_idx + 1\nwhile left_idx <= mid_idx and right_idx <= n - 1:\n if n_list[left_idx] < n_list[right_idx]:\n nn_list.append(n_list[left_idx])\n left_idx += 1\n elif n_list[left_idx] > n_list[right_idx]:\n nn_list.append(n_list[right_idx])\n right_idx += 1\n else:\n break\nprint(nn_list, end='\\n')\n",
"step-4": "# https://www.acmicpc.net/problem/2751\n\n# n 개 수가 주어짐 \n\n# 목표 오름차순정렬\n\n# 첫 줄 n개\n# 둘째줄부터 n개의 줄에 수가 주어짐 세로로\n\n# 출력 오름차순 정렬한 결과를 한 줄에 하나씩 출력한다?\n\n\nn=int(input())\nn_list=[int(input()) for _ in range(n)]\n# print(n_list)\nnn_list = []\n# 인덱스 2개 관리\nmid_idx = len(n_list) //2\nleft_idx = 0 \nright_idx = mid_idx +1\n\nwhile left_idx <= mid_idx and right_idx <= n-1:\n # nn_list = []\n if n_list[left_idx] < n_list[right_idx]:\n nn_list.append(n_list[left_idx])\n left_idx += 1\n elif n_list[left_idx] > n_list[right_idx]:\n nn_list.append(n_list[right_idx])\n right_idx+=1\n else:\n break\nprint(nn_list, end='\\n')\n\n\n# 문제해결 정렬이 된다 = 값이 하나일때 = if start_idx == end_idx return 값이 하나짜리 리스트가 넘어간다는 것을 기억해라\n# merge_sort과정에서\n# combined_list=[]\n# while f[fidx] <=len(f) or이냐 and냐 b[bidx]<=len(b) and or 모두 동작하지 않음\n\n\n# f<b\n# 작은값을 넣어지고\n# 어펜드\n# 작은값의 리스트 인덱스 +1\n# 반대의 경우도 똑같음\n\n# 프론트를 다 넣으면 백에서 못넣은 값들이 남아있을수도 있다\n\n# 백이든 프론트든 하나는 끝났다 하나는 빈리스트가 나온다\n# 나머지 하나의 남은 리스트를 붙여준다\n# print(combined_list = combined_list +f[fidx:] +b[bidx:]) 동작을 볼 수 있음\n# return combined_list = combined_list +f[fidx:] +b[bidx:]\n# 탑 다운 방식",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
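
The comments in the record above describe the merge step of a top-down merge sort, but the code itself only performs one partial merge pass over the unsorted input and stops as soon as two equal values meet. The sketch below fills in the complete top-down merge sort those comments are reasoning about; the names merge and merge_sort are illustrative and not taken from the record.

# Sketch of the top-down merge sort described by the comments above (illustrative).
def merge(front, back):
    combined_list = []
    fidx = bidx = 0
    # take the smaller head element and advance that list's index
    while fidx < len(front) and bidx < len(back):
        if front[fidx] <= back[bidx]:
            combined_list.append(front[fidx])
            fidx += 1
        else:
            combined_list.append(back[bidx])
            bidx += 1
    # one side is exhausted; attach whatever remains of the other
    return combined_list + front[fidx:] + back[bidx:]

def merge_sort(values):
    # a list with at most one value is already sorted
    if len(values) <= 1:
        return values
    mid = len(values) // 2
    return merge(merge_sort(values[:mid]), merge_sort(values[mid:]))

# merge_sort([5, 3, 4, 1, 2]) -> [1, 2, 3, 4, 5]

Checking fidx < len(front) combined with and settles the or/and question raised in the comments: the loop runs only while both halves still have unmerged elements, and the leftover slice is appended afterwards.
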
import os
import time
from datetime import datetime
from typing import List, Tuple
from pyspark.sql import SparkSession
from Chapter01.utilities01_py.helper_python import create_session
from Chapter02.utilities02_py.domain_objects import WarcRecord
from Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc
def fall_asleep(record: WarcRecord):
current_uri: str = record.target_uri
start_time = str(datetime.now())
process_id = str(os.getpid())
print('@@1 falling asleep in process {} at {} processing {}'.format(process_id, start_time, current_uri))
time.sleep(5)
end_time = str(datetime.now())
print('@@2 awakening in process {} at {} processing {}'.format(process_id, end_time, current_uri))
return process_id, current_uri
def trivial_filter(processid_uri: (int, str)) -> bool:
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@3 filter in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))
return True
def quick_print(processid_uri: (int, str)) -> (int, int):
new_process_id = str(os.getpid())
timepoint = str(datetime.now())
print('@@4 map2 in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))
return processid_uri[0], new_process_id
if __name__ == "__main__":
session: SparkSession = create_session(3, "Wave exploration")
input_warc = "/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc" # ToDo: Change path
raw_records = extract_raw_records(input_warc, session)
warc_records = raw_records \
.flatMap(parse_raw_warc)
process_ids_rdd = warc_records\
.map(fall_asleep)\
.filter(trivial_filter)\
.map(quick_print)
distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct().collect()
print(distinct_process_ids)
|
normal
|
{
"blob_id": "fccdf75fe83ad8388c12a63555c4132181fd349a",
"index": 1646,
"step-1": "<mask token>\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\n<mask token>\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) ->bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) ->bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\nif __name__ == '__main__':\n session: SparkSession = create_session(3, 'Wave exploration')\n input_warc = (\n '/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc'\n )\n raw_records = extract_raw_records(input_warc, session)\n warc_records = raw_records.flatMap(parse_raw_warc)\n process_ids_rdd = warc_records.map(fall_asleep).filter(trivial_filter).map(\n quick_print)\n distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct(\n ).collect()\n print(distinct_process_ids)\n",
"step-4": "import os\nimport time\nfrom datetime import datetime\nfrom typing import List, Tuple\nfrom pyspark.sql import SparkSession\nfrom Chapter01.utilities01_py.helper_python import create_session\nfrom Chapter02.utilities02_py.domain_objects import WarcRecord\nfrom Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(\n process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(\n process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) ->bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) ->(int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(\n new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\n\nif __name__ == '__main__':\n session: SparkSession = create_session(3, 'Wave exploration')\n input_warc = (\n '/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc'\n )\n raw_records = extract_raw_records(input_warc, session)\n warc_records = raw_records.flatMap(parse_raw_warc)\n process_ids_rdd = warc_records.map(fall_asleep).filter(trivial_filter).map(\n quick_print)\n distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct(\n ).collect()\n print(distinct_process_ids)\n",
"step-5": "import os\nimport time\nfrom datetime import datetime\nfrom typing import List, Tuple\nfrom pyspark.sql import SparkSession\nfrom Chapter01.utilities01_py.helper_python import create_session\nfrom Chapter02.utilities02_py.domain_objects import WarcRecord\nfrom Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc\n\n\ndef fall_asleep(record: WarcRecord):\n current_uri: str = record.target_uri\n start_time = str(datetime.now())\n process_id = str(os.getpid())\n print('@@1 falling asleep in process {} at {} processing {}'.format(process_id, start_time, current_uri))\n time.sleep(5)\n end_time = str(datetime.now())\n print('@@2 awakening in process {} at {} processing {}'.format(process_id, end_time, current_uri))\n return process_id, current_uri\n\n\ndef trivial_filter(processid_uri: (int, str)) -> bool:\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@3 filter in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))\n return True\n\n\ndef quick_print(processid_uri: (int, str)) -> (int, int):\n new_process_id = str(os.getpid())\n timepoint = str(datetime.now())\n print('@@4 map2 in process {} at {} processing {}'.format(new_process_id, timepoint, processid_uri[1]))\n return processid_uri[0], new_process_id\n\nif __name__ == \"__main__\":\n session: SparkSession = create_session(3, \"Wave exploration\")\n\n input_warc = \"/Users/a/Desktop/Buch/CC-MAIN-20191013195541-20191013222541-00000.warc\" # ToDo: Change path\n raw_records = extract_raw_records(input_warc, session)\n warc_records = raw_records \\\n .flatMap(parse_raw_warc)\n\n process_ids_rdd = warc_records\\\n .map(fall_asleep)\\\n .filter(trivial_filter)\\\n .map(quick_print)\n\n distinct_process_ids: List[Tuple[int, int]] = process_ids_rdd.distinct().collect()\n print(distinct_process_ids)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
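
The @@1-@@4 prints in the record above exist to show that, for a given URI, fall_asleep, trivial_filter and quick_print normally report the same process ID: map and filter are narrow transformations, so Spark pipelines them into a single task on one Python worker. The sketch below repeats the same experiment without the WARC helpers; the local[3] master, the range data and the function names are assumptions of this sketch, not part of the record.

# Minimal sketch of the process-ID experiment on plain data (illustrative only).
import os
from pyspark.sql import SparkSession

def tag_with_pid(value):
    # record which Python worker process ran the first map
    return value, os.getpid()

def tag_again(value_pid):
    # pipelined narrow transformations normally stay in the same worker
    return value_pid[1], os.getpid()

if __name__ == '__main__':
    spark = SparkSession.builder.master('local[3]').appName('pid demo').getOrCreate()
    rdd = spark.sparkContext.parallelize(range(12), 3)
    pairs = rdd.map(tag_with_pid).filter(lambda vp: True).map(tag_again)
    # expect a few identical (pid, pid) pairs, roughly one per worker process
    print(pairs.distinct().collect())
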
<mask token>
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(desc='input DWI image', exists=True, mandatory=True,
position=0, argstr='%s')
force = traits.Bool(desc='force overwrite of output files', position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed', name_source='in_file',
keep_extension=True, argstr='%s', position=1, desc=
'the output unringed DWI image')
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc='the output unringed DWI image', exists=True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
class EddyInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=
'File containing all the images to estimate distortions for')
in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=
'Mask to indicate brain')
in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=
'File containing indices for all volumes in --imain into --acqp and --topup'
)
in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=
'File containing acquisition parameters')
in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=
'File containing the b-vectors for all volumes in --imain')
in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=
'File containing the b-values for all volumes in --imain')
out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=
True, desc='basename for output (warped) image')
session = File(exists=True, argstr='--session=%s', desc=
'File containing session indices for all volumes in --imain')
in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[
'in_topup_movpar'], desc='topup file containing the field coefficients'
)
in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(False, argstr='--fep', desc=
'Fill empty planes in x- or y-directions')
interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=
'Interpolation model for estimation step')
nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=
'# of voxels used to estimate the hyperparameters')
fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',
desc='Fudge factor for hyperparameter error variance')
dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',
desc='Do NOT attempt to separate field offset from subject movement')
dont_peas = traits.Bool(False, argstr='--dont_peas', desc=
'Do NOT perform a post-eddy alignment of shells')
fwhm = traits.Float(desc=
'FWHM for conditioning filter when estimating the parameters',
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=
'Number of iterations')
method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=
'Final resampling method (jacobian/least squares)')
repol = traits.Bool(False, argstr='--repol', desc=
'Detect and replace outlier slices')
num_threads = traits.Int(1, usedefault=True, nohash=True, desc=
'Number of openmp threads to use')
is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=
        'Override internal check to ensure that data are acquired on a set of b-value shells'
)
field = traits.Str(argstr='--field=%s', desc=
'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'
)
field_mat = File(exists=True, argstr='--field_mat=%s', desc=
'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'
)
use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')
class EddyOutputSpec(TraitedSpec):
out_corrected = File(exists=True, desc=
'4D image file containing all the corrected volumes')
out_parameter = File(exists=True, desc=
        'text file with parameters defining the field and movement for each scan'
)
out_rotated_bvecs = File(exists=True, desc=
        'File containing rotated b-vectors for all volumes')
out_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume disregarding translation in the PE direction'
)
out_shell_alignment_parameters = File(exists=True, desc=
'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'
)
out_outlier_report = File(exists=True, desc=
'Text-file with a plain language report on what outlier slices eddy has found'
)
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.
num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.
path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.
inputs.out_base)
outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %
self.inputs.out_base)
out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.
inputs.out_base)
out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.
inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.
out_base)
out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %
self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'
] = out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'
] = out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
<|reserved_special_token_1|>
<mask token>
class DWIdenoise(CommandLine):
    <mask token>
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(desc='input DWI image', exists=True, mandatory=True,
position=0, argstr='%s')
force = traits.Bool(desc='force overwrite of output files', position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed', name_source='in_file',
keep_extension=True, argstr='%s', position=1, desc=
'the output unringed DWI image')
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc='the output unringed DWI image', exists=True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
class EddyInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=
'File containing all the images to estimate distortions for')
in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=
'Mask to indicate brain')
in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=
'File containing indices for all volumes in --imain into --acqp and --topup'
)
in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=
'File containing acquisition parameters')
in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=
'File containing the b-vectors for all volumes in --imain')
in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=
'File containing the b-values for all volumes in --imain')
out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=
True, desc='basename for output (warped) image')
session = File(exists=True, argstr='--session=%s', desc=
'File containing session indices for all volumes in --imain')
in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[
'in_topup_movpar'], desc='topup file containing the field coefficients'
)
in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(False, argstr='--fep', desc=
'Fill empty planes in x- or y-directions')
interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=
'Interpolation model for estimation step')
nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=
'# of voxels used to estimate the hyperparameters')
fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',
desc='Fudge factor for hyperparameter error variance')
dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',
desc='Do NOT attempt to separate field offset from subject movement')
dont_peas = traits.Bool(False, argstr='--dont_peas', desc=
'Do NOT perform a post-eddy alignment of shells')
fwhm = traits.Float(desc=
'FWHM for conditioning filter when estimating the parameters',
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=
'Number of iterations')
method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=
'Final resampling method (jacobian/least squares)')
repol = traits.Bool(False, argstr='--repol', desc=
'Detect and replace outlier slices')
num_threads = traits.Int(1, usedefault=True, nohash=True, desc=
'Number of openmp threads to use')
is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=
        'Override internal check to ensure that data are acquired on a set of b-value shells'
)
field = traits.Str(argstr='--field=%s', desc=
'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'
)
field_mat = File(exists=True, argstr='--field_mat=%s', desc=
'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'
)
use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')
class EddyOutputSpec(TraitedSpec):
out_corrected = File(exists=True, desc=
'4D image file containing all the corrected volumes')
out_parameter = File(exists=True, desc=
        'text file with parameters defining the field and movement for each scan'
)
out_rotated_bvecs = File(exists=True, desc=
        'File containing rotated b-vectors for all volumes')
out_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume disregarding translation in the PE direction'
)
out_shell_alignment_parameters = File(exists=True, desc=
'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'
)
out_outlier_report = File(exists=True, desc=
'Text-file with a plain language report on what outlier slices eddy has found'
)
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.
num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.
path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.
inputs.out_base)
outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %
self.inputs.out_base)
out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.
inputs.out_base)
out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.
inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.
out_base)
out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %
self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'
] = out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'
] = out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
<|reserved_special_token_1|>
<mask token>
class DWIdenoiseOutputSpec(TraitedSpec):
    <mask token>
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(desc='input DWI image', exists=True, mandatory=True,
position=0, argstr='%s')
force = traits.Bool(desc='force overwrite of output files', position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed', name_source='in_file',
keep_extension=True, argstr='%s', position=1, desc=
'the output unringed DWI image')
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc='the output unringed DWI image', exists=True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
class EddyInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=
'File containing all the images to estimate distortions for')
in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=
'Mask to indicate brain')
in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=
'File containing indices for all volumes in --imain into --acqp and --topup'
)
in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=
'File containing acquisition parameters')
in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=
'File containing the b-vectors for all volumes in --imain')
in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=
'File containing the b-values for all volumes in --imain')
out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=
True, desc='basename for output (warped) image')
session = File(exists=True, argstr='--session=%s', desc=
'File containing session indices for all volumes in --imain')
in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[
'in_topup_movpar'], desc='topup file containing the field coefficients'
)
in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(False, argstr='--fep', desc=
'Fill empty planes in x- or y-directions')
interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=
'Interpolation model for estimation step')
nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=
'# of voxels used to estimate the hyperparameters')
fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',
desc='Fudge factor for hyperparameter error variance')
dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',
desc='Do NOT attempt to separate field offset from subject movement')
dont_peas = traits.Bool(False, argstr='--dont_peas', desc=
'Do NOT perform a post-eddy alignment of shells')
fwhm = traits.Float(desc=
'FWHM for conditioning filter when estimating the parameters',
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=
'Number of iterations')
method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=
'Final resampling method (jacobian/least squares)')
repol = traits.Bool(False, argstr='--repol', desc=
'Detect and replace outlier slices')
num_threads = traits.Int(1, usedefault=True, nohash=True, desc=
'Number of openmp threads to use')
is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=
        'Override internal check to ensure that data are acquired on a set of b-value shells'
)
field = traits.Str(argstr='--field=%s', desc=
'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'
)
field_mat = File(exists=True, argstr='--field_mat=%s', desc=
'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'
)
use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')
class EddyOutputSpec(TraitedSpec):
out_corrected = File(exists=True, desc=
'4D image file containing all the corrected volumes')
out_parameter = File(exists=True, desc=
        'text file with parameters defining the field and movement for each scan'
)
out_rotated_bvecs = File(exists=True, desc=
        'File containing rotated b-vectors for all volumes')
out_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume disregarding translation in the PE direction'
)
out_shell_alignment_parameters = File(exists=True, desc=
'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'
)
out_outlier_report = File(exists=True, desc=
'Text-file with a plain language report on what outlier slices eddy has found'
)
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.
num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.
path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.
inputs.out_base)
outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %
self.inputs.out_base)
out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.
inputs.out_base)
out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.
inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.
out_base)
out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %
self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'
] = out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'
] = out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DWIdenoiseInputSpec(CommandLineInputSpec):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc='the output denoised DWI image', exists=True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(desc='input DWI image', exists=True, mandatory=True,
position=0, argstr='%s')
force = traits.Bool(desc='force overwrite of output files', position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed', name_source='in_file',
keep_extension=True, argstr='%s', position=1, desc=
'the output unringed DWI image')
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc='the output unringed DWI image', exists=True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
class EddyInputSpec(CommandLineInputSpec):
in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=
'File containing all the images to estimate distortions for')
in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=
'Mask to indicate brain')
in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=
'File containing indices for all volumes in --imain into --acqp and --topup'
)
in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=
'File containing acquisition parameters')
in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=
'File containing the b-vectors for all volumes in --imain')
in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=
'File containing the b-values for all volumes in --imain')
out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=
True, desc='basename for output (warped) image')
session = File(exists=True, argstr='--session=%s', desc=
'File containing session indices for all volumes in --imain')
in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[
'in_topup_movpar'], desc='topup file containing the field coefficients'
)
in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(False, argstr='--fep', desc=
'Fill empty planes in x- or y-directions')
interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=
'Interpolation model for estimation step')
nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=
'# of voxels used to estimate the hyperparameters')
fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',
desc='Fudge factor for hyperparameter error variance')
dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',
desc='Do NOT attempt to separate field offset from subject movement')
dont_peas = traits.Bool(False, argstr='--dont_peas', desc=
'Do NOT perform a post-eddy alignment of shells')
fwhm = traits.Float(desc=
'FWHM for conditioning filter when estimating the parameters',
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=
'Number of iterations')
method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=
'Final resampling method (jacobian/least squares)')
repol = traits.Bool(False, argstr='--repol', desc=
'Detect and replace outlier slices')
num_threads = traits.Int(1, usedefault=True, nohash=True, desc=
'Number of openmp threads to use')
is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=
        'Override internal check to ensure that data are acquired on a set of b-value shells'
)
field = traits.Str(argstr='--field=%s', desc=
'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'
)
field_mat = File(exists=True, argstr='--field_mat=%s', desc=
'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'
)
use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')
class EddyOutputSpec(TraitedSpec):
out_corrected = File(exists=True, desc=
'4D image file containing all the corrected volumes')
out_parameter = File(exists=True, desc=
        'text file with parameters defining the field and movement for each scan'
)
out_rotated_bvecs = File(exists=True, desc=
        'File containing rotated b-vectors for all volumes')
out_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(exists=True, desc=
'Summary of the "total movement" in each volume disregarding translation in the PE direction'
)
out_shell_alignment_parameters = File(exists=True, desc=
'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'
)
out_outlier_report = File(exists=True, desc=
'Text-file with a plain language report on what outlier slices eddy has found'
)
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.
num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.
path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.
inputs.out_base)
outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %
self.inputs.out_base)
out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.
inputs.out_base)
out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.
inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.
out_base)
out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %
self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'
] = out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'
] = out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 13:13:53 2018
@author: zhang
"""
'''
Wrapped commands used during diffusion-weighted image preprocessing
================================================================
dwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL
-------------------------------------------------------------------------
for unknown reasons they are not included after loading the relevant interfaces
'''
from nipype.interfaces.base import (CommandLine,
CommandLineInputSpec,
File,
TraitedSpec,
traits,
isdefined,
InputMultiPath)
import os
# wrap the dwidenoise function from MRtrix
class DWIdenoiseInputSpec(CommandLineInputSpec):
in_file = InputMultiPath(
File(exists=True),
mandatory=True,
position=0,
argstr="%s",
desc="input DWI image")
noise = File(
argstr='-noise %s',
desc='noise map')
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_denoised',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output denoised DWI image")
class DWIdenoiseOutputSpec(TraitedSpec):
out_file = File(desc = "the output denoised DWI image", exists = True)
class DWIdenoise(CommandLine):
"""Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the
noise level based on the optimal threshold for PCA.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'
input_spec = DWIdenoiseInputSpec
output_spec = DWIdenoiseOutputSpec
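# Usage sketch (editor's addition, not part of the original script): how the
# DWIdenoise wrapper above is typically used on its own. The input path is a
# placeholder; cmdline only assembles the dwidenoise call, run() would execute it.
#
#   denoise = DWIdenoise()
#   denoise.inputs.in_file = 'dwi.nii.gz'   # placeholder path
#   print(denoise.cmdline)                  # .../dwidenoise dwi.nii.gz dwi_denoised.nii.gz
#   res = denoise.run()                     # res.outputs.out_file -> denoised image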
# wrap the unring function from MRtrix
class MRdegibbsInputSpec(CommandLineInputSpec):
in_file = File(
desc="input DWI image",
exists=True,
mandatory=True,
position=0,
argstr="%s")
force = traits.Bool(
desc='force overwrite of output files',
position=-1,
argstr='-force')
out_file = File(name_template='%s_unringed',
name_source='in_file',
keep_extension=True,
argstr="%s",
position=1,
desc="the output unringed DWI image")
class MRdegibbsOutputSpec(TraitedSpec):
out_file = File(desc = "the output unringed DWI image", exists = True)
class MRdegibbs(CommandLine):
"""Use MRTrix3 degibbs command for removing the gibbs ringing artefact.
For more information, see
<https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>
"""
_cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'
input_spec = MRdegibbsInputSpec
output_spec = MRdegibbsOutputSpec
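# Usage sketch (editor's addition): same pattern for the Gibbs de-ringing wrapper;
# the input file name is a placeholder, typically the output of DWIdenoise above.
#
#   degibbs = MRdegibbs()
#   degibbs.inputs.in_file = 'dwi_denoised.nii.gz'   # placeholder path
#   res = degibbs.run()                              # res.outputs.out_file -> unringed image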
# Wrap FSL eddy (copy from nipype interface)
class EddyInputSpec(CommandLineInputSpec):
in_file = File(
exists=True,
mandatory=True,
argstr='--imain=%s',
desc=('File containing all the images to estimate '
'distortions for'))
in_mask = File(
exists=True,
mandatory=True,
argstr='--mask=%s',
desc='Mask to indicate brain')
in_index = File(
exists=True,
mandatory=True,
argstr='--index=%s',
desc=('File containing indices for all volumes in --imain '
'into --acqp and --topup'))
in_acqp = File(
exists=True,
mandatory=True,
argstr='--acqp=%s',
desc='File containing acquisition parameters')
in_bvec = File(
exists=True,
mandatory=True,
argstr='--bvecs=%s',
desc=('File containing the b-vectors for all volumes in '
'--imain'))
in_bval = File(
exists=True,
mandatory=True,
argstr='--bvals=%s',
desc=('File containing the b-values for all volumes in '
'--imain'))
out_base = traits.Str(
'eddy_corrected',
argstr='--out=%s',
usedefault=True,
desc=('basename for output (warped) image'))
session = File(
exists=True,
argstr='--session=%s',
desc=('File containing session indices for all volumes in '
'--imain'))
in_topup_fieldcoef = File(
exists=True,
argstr="--topup=%s",
requires=['in_topup_movpar'],
desc=('topup file containing the field '
'coefficients'))
in_topup_movpar = File(
exists=True,
requires=['in_topup_fieldcoef'],
desc='topup movpar.txt file')
flm = traits.Enum(
'linear',
'quadratic',
'cubic',
argstr='--flm=%s',
desc='First level EC model')
slm = traits.Enum(
'none',
'linear',
'quadratic',
argstr='--slm=%s',
desc='Second level EC model')
fep = traits.Bool(
False, argstr='--fep', desc='Fill empty planes in x- or y-directions')
interp = traits.Enum(
'spline',
'trilinear',
argstr='--interp=%s',
desc='Interpolation model for estimation step')
nvoxhp = traits.Int(
1000, usedefault=True,
argstr='--nvoxhp=%s',
desc=('# of voxels used to estimate the '
'hyperparameters'))
fudge_factor = traits.Float(
10.0, usedefault=True,
argstr='--ff=%s',
desc=('Fudge factor for hyperparameter '
'error variance'))
dont_sep_offs_move = traits.Bool(
False,
argstr='--dont_sep_offs_move',
desc=('Do NOT attempt to separate '
'field offset from subject '
'movement'))
dont_peas = traits.Bool(
False,
argstr='--dont_peas',
desc="Do NOT perform a post-eddy alignment of "
"shells")
fwhm = traits.Float(
desc=('FWHM for conditioning filter when estimating '
'the parameters'),
argstr='--fwhm=%s')
niter = traits.Int(5, usedefault=True,
argstr='--niter=%s', desc='Number of iterations')
method = traits.Enum(
'jac',
'lsr',
argstr='--resamp=%s',
desc=('Final resampling method (jacobian/least '
'squares)'))
repol = traits.Bool(
False, argstr='--repol', desc='Detect and replace outlier slices')
num_threads = traits.Int(
1,
usedefault=True,
nohash=True,
desc="Number of openmp threads to use")
is_shelled = traits.Bool(
False,
argstr='--data_is_shelled',
desc="Override internal check to ensure that "
"date are acquired on a set of b-value "
"shells")
field = traits.Str(
argstr='--field=%s',
desc="NonTOPUP fieldmap scaled in Hz - filename has "
"to be provided without an extension. TOPUP is "
"strongly recommended")
field_mat = File(
exists=True,
argstr='--field_mat=%s',
desc="Matrix that specifies the relative locations of "
"the field specified by --field and first volume "
"in file --imain")
use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu")
class EddyOutputSpec(TraitedSpec):
out_corrected = File(
exists=True, desc='4D image file containing all the corrected volumes')
out_parameter = File(
exists=True,
        desc=('text file with parameters defining the field and '
              'movement for each scan'))
out_rotated_bvecs = File(
        exists=True, desc='File containing rotated b-vectors for all volumes')
out_movement_rms = File(
exists=True, desc='Summary of the "total movement" in each volume')
out_restricted_movement_rms = File(
exists=True,
desc=('Summary of the "total movement" in each volume '
'disregarding translation in the PE direction'))
out_shell_alignment_parameters = File(
exists=True,
desc=('File containing rigid body movement parameters '
'between the different shells as estimated by a '
'post-hoc mutual information based registration'))
out_outlier_report = File(
exists=True,
desc=('Text-file with a plain language report on what '
'outlier slices eddy has found'))
class Eddy(CommandLine):
"""
Interface for FSL eddy, a tool for estimating and correcting eddy
currents induced distortions. `User guide
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and
`more info regarding acqp file
<http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.
Examples
--------
>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'epi.nii'
>>> eddy.inputs.in_mask = 'epi_mask.nii'
>>> eddy.inputs.in_index = 'epi_index.txt'
>>> eddy.inputs.in_acqp = 'epi_acqp.txt'
>>> eddy.inputs.in_bvec = 'bvecs.scheme'
>>> eddy.inputs.in_bval = 'bvals.scheme'
>>> eddy.inputs.use_cuda = True
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> eddy.inputs.use_cuda = False
>>> eddy.cmdline # doctest: +ELLIPSIS
'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \
--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \
--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'
>>> res = eddy.run() # doctest: +SKIP
"""
_cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'
input_spec = EddyInputSpec
output_spec = EddyOutputSpec
_num_threads = 1
def __init__(self, **inputs):
super(Eddy, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
self.inputs.on_trait_change(self._use_cuda, 'use_cuda')
if isdefined(self.inputs.use_cuda):
self._use_cuda()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
if not isdefined(self.inputs.num_threads):
if 'OMP_NUM_THREADS' in self.inputs.environ:
del self.inputs.environ['OMP_NUM_THREADS']
else:
self.inputs.environ['OMP_NUM_THREADS'] = str(
self.inputs.num_threads)
def _use_cuda(self):
self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'
def _run_interface(self, runtime):
# If 'eddy_openmp' is missing, use 'eddy'
FSLDIR = os.getenv('FSLDIR', '')
cmd = self._cmd
if all((FSLDIR != '', cmd == 'eddy_openmp',
not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):
self._cmd = 'eddy'
runtime = super(Eddy, self)._run_interface(runtime)
# Restore command to avoid side-effects
self._cmd = cmd
return runtime
def _format_arg(self, name, spec, value):
if name == 'in_topup_fieldcoef':
return spec.argstr % value.split('_fieldcoef')[0]
if name == 'out_base':
return spec.argstr % os.path.abspath(value)
return super(Eddy, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_corrected'] = os.path.abspath(
'%s.nii.gz' % self.inputs.out_base)
outputs['out_parameter'] = os.path.abspath(
'%s.eddy_parameters' % self.inputs.out_base)
# File generation might depend on the version of EDDY
out_rotated_bvecs = os.path.abspath(
'%s.eddy_rotated_bvecs' % self.inputs.out_base)
out_movement_rms = os.path.abspath(
'%s.eddy_movement_rms' % self.inputs.out_base)
out_restricted_movement_rms = os.path.abspath(
'%s.eddy_restricted_movement_rms' % self.inputs.out_base)
out_shell_alignment_parameters = os.path.abspath(
'%s.eddy_post_eddy_shell_alignment_parameters' %
self.inputs.out_base)
out_outlier_report = os.path.abspath(
'%s.eddy_outlier_report' % self.inputs.out_base)
if os.path.exists(out_rotated_bvecs):
outputs['out_rotated_bvecs'] = out_rotated_bvecs
if os.path.exists(out_movement_rms):
outputs['out_movement_rms'] = out_movement_rms
if os.path.exists(out_restricted_movement_rms):
outputs['out_restricted_movement_rms'] = \
out_restricted_movement_rms
if os.path.exists(out_shell_alignment_parameters):
outputs['out_shell_alignment_parameters'] = \
out_shell_alignment_parameters
if os.path.exists(out_outlier_report):
outputs['out_outlier_report'] = out_outlier_report
return outputs
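# Workflow sketch (editor's addition): the three wrappers above are normally chained
# in a nipype pipeline (denoise -> degibbs -> eddy). Node names, base_dir and the
# remaining mandatory eddy inputs (mask, index, acqp, bvecs, bvals) are placeholders
# that the surrounding preprocessing script would supply.
#
#   import nipype.pipeline.engine as pe
#   denoise = pe.Node(DWIdenoise(), name='denoise')
#   degibbs = pe.Node(MRdegibbs(), name='degibbs')
#   eddy = pe.Node(Eddy(), name='eddy')
#   wf = pe.Workflow(name='dwi_preproc', base_dir='/tmp/dwi_preproc')
#   wf.connect([(denoise, degibbs, [('out_file', 'in_file')]),
#               (degibbs, eddy, [('out_file', 'in_file')])])
#   # wf.run()  # would execute once all mandatory eddy inputs are set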
|
flexible
|
{
"blob_id": "419aee3045a0d532afa0fc314df9cdef7aab5219",
"index": 4181,
"step-1": "<mask token>\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 
'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-2": "<mask token>\n\n\nclass DWIdenoise(CommandLine):\n <mask token>\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 
'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-3": "<mask token>\n\n\nclass DWIdenoiseOutputSpec(TraitedSpec):\n <mask token>\n\n\nclass DWIdenoise(CommandLine):\n \"\"\"Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the \n noise level based on the optimal threshold for PCA.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n 
dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-4": "<mask token>\n\n\nclass DWIdenoiseInputSpec(CommandLineInputSpec):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DWIdenoiseOutputSpec(TraitedSpec):\n out_file = File(desc='the output denoised DWI image', exists=True)\n\n\nclass DWIdenoise(CommandLine):\n \"\"\"Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the \n noise level based on the optimal threshold for PCA.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(desc='input DWI image', exists=True, mandatory=True,\n position=0, argstr='%s')\n force = traits.Bool(desc='force overwrite of output files', position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed', name_source='in_file',\n keep_extension=True, argstr='%s', position=1, desc=\n 'the output unringed DWI image')\n\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc='the output unringed DWI image', exists=True)\n\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(exists=True, mandatory=True, argstr='--imain=%s', desc=\n 'File containing all the images to estimate distortions for')\n in_mask = File(exists=True, mandatory=True, argstr='--mask=%s', desc=\n 'Mask to indicate brain')\n in_index = File(exists=True, mandatory=True, argstr='--index=%s', desc=\n 'File containing indices for all volumes in --imain into --acqp and --topup'\n )\n in_acqp = File(exists=True, mandatory=True, argstr='--acqp=%s', desc=\n 'File containing acquisition parameters')\n in_bvec = File(exists=True, mandatory=True, argstr='--bvecs=%s', desc=\n 'File containing the b-vectors for all volumes in --imain')\n in_bval = File(exists=True, mandatory=True, argstr='--bvals=%s', desc=\n 'File containing the b-values for all volumes in --imain')\n out_base = traits.Str('eddy_corrected', argstr='--out=%s', usedefault=\n True, desc='basename for output (warped) image')\n session = File(exists=True, argstr='--session=%s', desc=\n 'File containing session indices for all volumes in --imain')\n in_topup_fieldcoef = File(exists=True, argstr='--topup=%s', requires=[\n 'in_topup_movpar'], desc='topup file containing the field coefficients'\n )\n in_topup_movpar = File(exists=True, requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum('linear', 'quadratic', 'cubic', argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum('none', 'linear', 'quadratic', argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(False, argstr='--fep', desc=\n 'Fill empty planes in x- or y-directions')\n interp = traits.Enum('spline', 'trilinear', argstr='--interp=%s', desc=\n 'Interpolation model for estimation step')\n nvoxhp = traits.Int(1000, usedefault=True, argstr='--nvoxhp=%s', desc=\n '# of voxels used to estimate the hyperparameters')\n fudge_factor = traits.Float(10.0, usedefault=True, argstr='--ff=%s',\n desc='Fudge factor for 
hyperparameter error variance')\n dont_sep_offs_move = traits.Bool(False, argstr='--dont_sep_offs_move',\n desc='Do NOT attempt to separate field offset from subject movement')\n dont_peas = traits.Bool(False, argstr='--dont_peas', desc=\n 'Do NOT perform a post-eddy alignment of shells')\n fwhm = traits.Float(desc=\n 'FWHM for conditioning filter when estimating the parameters',\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True, argstr='--niter=%s', desc=\n 'Number of iterations')\n method = traits.Enum('jac', 'lsr', argstr='--resamp=%s', desc=\n 'Final resampling method (jacobian/least squares)')\n repol = traits.Bool(False, argstr='--repol', desc=\n 'Detect and replace outlier slices')\n num_threads = traits.Int(1, usedefault=True, nohash=True, desc=\n 'Number of openmp threads to use')\n is_shelled = traits.Bool(False, argstr='--data_is_shelled', desc=\n 'Override internal check to ensure that date are acquired on a set of b-value shells'\n )\n field = traits.Str(argstr='--field=%s', desc=\n 'NonTOPUP fieldmap scaled in Hz - filename has to be provided without an extension. TOPUP is strongly recommended'\n )\n field_mat = File(exists=True, argstr='--field_mat=%s', desc=\n 'Matrix that specifies the relative locations of the field specified by --field and first volume in file --imain'\n )\n use_cuda = traits.Bool(False, desc='Run eddy using cuda gpu')\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(exists=True, desc=\n '4D image file containing all the corrected volumes')\n out_parameter = File(exists=True, desc=\n 'text file with parameters definining the field andmovement for each scan'\n )\n out_rotated_bvecs = File(exists=True, desc=\n 'File containing rotated b-values for all volumes')\n out_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(exists=True, desc=\n 'Summary of the \"total movement\" in each volume disregarding translation in the PE direction'\n )\n out_shell_alignment_parameters = File(exists=True, desc=\n 'File containing rigid body movement parameters between the different shells as estimated by a post-hoc mutual information based registration'\n )\n out_outlier_report = File(exists=True, desc=\n 'Text-file with a plain language report on what outlier slices eddy has found'\n )\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. 
`User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(self.inputs.\n num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp', not os.path.exists(os.\n path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath('%s.nii.gz' % self.\n inputs.out_base)\n outputs['out_parameter'] = os.path.abspath('%s.eddy_parameters' %\n self.inputs.out_base)\n out_rotated_bvecs = os.path.abspath('%s.eddy_rotated_bvecs' % self.\n inputs.out_base)\n out_movement_rms = os.path.abspath('%s.eddy_movement_rms' % self.\n inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' % self.inputs.\n out_base)\n out_outlier_report = os.path.abspath('%s.eddy_outlier_report' %\n self.inputs.out_base)\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = 
out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'\n ] = out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'\n ] = out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n return outputs\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 11 13:13:53 2018\n\n@author: zhang\n\"\"\"\n'''\nWarp Commands use during diffusion-weighted images preprocessing\n================================================================\ndwidenoise & mrdegibbs from MRTrix3.0; eddy-openmp from FSL\n-------------------------------------------------------------------------\nfor unkonwn reason they are not included after loading relavant interface\n'''\nfrom nipype.interfaces.base import (CommandLine, \n CommandLineInputSpec,\n File, \n TraitedSpec, \n traits,\n isdefined,\n InputMultiPath)\nimport os\n\n# warp the dwidenoise function from MRtrix\nclass DWIdenoiseInputSpec(CommandLineInputSpec):\n in_file = InputMultiPath(\n File(exists=True), \n mandatory=True,\n position=0,\n argstr=\"%s\",\n desc=\"input DWI image\")\n noise = File(\n argstr='-noise %s',\n desc='noise map')\n force = traits.Bool(\n desc='force overwrite of output files', \n position=-1,\n argstr='-force')\n out_file = File(name_template='%s_denoised',\n name_source='in_file',\n keep_extension=True,\n argstr=\"%s\",\n position=1,\n desc=\"the output denoised DWI image\")\n\nclass DWIdenoiseOutputSpec(TraitedSpec):\n out_file = File(desc = \"the output denoised DWI image\", exists = True)\n\nclass DWIdenoise(CommandLine):\n \"\"\"Use MRTrix3 dwidenoise command to denoisie DWI data and estimate the \n noise level based on the optimal threshold for PCA.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/dwidenoise.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/dwidenoise'\n input_spec = DWIdenoiseInputSpec\n output_spec = DWIdenoiseOutputSpec\n\n\n# warp the unring function from MRtrix \nclass MRdegibbsInputSpec(CommandLineInputSpec):\n in_file = File(\n desc=\"input DWI image\", \n exists=True, \n mandatory=True,\n position=0,\n argstr=\"%s\")\n force = traits.Bool(\n desc='force overwrite of output files', \n position=-1,\n argstr='-force')\n out_file = File(name_template='%s_unringed',\n name_source='in_file',\n keep_extension=True,\n argstr=\"%s\",\n position=1,\n desc=\"the output unringed DWI image\")\n\nclass MRdegibbsOutputSpec(TraitedSpec):\n out_file = File(desc = \"the output unringed DWI image\", exists = True)\n\nclass MRdegibbs(CommandLine):\n \"\"\"Use MRTrix3 degibbs command for removing the gibbs ringing artefact.\n \n For more information, see\n <https://mrtrix.readthedocs.io/en/latest/reference/commands/mrdegibbs.html>\n \"\"\"\n _cmd = '/a/software/mrtrix/3.0-rc2/ubuntu-xenial-amd64/bin/mrdegibbs'\n input_spec = MRdegibbsInputSpec\n output_spec = MRdegibbsOutputSpec\n\n\n# Wrap FSL eddy (copy from nipype interface)\nclass EddyInputSpec(CommandLineInputSpec):\n in_file = File(\n exists=True,\n mandatory=True,\n argstr='--imain=%s',\n desc=('File containing all the images to estimate '\n 'distortions for'))\n in_mask = File(\n exists=True,\n mandatory=True,\n argstr='--mask=%s',\n desc='Mask to indicate brain')\n in_index = File(\n exists=True,\n mandatory=True,\n argstr='--index=%s',\n desc=('File containing indices for all volumes in --imain '\n 'into --acqp and --topup'))\n in_acqp = File(\n exists=True,\n mandatory=True,\n argstr='--acqp=%s',\n desc='File containing acquisition parameters')\n in_bvec = File(\n exists=True,\n mandatory=True,\n argstr='--bvecs=%s',\n desc=('File containing the b-vectors for all volumes in '\n '--imain'))\n in_bval = File(\n exists=True,\n mandatory=True,\n argstr='--bvals=%s',\n desc=('File 
containing the b-values for all volumes in '\n '--imain'))\n out_base = traits.Str(\n 'eddy_corrected',\n argstr='--out=%s',\n usedefault=True,\n desc=('basename for output (warped) image'))\n session = File(\n exists=True,\n argstr='--session=%s',\n desc=('File containing session indices for all volumes in '\n '--imain'))\n in_topup_fieldcoef = File(\n exists=True,\n argstr=\"--topup=%s\",\n requires=['in_topup_movpar'],\n desc=('topup file containing the field '\n 'coefficients'))\n in_topup_movpar = File(\n exists=True,\n requires=['in_topup_fieldcoef'],\n desc='topup movpar.txt file')\n flm = traits.Enum(\n 'linear',\n 'quadratic',\n 'cubic',\n argstr='--flm=%s',\n desc='First level EC model')\n slm = traits.Enum(\n 'none',\n 'linear',\n 'quadratic',\n argstr='--slm=%s',\n desc='Second level EC model')\n fep = traits.Bool(\n False, argstr='--fep', desc='Fill empty planes in x- or y-directions')\n interp = traits.Enum(\n 'spline',\n 'trilinear',\n argstr='--interp=%s',\n desc='Interpolation model for estimation step')\n nvoxhp = traits.Int(\n 1000, usedefault=True,\n argstr='--nvoxhp=%s',\n desc=('# of voxels used to estimate the '\n 'hyperparameters'))\n fudge_factor = traits.Float(\n 10.0, usedefault=True,\n argstr='--ff=%s',\n desc=('Fudge factor for hyperparameter '\n 'error variance'))\n dont_sep_offs_move = traits.Bool(\n False,\n argstr='--dont_sep_offs_move',\n desc=('Do NOT attempt to separate '\n 'field offset from subject '\n 'movement'))\n dont_peas = traits.Bool(\n False,\n argstr='--dont_peas',\n desc=\"Do NOT perform a post-eddy alignment of \"\n \"shells\")\n fwhm = traits.Float(\n desc=('FWHM for conditioning filter when estimating '\n 'the parameters'),\n argstr='--fwhm=%s')\n niter = traits.Int(5, usedefault=True,\n argstr='--niter=%s', desc='Number of iterations')\n method = traits.Enum(\n 'jac',\n 'lsr',\n argstr='--resamp=%s',\n desc=('Final resampling method (jacobian/least '\n 'squares)'))\n repol = traits.Bool(\n False, argstr='--repol', desc='Detect and replace outlier slices')\n num_threads = traits.Int(\n 1,\n usedefault=True,\n nohash=True,\n desc=\"Number of openmp threads to use\")\n is_shelled = traits.Bool(\n False,\n argstr='--data_is_shelled',\n desc=\"Override internal check to ensure that \"\n \"date are acquired on a set of b-value \"\n \"shells\")\n field = traits.Str(\n argstr='--field=%s',\n desc=\"NonTOPUP fieldmap scaled in Hz - filename has \"\n \"to be provided without an extension. 
TOPUP is \"\n \"strongly recommended\")\n field_mat = File(\n exists=True,\n argstr='--field_mat=%s',\n desc=\"Matrix that specifies the relative locations of \"\n \"the field specified by --field and first volume \"\n \"in file --imain\")\n use_cuda = traits.Bool(False, desc=\"Run eddy using cuda gpu\")\n\n\nclass EddyOutputSpec(TraitedSpec):\n out_corrected = File(\n exists=True, desc='4D image file containing all the corrected volumes')\n out_parameter = File(\n exists=True,\n desc=('text file with parameters definining the field and'\n 'movement for each scan'))\n out_rotated_bvecs = File(\n exists=True, desc='File containing rotated b-values for all volumes')\n out_movement_rms = File(\n exists=True, desc='Summary of the \"total movement\" in each volume')\n out_restricted_movement_rms = File(\n exists=True,\n desc=('Summary of the \"total movement\" in each volume '\n 'disregarding translation in the PE direction'))\n out_shell_alignment_parameters = File(\n exists=True,\n desc=('File containing rigid body movement parameters '\n 'between the different shells as estimated by a '\n 'post-hoc mutual information based registration'))\n out_outlier_report = File(\n exists=True,\n desc=('Text-file with a plain language report on what '\n 'outlier slices eddy has found'))\n\n\nclass Eddy(CommandLine):\n \"\"\"\n Interface for FSL eddy, a tool for estimating and correcting eddy\n currents induced distortions. `User guide\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Eddy/UsersGuide>`_ and\n `more info regarding acqp file\n <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/eddy/Faq#How_do_I_know_what_to_put_into_my_--acqp_file>`_.\n Examples\n --------\n >>> from nipype.interfaces.fsl import Eddy\n >>> eddy = Eddy()\n >>> eddy.inputs.in_file = 'epi.nii'\n >>> eddy.inputs.in_mask = 'epi_mask.nii'\n >>> eddy.inputs.in_index = 'epi_index.txt'\n >>> eddy.inputs.in_acqp = 'epi_acqp.txt'\n >>> eddy.inputs.in_bvec = 'bvecs.scheme'\n >>> eddy.inputs.in_bval = 'bvals.scheme'\n >>> eddy.inputs.use_cuda = True\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \\\n--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \\\n--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> eddy.inputs.use_cuda = False\n >>> eddy.cmdline # doctest: +ELLIPSIS\n 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \\\n--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \\\n--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected'\n >>> res = eddy.run() # doctest: +SKIP\n \"\"\"\n _cmd = '/data/pt_life_dti/scripts/life2018/eddy_openmp-5.0.11'\n input_spec = EddyInputSpec\n output_spec = EddyOutputSpec\n\n _num_threads = 1\n\n def __init__(self, **inputs):\n super(Eddy, self).__init__(**inputs)\n self.inputs.on_trait_change(self._num_threads_update, 'num_threads')\n if not isdefined(self.inputs.num_threads):\n self.inputs.num_threads = self._num_threads\n else:\n self._num_threads_update()\n self.inputs.on_trait_change(self._use_cuda, 'use_cuda')\n if isdefined(self.inputs.use_cuda):\n self._use_cuda()\n\n def _num_threads_update(self):\n self._num_threads = self.inputs.num_threads\n if not isdefined(self.inputs.num_threads):\n if 'OMP_NUM_THREADS' in self.inputs.environ:\n del self.inputs.environ['OMP_NUM_THREADS']\n else:\n self.inputs.environ['OMP_NUM_THREADS'] = str(\n self.inputs.num_threads)\n\n def _use_cuda(self):\n self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp'\n\n def _run_interface(self, runtime):\n 
# If 'eddy_openmp' is missing, use 'eddy'\n FSLDIR = os.getenv('FSLDIR', '')\n cmd = self._cmd\n if all((FSLDIR != '', cmd == 'eddy_openmp',\n not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))):\n self._cmd = 'eddy'\n runtime = super(Eddy, self)._run_interface(runtime)\n\n # Restore command to avoid side-effects\n self._cmd = cmd\n return runtime\n\n def _format_arg(self, name, spec, value):\n if name == 'in_topup_fieldcoef':\n return spec.argstr % value.split('_fieldcoef')[0]\n if name == 'out_base':\n return spec.argstr % os.path.abspath(value)\n return super(Eddy, self)._format_arg(name, spec, value)\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs['out_corrected'] = os.path.abspath(\n '%s.nii.gz' % self.inputs.out_base)\n outputs['out_parameter'] = os.path.abspath(\n '%s.eddy_parameters' % self.inputs.out_base)\n\n # File generation might depend on the version of EDDY\n out_rotated_bvecs = os.path.abspath(\n '%s.eddy_rotated_bvecs' % self.inputs.out_base)\n out_movement_rms = os.path.abspath(\n '%s.eddy_movement_rms' % self.inputs.out_base)\n out_restricted_movement_rms = os.path.abspath(\n '%s.eddy_restricted_movement_rms' % self.inputs.out_base)\n out_shell_alignment_parameters = os.path.abspath(\n '%s.eddy_post_eddy_shell_alignment_parameters' %\n self.inputs.out_base)\n out_outlier_report = os.path.abspath(\n '%s.eddy_outlier_report' % self.inputs.out_base)\n\n if os.path.exists(out_rotated_bvecs):\n outputs['out_rotated_bvecs'] = out_rotated_bvecs\n if os.path.exists(out_movement_rms):\n outputs['out_movement_rms'] = out_movement_rms\n if os.path.exists(out_restricted_movement_rms):\n outputs['out_restricted_movement_rms'] = \\\n out_restricted_movement_rms\n if os.path.exists(out_shell_alignment_parameters):\n outputs['out_shell_alignment_parameters'] = \\\n out_shell_alignment_parameters\n if os.path.exists(out_outlier_report):\n outputs['out_outlier_report'] = out_outlier_report\n\n return outputs\n",
"step-ids": [
20,
22,
24,
26,
29
]
}
|
[
20,
22,
24,
26,
29
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(set1)
print(set2)
print(set1.difference(set2))
print(set1 - set2)
print(set2.difference(set1))
print(set2 - set1)
print(set1)
print(set2)
print('*' * 40)
<|reserved_special_token_0|>
print(set3)
set3.difference_update(set4)
print(set3)
print('*' * 40)
<|reserved_special_token_0|>
print(set5)
print(set5.intersection(set6))
print(set5)
print('*' * 40)
<|reserved_special_token_0|>
print(set7)
set7.intersection_update(set8)
print(set7)
print('*' * 40)
<|reserved_special_token_0|>
print(set9)
print(set9.symmetric_difference(set10))
print(set9 ^ set10)
print(set9)
print('*' * 40)
<|reserved_special_token_0|>
print(set11)
set11.symmetric_difference_update(set12)
print(set11)
print('*' * 40)
<|reserved_special_token_1|>
set1 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set2 = {1, 2, 3, 4, 5, 6, 'A', 'B'}
print(set1)
print(set2)
print(set1.difference(set2))
print(set1 - set2)
print(set2.difference(set1))
print(set2 - set1)
print(set1)
print(set2)
print('*' * 40)
set3 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set4 = {1, 2, 3, 4, 5, 6, 'A', 'B'}
print(set3)
set3.difference_update(set4)
print(set3)
print('*' * 40)
set5 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set6 = {1, 2, 3, 4, 5, 6, 'A', 'B'}
print(set5)
print(set5.intersection(set6))
print(set5)
print('*' * 40)
set7 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set8 = {1, 2, 3, 4, 5, 6, 'A', 'B'}
print(set7)
set7.intersection_update(set8)
print(set7)
print('*' * 40)
set9 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set10 = {1, 2, 3, 4, 5, 6, 'A', 'B'}
print(set9)
print(set9.symmetric_difference(set10))
print(set9 ^ set10)
print(set9)
print('*' * 40)
set11 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set12 = {1, 2, 3, 4, 5, 6, 'A', 'B'}
print(set11)
set11.symmetric_difference_update(set12)
print(set11)
print('*' * 40)
<|reserved_special_token_1|>
# -------------------------------
# --------- Set Methods ---------
# -------------------------------
# difference() returns the values that are in the first set but not in the second set
set1 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set2 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set1)
print(set2)
print(set1.difference(set2))
print(set1-set2)
print(set2.difference(set1))
print(set2-set1)
print(set1)
print(set2)
print("*" * 40)
# difference_update() computes the values that are in the first set but not in the second set
# and updates the first set in place with those values
set3 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set4 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set3)
set3.difference_update(set4)
print(set3)
print("*" * 40)
# intersection() returns the values that are in both the first set and the second set
set5 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set6 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set5)
print(set5.intersection(set6))
print(set5)
print("*" * 40)
# intersection_update() computes the values that are in both the first set and the second set
# and updates the first set in place with those values
set7 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set8 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set7)
set7.intersection_update(set8)
print(set7)
print("*" * 40)
# symmetric_difference() returns the values that are in the first set but not in the second set,
# together with the values that are in the second set but not in the first set
set9 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set10 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set9)
print(set9.symmetric_difference(set10))
print(set9^set10)
print(set9)
print("*" * 40)
# symmetric_difference_update() computes the values that are in the first set but not in the second set,
# together with the values that are in the second set but not in the first set,
# and updates the first set in place with those values
set11 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}
set12 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set11)
set11.symmetric_difference_update(set12)
print(set11)
print("*" * 40)
|
flexible
|
{
"blob_id": "faf2f5da92cf45cfedda91955688b3ca1c7c0db9",
"index": 8280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1 - set2)\nprint(set2.difference(set1))\nprint(set2 - set1)\nprint(set1)\nprint(set2)\nprint('*' * 40)\n<mask token>\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint('*' * 40)\n<mask token>\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint('*' * 40)\n<mask token>\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint('*' * 40)\n<mask token>\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9 ^ set10)\nprint(set9)\nprint('*' * 40)\n<mask token>\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint('*' * 40)\n",
"step-3": "set1 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset2 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1 - set2)\nprint(set2.difference(set1))\nprint(set2 - set1)\nprint(set1)\nprint(set2)\nprint('*' * 40)\nset3 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset4 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint('*' * 40)\nset5 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset6 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint('*' * 40)\nset7 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset8 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint('*' * 40)\nset9 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset10 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9 ^ set10)\nprint(set9)\nprint('*' * 40)\nset11 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset12 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint('*' * 40)\n",
"step-4": "# -------------------------------\n# --------- Set Methods ---------\n# -------------------------------\n\n\n# difference() return the values in the first set that not in the second set\nset1 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset2 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1-set2)\nprint(set2.difference(set1))\nprint(set2-set1)\nprint(set1)\nprint(set2)\n\nprint(\"*\" * 40)\n\n# difference_update() return the values in the first set that not in the second set\n# and update the value for the first set with this values\nset3 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset4 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint(\"*\" * 40)\n\n# intersection() return the values in the first set and in the second set\nset5 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset6 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint(\"*\" * 40)\n\n# intersection_update() return the values in the first set and in the second set\n# and update the value for the first set with this values\nset7 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset8 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint(\"*\" * 40)\n\n# symmetric_difference() return the values in the first set and not in the second set\n# and the values in the second set and not in the first set\nset9 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset10 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9^set10)\nprint(set9)\nprint(\"*\" * 40)\n\n# symmetric_difference_update() return the values in the first set and not in the second set\n# and the values in the second set and not in the first set\n# and update the value for the first set with this values\nset11 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset12 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint(\"*\" * 40)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 15:01:21 2018
@author: Weiyu_Lee
"""
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import config as conf
def get_stock_time_series(data_df, stock_id):
curr_ID_data = data_df.loc[stock_id]
output = np.array(curr_ID_data[0])
for i in range(1, len(curr_ID_data.index)):
output = np.vstack((output, curr_ID_data[i]))
return output
ID_conf = conf.config('feature_conf').config['ID']
#ETF_ID = ID_conf["ID"]
#ETF_ID_list = ["0050"]
ETF_ID_list = ["0050", "0052", "0053", "0054", "0055", "0056", "0057", "0058", "0059",
"006201", "006203", "006204", "006208"]
output_date = {}
for ETF_ID in ETF_ID_list:
Nm_conf = conf.config('feature_conf').config['Nm']
if Nm_conf["enable"] is True:
Nm_method = Nm_conf["method"]
file_postfix = '_Nm_' + str(Nm_conf["type"][0]) + '_' + Nm_method + '_' + str(94) + "_" + ETF_ID + '.pkl'
else:
file_postfix = "_" + str(94) + '.pkl'
src_file_path = './Data/all_feature_data' + file_postfix
meta_file_path = './Data/all_meta_data' + file_postfix
data = pd.read_pickle(src_file_path)
f = open(meta_file_path, "rb")
tasharep_ID = pickle.load(f)
member_ID = pickle.load(f)
Date = pickle.load(f)
feature_list = pickle.load(f)
price_scaler = pickle.load(f)
trade_scaler = pickle.load(f)
f_idx = 59 # MACD
src_time_period = ['20000101', '20180511']
# eval_time_period = ['20180511', '20180518']
eval_time_period = ['20180402', '20180518']
eval_time_len = Date.index(eval_time_period[1]) - Date.index(eval_time_period[0]) + 1
total_acc = 0
for day_shift in range(eval_time_len-5):
eval_start_date = Date.index(eval_time_period[0]) + day_shift
target_start_date = eval_start_date - 21
target_time_period = [Date[target_start_date], Date[eval_start_date]]
next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]
date_mask = (data.columns > src_time_period[0]) & (data.columns <= src_time_period[1])
src_data = data.iloc[:, date_mask]
date_mask = (data.columns > target_time_period[0]) & (data.columns <= target_time_period[1])
target_data = data.iloc[:, date_mask]
date_mask = (data.columns > next_time_period[0]) & (data.columns <= next_time_period[1])
next_data = data.iloc[:, date_mask]
src_TS = get_stock_time_series(src_data, ETF_ID)
target_TS = get_stock_time_series(target_data, ETF_ID)
next_TS = get_stock_time_series(next_data, ETF_ID)
overall_TS = get_stock_time_series(data, ETF_ID)
target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx], mode='valid')
# next_xcorr = np.correlate(src_TS[:, f_idx], next_TS[:, f_idx], mode='valid')
target_len = len(target_TS)
max_target_xcorr_idx = target_xcorr.argsort()[::-1]
predict_target_idx = max_target_xcorr_idx + target_len
next_len = len(next_TS)
#        max_next_xcorr_idx = next_xcorr.argsort()[::-1]
# plt.plot(target_xcorr)
# plt.savefig("target_xcorr_{}.png".format(ETF_ID))
#for idx in max_target_xcorr_idx[:10]:
# plt.figure()
# plt.plot(target_TS[:, 84])
# plt.plot(src_TS[max_target_xcorr_idx[idx]:max_target_xcorr_idx[idx]+target_len, 84])
#plt.figure()
#plt.plot(target_xcorr)
#plt.plot(next_xcorr)
top_num = 10
acc = []
label = np.argmax(next_TS[:, -3:], axis=-1)
for idx in max_target_xcorr_idx[:top_num]:
predict = np.argmax(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
max_acc_idx = np.argsort(acc)[::-1]
output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in max_acc_idx]
top_num = 3
avg_acc = 0
acc = []
for idx in output_xcorr_idx[:top_num]:
#plt.figure()
#plt.plot(next_TS[:, 84])
#plt.plot(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, 84])
#plt.figure()
#plt.plot(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, 3])
predict = np.argmax(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, -3:], axis=-1)
acc.append(sum(label == predict) / next_len)
avg_acc = avg_acc + acc[-1]
#print("Acc.: [{}]".format(acc[-1]))
print("Avg. Acc.: [{}]".format(avg_acc/top_num))
total_acc = total_acc + avg_acc/top_num
print("[{}] Overall Acc.: [{}]".format(ETF_ID, total_acc/(eval_time_len-5)))
output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]
f = open('./Data/xcorr_date_data.pkl', 'wb')
pickle.dump(output_date, f, True)
f.close()
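# Note on the matching step above: with standard numpy semantics,
# np.correlate(a, v, mode='valid') returns len(a) - len(v) + 1 values, where
# entry k is the dot product of v with the slice a[k:k + len(v)]. Sorting that
# array in descending order therefore ranks the start offsets k at which the
# recent ~21-day window best matches the longer price history, and
# predict_target_idx = k + target_len points just past each matched window, so
# the following 5 days can be read off as a historical-analogue forecast.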
|
normal
|
{
"blob_id": "6a7e5a78f516cecf083ca3900bdaaf427bedd497",
"index": 756,
"step-1": "<mask token>\n\n\ndef get_stock_time_series(data_df, stock_id):\n curr_ID_data = data_df.loc[stock_id]\n output = np.array(curr_ID_data[0])\n for i in range(1, len(curr_ID_data.index)):\n output = np.vstack((output, curr_ID_data[i]))\n return output\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_stock_time_series(data_df, stock_id):\n curr_ID_data = data_df.loc[stock_id]\n output = np.array(curr_ID_data[0])\n for i in range(1, len(curr_ID_data.index)):\n output = np.vstack((output, curr_ID_data[i]))\n return output\n\n\n<mask token>\nfor ETF_ID in ETF_ID_list:\n Nm_conf = conf.config('feature_conf').config['Nm']\n if Nm_conf['enable'] is True:\n Nm_method = Nm_conf['method']\n file_postfix = '_Nm_' + str(Nm_conf['type'][0]\n ) + '_' + Nm_method + '_' + str(94) + '_' + ETF_ID + '.pkl'\n else:\n file_postfix = '_' + str(94) + '.pkl'\n src_file_path = './Data/all_feature_data' + file_postfix\n meta_file_path = './Data/all_meta_data' + file_postfix\n data = pd.read_pickle(src_file_path)\n f = open(meta_file_path, 'rb')\n tasharep_ID = pickle.load(f)\n member_ID = pickle.load(f)\n Date = pickle.load(f)\n feature_list = pickle.load(f)\n price_scaler = pickle.load(f)\n trade_scaler = pickle.load(f)\n f_idx = 59\n src_time_period = ['20000101', '20180511']\n eval_time_period = ['20180402', '20180518']\n eval_time_len = Date.index(eval_time_period[1]) - Date.index(\n eval_time_period[0]) + 1\n total_acc = 0\n for day_shift in range(eval_time_len - 5):\n eval_start_date = Date.index(eval_time_period[0]) + day_shift\n target_start_date = eval_start_date - 21\n target_time_period = [Date[target_start_date], Date[eval_start_date]]\n next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]\n date_mask = (data.columns > src_time_period[0]) & (data.columns <=\n src_time_period[1])\n src_data = data.iloc[:, date_mask]\n date_mask = (data.columns > target_time_period[0]) & (data.columns <=\n target_time_period[1])\n target_data = data.iloc[:, date_mask]\n date_mask = (data.columns > next_time_period[0]) & (data.columns <=\n next_time_period[1])\n next_data = data.iloc[:, date_mask]\n src_TS = get_stock_time_series(src_data, ETF_ID)\n target_TS = get_stock_time_series(target_data, ETF_ID)\n next_TS = get_stock_time_series(next_data, ETF_ID)\n overall_TS = get_stock_time_series(data, ETF_ID)\n target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx],\n mode='valid')\n target_len = len(target_TS)\n max_target_xcorr_idx = target_xcorr.argsort()[::-1]\n predict_target_idx = max_target_xcorr_idx + target_len\n next_len = len(next_TS)\n max_next_xcorr_idx = next_xcorr.argsort()[::-1]\n top_num = 10\n acc = []\n label = np.argmax(next_TS[:, -3:], axis=-1)\n for idx in max_target_xcorr_idx[:top_num]:\n predict = np.argmax(overall_TS[predict_target_idx[idx]:\n predict_target_idx[idx] + next_len, -3:], axis=-1)\n acc.append(sum(label == predict) / next_len)\n max_acc_idx = np.argsort(acc)[::-1]\n output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in\n max_acc_idx]\n top_num = 3\n avg_acc = 0\n acc = []\n for idx in output_xcorr_idx[:top_num]:\n predict = np.argmax(overall_TS[predict_target_idx[idx]:\n predict_target_idx[idx] + next_len, -3:], axis=-1)\n acc.append(sum(label == predict) / next_len)\n avg_acc = avg_acc + acc[-1]\n print('Avg. Acc.: [{}]'.format(avg_acc / top_num))\n total_acc = total_acc + avg_acc / top_num\n print('[{}] Overall Acc.: [{}]'.format(ETF_ID, total_acc / (\n eval_time_len - 5)))\n output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]\n<mask token>\npickle.dump(output_date, f, True)\nf.close()\n",
"step-3": "<mask token>\n\n\ndef get_stock_time_series(data_df, stock_id):\n curr_ID_data = data_df.loc[stock_id]\n output = np.array(curr_ID_data[0])\n for i in range(1, len(curr_ID_data.index)):\n output = np.vstack((output, curr_ID_data[i]))\n return output\n\n\nID_conf = conf.config('feature_conf').config['ID']\nETF_ID_list = ['0050', '0052', '0053', '0054', '0055', '0056', '0057',\n '0058', '0059', '006201', '006203', '006204', '006208']\noutput_date = {}\nfor ETF_ID in ETF_ID_list:\n Nm_conf = conf.config('feature_conf').config['Nm']\n if Nm_conf['enable'] is True:\n Nm_method = Nm_conf['method']\n file_postfix = '_Nm_' + str(Nm_conf['type'][0]\n ) + '_' + Nm_method + '_' + str(94) + '_' + ETF_ID + '.pkl'\n else:\n file_postfix = '_' + str(94) + '.pkl'\n src_file_path = './Data/all_feature_data' + file_postfix\n meta_file_path = './Data/all_meta_data' + file_postfix\n data = pd.read_pickle(src_file_path)\n f = open(meta_file_path, 'rb')\n tasharep_ID = pickle.load(f)\n member_ID = pickle.load(f)\n Date = pickle.load(f)\n feature_list = pickle.load(f)\n price_scaler = pickle.load(f)\n trade_scaler = pickle.load(f)\n f_idx = 59\n src_time_period = ['20000101', '20180511']\n eval_time_period = ['20180402', '20180518']\n eval_time_len = Date.index(eval_time_period[1]) - Date.index(\n eval_time_period[0]) + 1\n total_acc = 0\n for day_shift in range(eval_time_len - 5):\n eval_start_date = Date.index(eval_time_period[0]) + day_shift\n target_start_date = eval_start_date - 21\n target_time_period = [Date[target_start_date], Date[eval_start_date]]\n next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]\n date_mask = (data.columns > src_time_period[0]) & (data.columns <=\n src_time_period[1])\n src_data = data.iloc[:, date_mask]\n date_mask = (data.columns > target_time_period[0]) & (data.columns <=\n target_time_period[1])\n target_data = data.iloc[:, date_mask]\n date_mask = (data.columns > next_time_period[0]) & (data.columns <=\n next_time_period[1])\n next_data = data.iloc[:, date_mask]\n src_TS = get_stock_time_series(src_data, ETF_ID)\n target_TS = get_stock_time_series(target_data, ETF_ID)\n next_TS = get_stock_time_series(next_data, ETF_ID)\n overall_TS = get_stock_time_series(data, ETF_ID)\n target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx],\n mode='valid')\n target_len = len(target_TS)\n max_target_xcorr_idx = target_xcorr.argsort()[::-1]\n predict_target_idx = max_target_xcorr_idx + target_len\n next_len = len(next_TS)\n max_next_xcorr_idx = next_xcorr.argsort()[::-1]\n top_num = 10\n acc = []\n label = np.argmax(next_TS[:, -3:], axis=-1)\n for idx in max_target_xcorr_idx[:top_num]:\n predict = np.argmax(overall_TS[predict_target_idx[idx]:\n predict_target_idx[idx] + next_len, -3:], axis=-1)\n acc.append(sum(label == predict) / next_len)\n max_acc_idx = np.argsort(acc)[::-1]\n output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in\n max_acc_idx]\n top_num = 3\n avg_acc = 0\n acc = []\n for idx in output_xcorr_idx[:top_num]:\n predict = np.argmax(overall_TS[predict_target_idx[idx]:\n predict_target_idx[idx] + next_len, -3:], axis=-1)\n acc.append(sum(label == predict) / next_len)\n avg_acc = avg_acc + acc[-1]\n print('Avg. 
Acc.: [{}]'.format(avg_acc / top_num))\n total_acc = total_acc + avg_acc / top_num\n print('[{}] Overall Acc.: [{}]'.format(ETF_ID, total_acc / (\n eval_time_len - 5)))\n output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]\nf = open('./Data/xcorr_date_data.pkl', 'wb')\npickle.dump(output_date, f, True)\nf.close()\n",
"step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\nimport config as conf\n\n\ndef get_stock_time_series(data_df, stock_id):\n curr_ID_data = data_df.loc[stock_id]\n output = np.array(curr_ID_data[0])\n for i in range(1, len(curr_ID_data.index)):\n output = np.vstack((output, curr_ID_data[i]))\n return output\n\n\nID_conf = conf.config('feature_conf').config['ID']\nETF_ID_list = ['0050', '0052', '0053', '0054', '0055', '0056', '0057',\n '0058', '0059', '006201', '006203', '006204', '006208']\noutput_date = {}\nfor ETF_ID in ETF_ID_list:\n Nm_conf = conf.config('feature_conf').config['Nm']\n if Nm_conf['enable'] is True:\n Nm_method = Nm_conf['method']\n file_postfix = '_Nm_' + str(Nm_conf['type'][0]\n ) + '_' + Nm_method + '_' + str(94) + '_' + ETF_ID + '.pkl'\n else:\n file_postfix = '_' + str(94) + '.pkl'\n src_file_path = './Data/all_feature_data' + file_postfix\n meta_file_path = './Data/all_meta_data' + file_postfix\n data = pd.read_pickle(src_file_path)\n f = open(meta_file_path, 'rb')\n tasharep_ID = pickle.load(f)\n member_ID = pickle.load(f)\n Date = pickle.load(f)\n feature_list = pickle.load(f)\n price_scaler = pickle.load(f)\n trade_scaler = pickle.load(f)\n f_idx = 59\n src_time_period = ['20000101', '20180511']\n eval_time_period = ['20180402', '20180518']\n eval_time_len = Date.index(eval_time_period[1]) - Date.index(\n eval_time_period[0]) + 1\n total_acc = 0\n for day_shift in range(eval_time_len - 5):\n eval_start_date = Date.index(eval_time_period[0]) + day_shift\n target_start_date = eval_start_date - 21\n target_time_period = [Date[target_start_date], Date[eval_start_date]]\n next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]]\n date_mask = (data.columns > src_time_period[0]) & (data.columns <=\n src_time_period[1])\n src_data = data.iloc[:, date_mask]\n date_mask = (data.columns > target_time_period[0]) & (data.columns <=\n target_time_period[1])\n target_data = data.iloc[:, date_mask]\n date_mask = (data.columns > next_time_period[0]) & (data.columns <=\n next_time_period[1])\n next_data = data.iloc[:, date_mask]\n src_TS = get_stock_time_series(src_data, ETF_ID)\n target_TS = get_stock_time_series(target_data, ETF_ID)\n next_TS = get_stock_time_series(next_data, ETF_ID)\n overall_TS = get_stock_time_series(data, ETF_ID)\n target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx],\n mode='valid')\n target_len = len(target_TS)\n max_target_xcorr_idx = target_xcorr.argsort()[::-1]\n predict_target_idx = max_target_xcorr_idx + target_len\n next_len = len(next_TS)\n max_next_xcorr_idx = next_xcorr.argsort()[::-1]\n top_num = 10\n acc = []\n label = np.argmax(next_TS[:, -3:], axis=-1)\n for idx in max_target_xcorr_idx[:top_num]:\n predict = np.argmax(overall_TS[predict_target_idx[idx]:\n predict_target_idx[idx] + next_len, -3:], axis=-1)\n acc.append(sum(label == predict) / next_len)\n max_acc_idx = np.argsort(acc)[::-1]\n output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in\n max_acc_idx]\n top_num = 3\n avg_acc = 0\n acc = []\n for idx in output_xcorr_idx[:top_num]:\n predict = np.argmax(overall_TS[predict_target_idx[idx]:\n predict_target_idx[idx] + next_len, -3:], axis=-1)\n acc.append(sum(label == predict) / next_len)\n avg_acc = avg_acc + acc[-1]\n print('Avg. 
Acc.: [{}]'.format(avg_acc / top_num))\n total_acc = total_acc + avg_acc / top_num\n print('[{}] Overall Acc.: [{}]'.format(ETF_ID, total_acc / (\n eval_time_len - 5)))\n output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]\nf = open('./Data/xcorr_date_data.pkl', 'wb')\npickle.dump(output_date, f, True)\nf.close()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 22 15:01:21 2018\r\n\r\n@author: Weiyu_Lee\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime, timedelta\r\n\r\nimport config as conf\r\n\r\ndef get_stock_time_series(data_df, stock_id):\r\n \r\n curr_ID_data = data_df.loc[stock_id]\r\n\r\n output = np.array(curr_ID_data[0])\r\n for i in range(1, len(curr_ID_data.index)):\r\n output = np.vstack((output, curr_ID_data[i]))\r\n \r\n return output \r\n\r\nID_conf = conf.config('feature_conf').config['ID']\r\n#ETF_ID = ID_conf[\"ID\"]\r\n#ETF_ID_list = [\"0050\"]\r\nETF_ID_list = [\"0050\", \"0052\", \"0053\", \"0054\", \"0055\", \"0056\", \"0057\", \"0058\", \"0059\", \r\n \"006201\", \"006203\", \"006204\", \"006208\"]\r\n\r\noutput_date = {}\r\nfor ETF_ID in ETF_ID_list:\r\n Nm_conf = conf.config('feature_conf').config['Nm']\r\n if Nm_conf[\"enable\"] is True:\r\n Nm_method = Nm_conf[\"method\"]\r\n file_postfix = '_Nm_' + str(Nm_conf[\"type\"][0]) + '_' + Nm_method + '_' + str(94) + \"_\" + ETF_ID + '.pkl'\r\n else:\r\n file_postfix = \"_\" + str(94) + '.pkl'\r\n \r\n src_file_path = './Data/all_feature_data' + file_postfix\r\n meta_file_path = './Data/all_meta_data' + file_postfix\r\n \r\n data = pd.read_pickle(src_file_path)\r\n \r\n f = open(meta_file_path, \"rb\")\r\n tasharep_ID = pickle.load(f)\r\n member_ID = pickle.load(f)\r\n Date = pickle.load(f)\r\n feature_list = pickle.load(f)\r\n price_scaler = pickle.load(f)\r\n trade_scaler = pickle.load(f)\r\n \r\n f_idx = 59 # MACD\r\n \r\n src_time_period = ['20000101', '20180511']\r\n# eval_time_period = ['20180511', '20180518']\r\n eval_time_period = ['20180402', '20180518']\r\n eval_time_len = Date.index(eval_time_period[1]) - Date.index(eval_time_period[0]) + 1\r\n \r\n total_acc = 0\r\n for day_shift in range(eval_time_len-5):\r\n \r\n eval_start_date = Date.index(eval_time_period[0]) + day_shift\r\n target_start_date = eval_start_date - 21\r\n \r\n target_time_period = [Date[target_start_date], Date[eval_start_date]]\r\n next_time_period = [Date[eval_start_date], Date[eval_start_date + 5]] \r\n \r\n date_mask = (data.columns > src_time_period[0]) & (data.columns <= src_time_period[1])\r\n src_data = data.iloc[:, date_mask]\r\n \r\n date_mask = (data.columns > target_time_period[0]) & (data.columns <= target_time_period[1])\r\n target_data = data.iloc[:, date_mask]\r\n \r\n date_mask = (data.columns > next_time_period[0]) & (data.columns <= next_time_period[1])\r\n next_data = data.iloc[:, date_mask]\r\n \r\n src_TS = get_stock_time_series(src_data, ETF_ID)\r\n target_TS = get_stock_time_series(target_data, ETF_ID)\r\n next_TS = get_stock_time_series(next_data, ETF_ID)\r\n overall_TS = get_stock_time_series(data, ETF_ID)\r\n \r\n target_xcorr = np.correlate(src_TS[:, f_idx], target_TS[:, f_idx], mode='valid')\r\n# next_xcorr = np.correlate(src_TS[:, f_idx], next_TS[:, f_idx], mode='valid')\r\n \r\n target_len = len(target_TS)\r\n max_target_xcorr_idx = target_xcorr.argsort()[::-1]\r\n predict_target_idx = max_target_xcorr_idx + target_len\r\n \r\n next_len = len(next_TS)\r\n max_next_xcorr_idx = next_xcorr.argsort()[::-1]\r\n \r\n # plt.plot(target_xcorr)\r\n # plt.savefig(\"target_xcorr_{}.png\".format(ETF_ID))\r\n \r\n #for idx in max_target_xcorr_idx[:10]:\r\n # plt.figure()\r\n # plt.plot(target_TS[:, 84])\r\n # plt.plot(src_TS[max_target_xcorr_idx[idx]:max_target_xcorr_idx[idx]+target_len, 84])\r\n \r\n #plt.figure()\r\n 
#plt.plot(target_xcorr)\r\n #plt.plot(next_xcorr)\r\n \r\n top_num = 10\r\n acc = []\r\n label = np.argmax(next_TS[:, -3:], axis=-1)\r\n for idx in max_target_xcorr_idx[:top_num]:\r\n predict = np.argmax(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, -3:], axis=-1)\r\n acc.append(sum(label == predict) / next_len)\r\n \r\n max_acc_idx = np.argsort(acc)[::-1]\r\n output_xcorr_idx = [max_target_xcorr_idx[acc_idx] for acc_idx in max_acc_idx]\r\n \r\n top_num = 3\r\n avg_acc = 0\r\n acc = []\r\n for idx in output_xcorr_idx[:top_num]:\r\n #plt.figure()\r\n #plt.plot(next_TS[:, 84])\r\n #plt.plot(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, 84]) \r\n #plt.figure()\r\n #plt.plot(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, 3])\r\n \r\n predict = np.argmax(overall_TS[predict_target_idx[idx]:predict_target_idx[idx]+next_len, -3:], axis=-1)\r\n acc.append(sum(label == predict) / next_len)\r\n \r\n avg_acc = avg_acc + acc[-1]\r\n #print(\"Acc.: [{}]\".format(acc[-1]))\r\n \r\n print(\"Avg. Acc.: [{}]\".format(avg_acc/top_num))\r\n \r\n total_acc = total_acc + avg_acc/top_num\r\n \r\n print(\"[{}] Overall Acc.: [{}]\".format(ETF_ID, total_acc/(eval_time_len-5)))\r\n output_date[ETF_ID] = [Date[i] for i in output_xcorr_idx[:10]]\r\n\r\nf = open('./Data/xcorr_date_data.pkl', 'wb')\r\npickle.dump(output_date, f, True) \r\nf.close()\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Constant types in Python.
Used to check that constants are not overwritten (定数上書きチェック用).
"""
import os
from common import const
from datetime import timedelta
from linebot.models import (
TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction,
QuickReplyButton, CameraAction, CameraRollAction, LocationAction
)
const.API_PROFILE_URL = 'https://api.line.me/v2/profile'
const.API_NOTIFICATIONTOKEN_URL = 'https://api.line.me/message/v3/notifier/token' # noqa: E501
const.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'
const.API_SENDSERVICEMESSAGE_URL = 'https://api.line.me/message/v3/notifier/send?target=service' # noqa: E501
const.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'
const.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'
const.DATA_LIMIT_TIME = 60 * 60 * 12
const.ONE_WEEK = timedelta(days=7)
const.JST_UTC_TIMEDELTA = timedelta(hours=9)
const.FLEX = {
"type": "flex",
"altText": "Flex Message",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901", # noqa:E501
"size": "full",
"aspectRatio": "1:1",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "UseCase Cafe",
"uri": "https://line.me/ja/"
}
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "LINE Cafe",
"size": "xl",
"weight": "bold"
},
{
"type": "box",
"layout": "baseline",
"margin": "md",
"contents": [
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "icon",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png", # noqa:E501
"size": "sm"
},
{
"type": "text",
"text": "4.0",
"flex": 0,
"margin": "md",
"size": "sm",
"color": "#999999"
}
]
},
{
"type": "box",
"layout": "vertical",
"spacing": "sm",
"margin": "lg",
"contents": [
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": [
{
"type": "text",
"text": "Place",
"flex": 1,
"size": "sm",
"color": "#AAAAAA"
},
{
"type": "text",
"text": "Miraina Tower, 4-1-6 Shinjuku, Tokyo", # noqa:E501
"flex": 5,
"size": "sm",
"color": "#666666",
"wrap": True
}
]
},
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": [
{
"type": "text",
"text": "Time",
"flex": 1,
"size": "sm",
"color": "#AAAAAA"
},
{
"type": "text",
"text": "10:00 - 23:00",
"flex": 5,
"size": "sm",
"color": "#666666",
"wrap": True
}
]
}
]
}
]
},
"footer": {
"type": "box",
"layout": "vertical",
"flex": 0,
"spacing": "sm",
"contents": [
{
"type": "button",
"action": {
"type": "uri",
"label": "WEBサイト",
"uri": "https://line.me/ja/"
},
"height": "sm",
"style": "link"
},
{
"type": "button",
"action": {
"type": "datetimepicker",
"label": "予約",
"data": "action=reserve",
"mode": "datetime",
"initial": "2020-01-01t00:00",
"max": "2020-12-31t23:59",
"min": "2020-01-01t00:00"
},
"height": "sm",
"style": "link"
},
{
"type": "button",
"action": {
"type": "postback",
"label": "クイックアクション",
"data": "action=quick_reply",
},
"height": "sm",
"style": "link"
},
{
"type": "spacer",
"size": "sm"
}
]
}
}
}
const.CAROUSEL = TemplateSendMessage(
alt_text='Carousel template',
template=CarouselTemplate(
columns=[
CarouselColumn(
thumbnail_image_url='https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186', # noqa:E501
title='最大80%OFF',
text='期間限定SALE',
actions=[
MessageAction(
label='Go to SALE',
text='Choose SALE'
)
]
),
CarouselColumn(
thumbnail_image_url='https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654', # noqa:E501
title='今月のおススメ商品',
text='これがあれば困らない!',
actions=[
MessageAction(
label='Recommended',
text='Choose Recommended'
)
]
),
CarouselColumn(
thumbnail_image_url='https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694', # noqa:E501
title='スッキリ収納特集',
text='大切なお洋服をスッキリ簡単に収納します',
actions=[
MessageAction(
label='To receive clothes',
text='Choose receive clothes'
)
]
)
]
)
)
const.QUICK_REPLY_ITEMS = [
QuickReplyButton(action=LocationAction(label='位置情報')),
QuickReplyButton(action=CameraAction(label='カメラ起動')),
QuickReplyButton(action=CameraRollAction(label='カメラロール起動')),
]
const.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),
'carousel': os.getenv('RICH_MENU_CAROUSEL', None),
'flex': os.getenv('RICH_MENU_FLEX', None)
}
|
normal
|
{
"blob_id": "25fcf162306b3d6d6307e703a7d829754cba2778",
"index": 2347,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconst.API_PROFILE_URL = 'https://api.line.me/v2/profile'\nconst.API_NOTIFICATIONTOKEN_URL = (\n 'https://api.line.me/message/v3/notifier/token')\nconst.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'\nconst.API_SENDSERVICEMESSAGE_URL = (\n 'https://api.line.me/message/v3/notifier/send?target=service')\nconst.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'\nconst.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'\nconst.DATA_LIMIT_TIME = 60 * 60 * 12\nconst.ONE_WEEK = timedelta(days=7)\nconst.JST_UTC_TIMEDELTA = timedelta(hours=9)\nconst.FLEX = {'type': 'flex', 'altText': 'Flex Message', 'contents': {\n 'type': 'bubble', 'hero': {'type': 'image', 'url':\n 'https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901'\n , 'size': 'full', 'aspectRatio': '1:1', 'aspectMode': 'cover', 'action':\n {'type': 'uri', 'label': 'UseCase Cafe', 'uri': 'https://line.me/ja/'}},\n 'body': {'type': 'box', 'layout': 'vertical', 'contents': [{'type':\n 'text', 'text': 'LINE Cafe', 'size': 'xl', 'weight': 'bold'}, {'type':\n 'box', 'layout': 'baseline', 'margin': 'md', 'contents': [{'type':\n 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png'\n , 'size': 'sm'}, {'type': 'text', 'text': '4.0', 'flex': 0, 'margin':\n 'md', 'size': 'sm', 'color': '#999999'}]}, {'type': 'box', 'layout':\n 'vertical', 'spacing': 'sm', 'margin': 'lg', 'contents': [{'type':\n 'box', 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type':\n 'text', 'text': 'Place', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'},\n {'type': 'text', 'text': 'Miraina Tower, 4-1-6 Shinjuku, Tokyo', 'flex':\n 5, 'size': 'sm', 'color': '#666666', 'wrap': True}]}, {'type': 'box',\n 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type': 'text',\n 'text': 'Time', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'}, {'type':\n 'text', 'text': '10:00 - 23:00', 'flex': 5, 'size': 'sm', 'color':\n '#666666', 'wrap': True}]}]}]}, 'footer': {'type': 'box', 'layout':\n 'vertical', 'flex': 0, 'spacing': 'sm', 'contents': [{'type': 'button',\n 'action': {'type': 'uri', 'label': 'WEBサイト', 'uri':\n 'https://line.me/ja/'}, 'height': 'sm', 'style': 'link'}, {'type':\n 'button', 'action': {'type': 'datetimepicker', 'label': '予約', 'data':\n 'action=reserve', 'mode': 'datetime', 'initial': '2020-01-01t00:00',\n 'max': '2020-12-31t23:59', 'min': '2020-01-01t00:00'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'button', 'action': {'type': 'postback',\n 'label': 'クイックアクション', 'data': 'action=quick_reply'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'spacer', 'size': 'sm'}]}}}\nconst.CAROUSEL = TemplateSendMessage(alt_text='Carousel template', template\n =CarouselTemplate(columns=[CarouselColumn(thumbnail_image_url=\n 'https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186'\n , title='最大80%OFF', text='期間限定SALE', actions=[MessageAction(label=\n 'Go to SALE', text='Choose SALE')]), CarouselColumn(thumbnail_image_url\n =\n 
'https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654'\n , title='今月のおススメ商品', text='これがあれば困らない!', actions=[MessageAction(label=\n 'Recommended', text='Choose Recommended')]), CarouselColumn(\n thumbnail_image_url=\n 'https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694'\n , title='スッキリ収納特集', text='大切なお洋服をスッキリ簡単に収納します', actions=[MessageAction(\n label='To receive clothes', text='Choose receive clothes')])]))\nconst.QUICK_REPLY_ITEMS = [QuickReplyButton(action=LocationAction(label=\n '位置情報')), QuickReplyButton(action=CameraAction(label='カメラ起動')),\n QuickReplyButton(action=CameraRollAction(label='カメラロール起動'))]\nconst.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),\n 'carousel': os.getenv('RICH_MENU_CAROUSEL', None), 'flex': os.getenv(\n 'RICH_MENU_FLEX', None)}\n",
"step-3": "<mask token>\nimport os\nfrom common import const\nfrom datetime import timedelta\nfrom linebot.models import TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction, QuickReplyButton, CameraAction, CameraRollAction, LocationAction\nconst.API_PROFILE_URL = 'https://api.line.me/v2/profile'\nconst.API_NOTIFICATIONTOKEN_URL = (\n 'https://api.line.me/message/v3/notifier/token')\nconst.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'\nconst.API_SENDSERVICEMESSAGE_URL = (\n 'https://api.line.me/message/v3/notifier/send?target=service')\nconst.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'\nconst.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'\nconst.DATA_LIMIT_TIME = 60 * 60 * 12\nconst.ONE_WEEK = timedelta(days=7)\nconst.JST_UTC_TIMEDELTA = timedelta(hours=9)\nconst.FLEX = {'type': 'flex', 'altText': 'Flex Message', 'contents': {\n 'type': 'bubble', 'hero': {'type': 'image', 'url':\n 'https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901'\n , 'size': 'full', 'aspectRatio': '1:1', 'aspectMode': 'cover', 'action':\n {'type': 'uri', 'label': 'UseCase Cafe', 'uri': 'https://line.me/ja/'}},\n 'body': {'type': 'box', 'layout': 'vertical', 'contents': [{'type':\n 'text', 'text': 'LINE Cafe', 'size': 'xl', 'weight': 'bold'}, {'type':\n 'box', 'layout': 'baseline', 'margin': 'md', 'contents': [{'type':\n 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png'\n , 'size': 'sm'}, {'type': 'icon', 'url':\n 'https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png'\n , 'size': 'sm'}, {'type': 'text', 'text': '4.0', 'flex': 0, 'margin':\n 'md', 'size': 'sm', 'color': '#999999'}]}, {'type': 'box', 'layout':\n 'vertical', 'spacing': 'sm', 'margin': 'lg', 'contents': [{'type':\n 'box', 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type':\n 'text', 'text': 'Place', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'},\n {'type': 'text', 'text': 'Miraina Tower, 4-1-6 Shinjuku, Tokyo', 'flex':\n 5, 'size': 'sm', 'color': '#666666', 'wrap': True}]}, {'type': 'box',\n 'layout': 'baseline', 'spacing': 'sm', 'contents': [{'type': 'text',\n 'text': 'Time', 'flex': 1, 'size': 'sm', 'color': '#AAAAAA'}, {'type':\n 'text', 'text': '10:00 - 23:00', 'flex': 5, 'size': 'sm', 'color':\n '#666666', 'wrap': True}]}]}]}, 'footer': {'type': 'box', 'layout':\n 'vertical', 'flex': 0, 'spacing': 'sm', 'contents': [{'type': 'button',\n 'action': {'type': 'uri', 'label': 'WEBサイト', 'uri':\n 'https://line.me/ja/'}, 'height': 'sm', 'style': 'link'}, {'type':\n 'button', 'action': {'type': 'datetimepicker', 'label': '予約', 'data':\n 'action=reserve', 'mode': 'datetime', 'initial': '2020-01-01t00:00',\n 'max': '2020-12-31t23:59', 'min': '2020-01-01t00:00'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'button', 'action': {'type': 'postback',\n 'label': 'クイックアクション', 'data': 'action=quick_reply'}, 'height': 'sm',\n 'style': 'link'}, {'type': 'spacer', 'size': 'sm'}]}}}\nconst.CAROUSEL = TemplateSendMessage(alt_text='Carousel template', template\n =CarouselTemplate(columns=[CarouselColumn(thumbnail_image_url=\n 
'https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186'\n , title='最大80%OFF', text='期間限定SALE', actions=[MessageAction(label=\n 'Go to SALE', text='Choose SALE')]), CarouselColumn(thumbnail_image_url\n =\n 'https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654'\n , title='今月のおススメ商品', text='これがあれば困らない!', actions=[MessageAction(label=\n 'Recommended', text='Choose Recommended')]), CarouselColumn(\n thumbnail_image_url=\n 'https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694'\n , title='スッキリ収納特集', text='大切なお洋服をスッキリ簡単に収納します', actions=[MessageAction(\n label='To receive clothes', text='Choose receive clothes')])]))\nconst.QUICK_REPLY_ITEMS = [QuickReplyButton(action=LocationAction(label=\n '位置情報')), QuickReplyButton(action=CameraAction(label='カメラ起動')),\n QuickReplyButton(action=CameraRollAction(label='カメラロール起動'))]\nconst.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),\n 'carousel': os.getenv('RICH_MENU_CAROUSEL', None), 'flex': os.getenv(\n 'RICH_MENU_FLEX', None)}\n",
"step-4": "\"\"\"\nConstant types in Python.\n定数上書きチェック用\n\"\"\"\nimport os\nfrom common import const\nfrom datetime import timedelta\n\nfrom linebot.models import (\n TemplateSendMessage, CarouselTemplate, CarouselColumn, MessageAction,\n QuickReplyButton, CameraAction, CameraRollAction, LocationAction\n)\n\nconst.API_PROFILE_URL = 'https://api.line.me/v2/profile'\nconst.API_NOTIFICATIONTOKEN_URL = 'https://api.line.me/message/v3/notifier/token' # noqa: E501\nconst.API_ACCESSTOKEN_URL = 'https://api.line.me/v2/oauth/accessToken'\nconst.API_SENDSERVICEMESSAGE_URL = 'https://api.line.me/message/v3/notifier/send?target=service' # noqa 501\nconst.API_USER_ID_URL = 'https://api.line.me/oauth2/v2.1/verify'\n\nconst.MSG_ERROR_NOPARAM = 'パラメータ未設定エラー'\nconst.DATA_LIMIT_TIME = 60 * 60 * 12\nconst.ONE_WEEK = timedelta(days=7)\nconst.JST_UTC_TIMEDELTA = timedelta(hours=9)\n\n\nconst.FLEX = {\n \"type\": \"flex\",\n \"altText\": \"Flex Message\",\n \"contents\": {\n \"type\": \"bubble\",\n \"hero\": {\n \"type\": \"image\",\n \"url\": \"https://media.istockphoto.com/photos/empty-coffee-shop-picture-id1154756901\", # noqa:E501\n \"size\": \"full\",\n \"aspectRatio\": \"1:1\",\n \"aspectMode\": \"cover\",\n \"action\": {\n \"type\": \"uri\",\n \"label\": \"UseCase Cafe\",\n \"uri\": \"https://line.me/ja/\"\n }\n },\n \"body\": {\n \"type\": \"box\",\n \"layout\": \"vertical\",\n \"contents\": [\n {\n \"type\": \"text\",\n \"text\": \"LINE Cafe\",\n \"size\": \"xl\",\n \"weight\": \"bold\"\n },\n {\n \"type\": \"box\",\n \"layout\": \"baseline\",\n \"margin\": \"md\",\n \"contents\": [\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"icon\",\n \"url\": \"https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png\", # noqa:E501\n \"size\": \"sm\"\n },\n {\n \"type\": \"text\",\n \"text\": \"4.0\",\n \"flex\": 0,\n \"margin\": \"md\",\n \"size\": \"sm\",\n \"color\": \"#999999\"\n }\n ]\n },\n {\n \"type\": \"box\",\n \"layout\": \"vertical\",\n \"spacing\": \"sm\",\n \"margin\": \"lg\",\n \"contents\": [\n {\n \"type\": \"box\",\n \"layout\": \"baseline\",\n \"spacing\": \"sm\",\n \"contents\": [\n {\n \"type\": \"text\",\n \"text\": \"Place\",\n \"flex\": 1,\n \"size\": \"sm\",\n \"color\": \"#AAAAAA\"\n },\n {\n \"type\": \"text\",\n \"text\": \"Miraina Tower, 4-1-6 Shinjuku, Tokyo\", # noqa:E501\n \"flex\": 5,\n \"size\": \"sm\",\n \"color\": \"#666666\",\n \"wrap\": True\n }\n ]\n },\n {\n \"type\": \"box\",\n \"layout\": \"baseline\",\n \"spacing\": \"sm\",\n \"contents\": [\n {\n \"type\": \"text\",\n \"text\": \"Time\",\n \"flex\": 1,\n \"size\": \"sm\",\n \"color\": \"#AAAAAA\"\n },\n {\n \"type\": \"text\",\n \"text\": \"10:00 - 23:00\",\n \"flex\": 5,\n \"size\": \"sm\",\n \"color\": \"#666666\",\n \"wrap\": True\n }\n ]\n }\n ]\n }\n ]\n },\n \"footer\": {\n \"type\": \"box\",\n \"layout\": \"vertical\",\n \"flex\": 0,\n \"spacing\": \"sm\",\n \"contents\": [\n {\n \"type\": \"button\",\n 
\"action\": {\n \"type\": \"uri\",\n \"label\": \"WEBサイト\",\n \"uri\": \"https://line.me/ja/\"\n },\n \"height\": \"sm\",\n \"style\": \"link\"\n },\n {\n \"type\": \"button\",\n \"action\": {\n \"type\": \"datetimepicker\",\n \"label\": \"予約\",\n \"data\": \"action=reserve\",\n \"mode\": \"datetime\",\n \"initial\": \"2020-01-01t00:00\",\n \"max\": \"2020-12-31t23:59\",\n \"min\": \"2020-01-01t00:00\"\n },\n \"height\": \"sm\",\n \"style\": \"link\"\n },\n {\n \"type\": \"button\",\n \"action\": {\n \"type\": \"postback\",\n \"label\": \"クイックアクション\",\n \"data\": \"action=quick_reply\",\n },\n \"height\": \"sm\",\n \"style\": \"link\"\n },\n {\n \"type\": \"spacer\",\n \"size\": \"sm\"\n }\n ]\n }\n }\n}\n\nconst.CAROUSEL = TemplateSendMessage(\n alt_text='Carousel template',\n template=CarouselTemplate(\n columns=[\n CarouselColumn(\n thumbnail_image_url='https://media.istockphoto.com/photos/neon-sale-glowing-text-sign-sale-banner-design-3d-render-glow-sale-picture-id854550186', # noqa:E501\n title='最大80%OFF',\n text='期間限定SALE',\n actions=[\n MessageAction(\n label='Go to SALE',\n text='Choose SALE'\n )\n ]\n ),\n CarouselColumn(\n thumbnail_image_url='https://media.istockphoto.com/photos/womens-clothes-set-isolatedfemale-clothing-collage-picture-id1067767654', # noqa:E501\n title='今月のおススメ商品',\n text='これがあれば困らない!',\n actions=[\n MessageAction(\n label='Recommended',\n text='Choose Recommended'\n )\n ]\n ),\n CarouselColumn(\n thumbnail_image_url='https://media.istockphoto.com/photos/clothes-hanging-on-rail-in-white-wardrobe-picture-id518597694', # noqa:E501\n title='スッキリ収納特集',\n text='大切なお洋服をスッキリ簡単に収納します',\n actions=[\n MessageAction(\n label='To receive clothes',\n text='Choose receive clothes'\n )\n ]\n )\n ]\n )\n)\n\nconst.QUICK_REPLY_ITEMS = [\n QuickReplyButton(action=LocationAction(label='位置情報')),\n QuickReplyButton(action=CameraAction(label='カメラ起動')),\n QuickReplyButton(action=CameraRollAction(label='カメラロール起動')),\n]\n\nconst.MENU_LIST = {'message': os.getenv('RICH_MENU_MESSAGE', None),\n 'carousel': os.getenv('RICH_MENU_CAROUSEL', None),\n 'flex': os.getenv('RICH_MENU_FLEX', None)\n }\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
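The LINE messaging constants record above only defines message payloads; delivering them still goes through the LINE Messaging API client. A minimal, hedged sketch of how const.CAROUSEL and const.QUICK_REPLY_ITEMS could be used from a webhook handler (the channel access token, function name and reply text are illustrative assumptions, not part of the record):

from linebot import LineBotApi
from linebot.models import TextSendMessage, QuickReply
from common import const  # assumes the constants module above has been imported, so these attributes exist

# Hypothetical channel access token; supply the real secret in practice.
line_bot_api = LineBotApi('YOUR_CHANNEL_ACCESS_TOKEN')


def reply_with_menu(reply_token):
    # Reply with the predefined carousel, then a text message carrying the
    # quick-reply buttons defined in the constants module (max 5 messages per reply).
    line_bot_api.reply_message(reply_token, [
        const.CAROUSEL,
        TextSendMessage(text='Choose from the menu',
                        quick_reply=QuickReply(items=const.QUICK_REPLY_ITEMS)),
    ])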
# -*- coding: utf-8 -*-
from optparse import make_option
from django.core.management.base import BaseCommand, LabelCommand, CommandError
from open_coesione import utils
import sys
import logging
import csv
import os
class Command(LabelCommand):
"""
Task to extract data related to a sample of all projects.
The sample of projects can be extracted through:
head -n 1 progetti_YYYYMMDD.csv > progetti_sample.csv
tail -n +2 progetti_YYYYMMDD.csv | shuf -n 10 | sort >> progetti_sample.csv
"""
args = "<filename>"
help = "Produces a csv file of rows related to projects' sample."
label = 'filename'
option_list = BaseCommand.option_list + (
make_option('--sample',
dest='proj_sample_file',
default='progetti_sample.csv',
help='Select projects sample csv file'),
make_option('--data-root',
dest='data_root',
default='dati/dataset_latest/',
help='Data root path, where csv files are to be found'),
make_option('--type',
dest='type',
default='loc',
help='Type of related data: loc|rec|pay'),
make_option('--encoding',
dest='encoding',
default='latin1',
help='set character encoding of input (and output) csv files')
)
proj_sample_file = ''
sorted_csv_file = ''
data_root = ''
encoding = ''
logger = logging.getLogger('csvimport')
proj_reader = None
csv.register_dialect('opencoesione', delimiter=';', quoting=csv.QUOTE_ALL)
def handle(self, *labels, **options):
        if len(labels) != 1:
raise CommandError('Enter just one %s.' % self.label)
self.data_root = options['data_root']
self.sorted_csv_file = os.path.join(self.data_root, labels[0])
self.proj_sample_file = os.path.join(self.data_root, options['proj_sample_file'])
self.encoding = options['encoding']
# open sample progetto csv reader
try:
self.proj_reader = utils.UnicodeDictReader(
open(self.proj_sample_file, 'r'),
dialect='opencoesione',
encoding=self.encoding
)
except IOError:
self.logger.error("It was impossible to open file %s" % self.proj_sample_file)
exit(1)
except csv.Error, e:
self.logger.error("CSV error while reading %s: %s" % (self.proj_sample_file, e.message))
exit(1)
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
if options['type'] == 'loc':
# to produce the full, sorted localizzazioni file
# head -n 1 localizzazioni_20120630.csv > localizzazioni_sorted.csv
# tail -n +2 localizzazioni_20120630.csv | sort >> localizzazioni_sorted.csv
headers = [
"COD_LOCALE_PROGETTO",
"COD_REGIONE","DEN_REGIONE",
"COD_PROVINCIA","DEN_PROVINCIA",
"COD_COMUNE","DEN_COMUNE",
"INDIRIZZO_PROG","CAP_PROG",
"DPS_TERRITORIO_PROG","DPS_FLAG_CAP_PROG"
]
elif options['type'] == 'rec':
# to produce the full, sorted soggetti file
# head -n 1 soggetti_20120630.csv > soggetti_sorted.csv
# tail -n +2 soggetti_20120630.csv | sort >> soggetti_sorted.csv
headers = [
"COD_LOCALE_PROGETTO",
"SOGG_COD_RUOLO","SOGG_DESCR_RUOLO","SOGG_PROGR_RUOLO",
"DPS_CODICE_FISCALE_SOGG","DPS_DENOMINAZIONE_SOGG",
"COD_FORMA_GIURIDICA_SOGG","DESCR_FORMA_GIURIDICA_SOGG",
"COD_COMUNE_SEDE_SOGG","INDIRIZZO_SOGG","CAP_SOGG",
"COD_ATECO_SOGG", "DESCRIZIONE_ATECO_SOGG"
]
elif options['type'] == 'pay':
headers = [
"COD_LOCALE_PROGETTO",
"DATA_AGGIORNAMENTO",
"TOT_PAGAMENTI"
]
else:
            raise CommandError("Wrong type %s. Select between loc, rec and pay." % options['type'])
# open sorted csv file from where to extract record related to progetti_sample
csv_file = os.path.join(self.data_root, labels[0])
self.logger.info("Inizio ricerca in %s" % csv_file)
try:
reader = utils.UnicodeDictReader(
open(csv_file, 'r'),
dialect='opencoesione',
encoding=self.encoding)
except IOError:
self.logger.error("It was impossible to open file %s" % csv_file)
exit(1)
except csv.Error, e:
self.logger.error("CSV error while reading %s: %s" % (csv_file, e.message))
# loop over progetto_sample and advance in localizzazioni, to fetch related records
        # this is O(n) and drastically reduces the extraction time
writer = None
for proj_row in self.proj_reader:
proj_codice_locale = proj_row['COD_LOCALE_PROGETTO']
loc = reader.next()
if writer is None:
writer = utils.UnicodeDictWriter(sys.stdout, headers, dialect='opencoesione', encoding=self.encoding)
while loc['COD_LOCALE_PROGETTO'] < proj_codice_locale:
loc = reader.next()
writer.writerow(loc)
loc = reader.next()
while loc['COD_LOCALE_PROGETTO'] == proj_codice_locale:
writer.writerow(loc)
loc = reader.next()
|
normal
|
{
"blob_id": "f0444676d28be27ad2f0f7cdaa58a96b7facc546",
"index": 2193,
"step-1": "# -*- coding: utf-8 -*-\n\nfrom optparse import make_option\nfrom django.core.management.base import BaseCommand, LabelCommand, CommandError\nfrom open_coesione import utils\n\nimport sys\nimport logging\nimport csv\nimport os\n\nclass Command(LabelCommand):\n \"\"\"\n Task to extract data related to a sample of all projects.\n The sample of projects can be extracted through:\n\n head -n 1 progetti_YYYYMMDD.csv > progetti_sample.csv\n tail -n +2 progetti_YYYYMMDD.csv | shuf -n 10 | sort >> progetti_sample.csv\n\n \"\"\"\n args = \"<filename>\"\n help = \"Produces a csv file of rows related to projects' sample.\"\n label = 'filename'\n\n option_list = BaseCommand.option_list + (\n make_option('--sample',\n dest='proj_sample_file',\n default='progetti_sample.csv',\n help='Select projects sample csv file'),\n make_option('--data-root',\n dest='data_root',\n default='dati/dataset_latest/',\n help='Data root path, where csv files are to be found'),\n make_option('--type',\n dest='type',\n default='loc',\n help='Type of related data: loc|rec|pay'),\n make_option('--encoding',\n dest='encoding',\n default='latin1',\n help='set character encoding of input (and output) csv files')\n )\n\n proj_sample_file = ''\n sorted_csv_file = ''\n data_root = ''\n encoding = ''\n logger = logging.getLogger('csvimport')\n proj_reader = None\n\n csv.register_dialect('opencoesione', delimiter=';', quoting=csv.QUOTE_ALL)\n\n def handle(self, *labels, **options):\n\n if len(labels) is not 1:\n raise CommandError('Enter just one %s.' % self.label)\n\n self.data_root = options['data_root']\n self.sorted_csv_file = os.path.join(self.data_root, labels[0])\n self.proj_sample_file = os.path.join(self.data_root, options['proj_sample_file'])\n self.encoding = options['encoding']\n\n # open sample progetto csv reader\n try:\n self.proj_reader = utils.UnicodeDictReader(\n open(self.proj_sample_file, 'r'),\n dialect='opencoesione',\n encoding=self.encoding\n )\n except IOError:\n self.logger.error(\"It was impossible to open file %s\" % self.proj_sample_file)\n exit(1)\n except csv.Error, e:\n self.logger.error(\"CSV error while reading %s: %s\" % (self.proj_sample_file, e.message))\n exit(1)\n\n verbosity = options['verbosity']\n if verbosity == '0':\n self.logger.setLevel(logging.ERROR)\n elif verbosity == '1':\n self.logger.setLevel(logging.WARNING)\n elif verbosity == '2':\n self.logger.setLevel(logging.INFO)\n elif verbosity == '3':\n self.logger.setLevel(logging.DEBUG)\n\n if options['type'] == 'loc':\n # to produce the full, sorted localizzazioni file\n # head -n 1 localizzazioni_20120630.csv > localizzazioni_sorted.csv\n # tail -n +2 localizzazioni_20120630.csv | sort >> localizzazioni_sorted.csv\n headers = [\n \"COD_LOCALE_PROGETTO\",\n \"COD_REGIONE\",\"DEN_REGIONE\",\n \"COD_PROVINCIA\",\"DEN_PROVINCIA\",\n \"COD_COMUNE\",\"DEN_COMUNE\",\n \"INDIRIZZO_PROG\",\"CAP_PROG\",\n \"DPS_TERRITORIO_PROG\",\"DPS_FLAG_CAP_PROG\"\n ]\n elif options['type'] == 'rec':\n # to produce the full, sorted soggetti file\n # head -n 1 soggetti_20120630.csv > soggetti_sorted.csv\n # tail -n +2 soggetti_20120630.csv | sort >> soggetti_sorted.csv\n headers = [\n \"COD_LOCALE_PROGETTO\",\n \"SOGG_COD_RUOLO\",\"SOGG_DESCR_RUOLO\",\"SOGG_PROGR_RUOLO\",\n \"DPS_CODICE_FISCALE_SOGG\",\"DPS_DENOMINAZIONE_SOGG\",\n \"COD_FORMA_GIURIDICA_SOGG\",\"DESCR_FORMA_GIURIDICA_SOGG\",\n \"COD_COMUNE_SEDE_SOGG\",\"INDIRIZZO_SOGG\",\"CAP_SOGG\",\n \"COD_ATECO_SOGG\", \"DESCRIZIONE_ATECO_SOGG\"\n ]\n elif options['type'] == 'pay':\n headers = [\n 
\"COD_LOCALE_PROGETTO\",\n \"DATA_AGGIORNAMENTO\",\n \"TOT_PAGAMENTI\"\n ]\n else:\n raise CommandError(\"Wrong type %s. Select between loc and rec.\" % options['type'])\n\n # open sorted csv file from where to extract record related to progetti_sample\n csv_file = os.path.join(self.data_root, labels[0])\n self.logger.info(\"Inizio ricerca in %s\" % csv_file)\n\n try:\n reader = utils.UnicodeDictReader(\n open(csv_file, 'r'),\n dialect='opencoesione',\n encoding=self.encoding)\n except IOError:\n self.logger.error(\"It was impossible to open file %s\" % csv_file)\n exit(1)\n except csv.Error, e:\n self.logger.error(\"CSV error while reading %s: %s\" % (csv_file, e.message))\n\n\n # loop over progetto_sample and advance in localizzazioni, to fetch related records\n # this is of O(n), and reduces drastically the extraction time\n writer = None\n for proj_row in self.proj_reader:\n proj_codice_locale = proj_row['COD_LOCALE_PROGETTO']\n\n loc = reader.next()\n if writer is None:\n writer = utils.UnicodeDictWriter(sys.stdout, headers, dialect='opencoesione', encoding=self.encoding)\n while loc['COD_LOCALE_PROGETTO'] < proj_codice_locale:\n loc = reader.next()\n writer.writerow(loc)\n\n loc = reader.next()\n while loc['COD_LOCALE_PROGETTO'] == proj_codice_locale:\n writer.writerow(loc)\n loc = reader.next()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
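The extraction loop in the record above relies on both CSV files being pre-sorted on COD_LOCALE_PROGETTO, so related rows can be collected in a single O(n) merge-style pass instead of re-scanning the large file for every sampled project. A small, self-contained Python 3 sketch of that same technique (the file paths, the ';' delimiter and the key-in-first-column layout are assumptions mirroring the record, not a drop-in replacement for the management command):

import csv

def extract_related(sample_path, sorted_path, out_path, delimiter=';'):
    """Copy every row of a large sorted CSV whose key appears in a small sorted sample."""
    with open(sample_path, newline='') as sample_file, \
         open(sorted_path, newline='') as big_file, \
         open(out_path, 'w', newline='') as out_file:
        sample = csv.reader(sample_file, delimiter=delimiter)
        big = csv.reader(big_file, delimiter=delimiter)
        writer = csv.writer(out_file, delimiter=delimiter)

        next(sample)                    # skip the sample header
        writer.writerow(next(big))      # copy the big file's header to the output
        row = next(big, None)

        for key, *_ in sample:          # sample keys are sorted ascending
            # advance until the block of rows matching this key is reached
            while row is not None and row[0] < key:
                row = next(big, None)
            # emit every row that shares the sampled key
            while row is not None and row[0] == key:
                writer.writerow(row)
                row = next(big, None)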
# Generated by Django 2.2.5 on 2019-10-28 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='my_resume',
field=models.CharField(choices=[('', ''), ('삼성전자', '삼성전자')], default=True, max_length=80),
),
]
|
normal
|
{
"blob_id": "32c28c7a1e1572744387b509fc6a448554ed565e",
"index": 3445,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user', '0001_initial')]\n operations = [migrations.AddField(model_name='user', name='my_resume',\n field=models.CharField(choices=[('', ''), ('삼성전자', '삼성전자')],\n default=True, max_length=80))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user', '0001_initial')]\n operations = [migrations.AddField(model_name='user', name='my_resume',\n field=models.CharField(choices=[('', ''), ('삼성전자', '삼성전자')],\n default=True, max_length=80))]\n",
"step-5": "# Generated by Django 2.2.5 on 2019-10-28 08:45\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='my_resume',\n field=models.CharField(choices=[('', ''), ('삼성전자', '삼성전자')], default=True, max_length=80),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import wizard
import report
|
flexible
|
{
"blob_id": "9d07fd14825ed1e0210fa1f404939f68a3bb039c",
"index": 4762,
"step-1": "<mask token>\n",
"step-2": "import wizard\nimport report\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
from numpy import *
def loadDataSet(fileName, delim = '\t'):
fr = open(fileName)
stringArr = [line.strip().split(delim) for line in fr.readlines()]
datArr = [map(float,line) for line in stringArr]
return mat(datArr)
def pca(dataMat, topNfeat = 9999999):
meanVals = mean(dataMat, axis = 0)
    # first, subtract the mean from the data
meanRemoved = dataMat - meanVals
covMat = cov(meanRemoved, rowvar =False)
eigVals, eigVects = linalg.eig(mat(covMat))
eigValInd = argsort(eigVals)
    # sort the N eigenvalues from smallest to largest
eigValInd = eigValInd[: -(topNfeat+1) : -1]
redEigVects = eigVects[:, eigValInd]
    # transform the data into the new space
lowDDataMat = meanRemoved * redEigVects
reconMat = (lowDDataMat * redEigVects.T) + meanVals
return lowDDataMat, reconMat
def replaceNanWithMean():
dataMat = loadDataSet('secom.data.txt', '')
numFeat = shape(dataMat)[1]
for i in range(numFeat):
        # compute the mean of all non-NaN values
meanVal = mean(dataMat[nonzero(~isnan(dataMat[:,i].A))[0],i])
        # replace every NaN with the mean
dataMat[nonzero(isnan(dataMat[:,i].A))[0], i] = meanVal
return dataMat
|
normal
|
{
"blob_id": "5f00cd446b219203c401799ba7b6205c7f1f8e9f",
"index": 3510,
"step-1": "<mask token>\n\n\ndef replaceNanWithMean():\n dataMat = loadDataSet('secom.data.txt', '')\n numFeat = shape(dataMat)[1]\n for i in range(numFeat):\n meanVal = mean(dataMat[nonzero(~isnan(dataMat[:, i].A))[0], i])\n dataMat[nonzero(isnan(dataMat[:, i].A))[0], i] = meanVal\n return dataMat\n",
"step-2": "<mask token>\n\n\ndef loadDataSet(fileName, delim='\\t'):\n fr = open(fileName)\n stringArr = [line.strip().split(delim) for line in fr.readlines()]\n datArr = [map(float, line) for line in stringArr]\n return mat(datArr)\n\n\n<mask token>\n\n\ndef replaceNanWithMean():\n dataMat = loadDataSet('secom.data.txt', '')\n numFeat = shape(dataMat)[1]\n for i in range(numFeat):\n meanVal = mean(dataMat[nonzero(~isnan(dataMat[:, i].A))[0], i])\n dataMat[nonzero(isnan(dataMat[:, i].A))[0], i] = meanVal\n return dataMat\n",
"step-3": "<mask token>\n\n\ndef loadDataSet(fileName, delim='\\t'):\n fr = open(fileName)\n stringArr = [line.strip().split(delim) for line in fr.readlines()]\n datArr = [map(float, line) for line in stringArr]\n return mat(datArr)\n\n\ndef pca(dataMat, topNfeat=9999999):\n meanVals = mean(dataMat, axis=0)\n meanRemoved = dataMat - meanVals\n covMat = cov(meanRemoved, rowvar=False)\n eigVals, eigVects = linalg.eig(mat(covMat))\n eigValInd = argsort(eigVals)\n eigValInd = eigValInd[:-(topNfeat + 1):-1]\n redEigVects = eigVects[:, eigValInd]\n lowDDataMat = meanRemoved * redEigVects\n reconMat = lowDDataMat * redEigVects.T + meanVals\n return lowDDataMat, reconMat\n\n\ndef replaceNanWithMean():\n dataMat = loadDataSet('secom.data.txt', '')\n numFeat = shape(dataMat)[1]\n for i in range(numFeat):\n meanVal = mean(dataMat[nonzero(~isnan(dataMat[:, i].A))[0], i])\n dataMat[nonzero(isnan(dataMat[:, i].A))[0], i] = meanVal\n return dataMat\n",
"step-4": "from numpy import *\n\n\ndef loadDataSet(fileName, delim='\\t'):\n fr = open(fileName)\n stringArr = [line.strip().split(delim) for line in fr.readlines()]\n datArr = [map(float, line) for line in stringArr]\n return mat(datArr)\n\n\ndef pca(dataMat, topNfeat=9999999):\n meanVals = mean(dataMat, axis=0)\n meanRemoved = dataMat - meanVals\n covMat = cov(meanRemoved, rowvar=False)\n eigVals, eigVects = linalg.eig(mat(covMat))\n eigValInd = argsort(eigVals)\n eigValInd = eigValInd[:-(topNfeat + 1):-1]\n redEigVects = eigVects[:, eigValInd]\n lowDDataMat = meanRemoved * redEigVects\n reconMat = lowDDataMat * redEigVects.T + meanVals\n return lowDDataMat, reconMat\n\n\ndef replaceNanWithMean():\n dataMat = loadDataSet('secom.data.txt', '')\n numFeat = shape(dataMat)[1]\n for i in range(numFeat):\n meanVal = mean(dataMat[nonzero(~isnan(dataMat[:, i].A))[0], i])\n dataMat[nonzero(isnan(dataMat[:, i].A))[0], i] = meanVal\n return dataMat\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom numpy import *\n\ndef loadDataSet(fileName, delim = '\\t'):\n fr = open(fileName)\n stringArr = [line.strip().split(delim) for line in fr.readlines()]\n datArr = [map(float,line) for line in stringArr]\n return mat(datArr)\n\ndef pca(dataMat, topNfeat = 9999999):\n meanVals = mean(dataMat, axis = 0)\n # 首先去平均值\n meanRemoved = dataMat - meanVals\n covMat = cov(meanRemoved, rowvar =False)\n eigVals, eigVects = linalg.eig(mat(covMat))\n eigValInd = argsort(eigVals)\n # 从小到大对N个值排序\n eigValInd = eigValInd[: -(topNfeat+1) : -1]\n redEigVects = eigVects[:, eigValInd]\n # 将数据切换到新的空间\n lowDDataMat = meanRemoved * redEigVects\n reconMat = (lowDDataMat * redEigVects.T) + meanVals\n return lowDDataMat, reconMat\n\ndef replaceNanWithMean():\n dataMat = loadDataSet('secom.data.txt', '')\n numFeat = shape(dataMat)[1]\n for i in range(numFeat):\n # 计算所有非 NaN 的平均值\n meanVal = mean(dataMat[nonzero(~isnan(dataMat[:,i].A))[0],i])\n # 将所有 NaN 置为平均值\n dataMat[nonzero(isnan(dataMat[:,i].A))[0], i] = meanVal\n return dataMat\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
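For orientation, a short usage sketch of the pca() routine defined in the record above, run on synthetic 2-D data (the synthetic data and topNfeat=1 are assumptions for illustration; it presumes the functions above are already defined in the session):

import numpy as np

# Build 100 correlated 2-D points so the top principal component is obvious.
rng = np.random.RandomState(0)
base = rng.normal(size=(100, 1))
data = np.hstack([base, 0.5 * base + 0.05 * rng.normal(size=(100, 1))])

lowD, recon = pca(np.mat(data), topNfeat=1)   # pca() as defined in the record above
print(lowD.shape)    # (100, 1): coordinates along the top principal component
print(recon.shape)   # (100, 2): reconstruction back in the original feature space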
import os
import shutil
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('-D', '--dir', required=False, help='Directory to sort')
args = vars(ap.parse_args())
if args['dir'] is None:
    DIR = os.getcwd()
elif os.path.exists(args['dir']):
    DIR = args['dir']
else:
    # without this branch DIR would be undefined and the loop below would raise NameError
    ap.error('directory does not exist: %s' % args['dir'])
for file in os.listdir(DIR):
if not os.path.isdir(os.path.join(DIR, file)):
name, ext = os.path.splitext(file)
        # strip the leading dot from the extension (equivalent to ext[1:])
        ext = ext[::-1][:-1][::-1]
if os.path.exists(os.path.join(DIR, ext.upper())):
shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.
upper(), file))
else:
os.mkdir(os.path.join(DIR, ext.upper()))
shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.
upper(), file))
|
normal
|
{
"blob_id": "93737e4c409d0efb1ae2263cb60d4b03d9aad0d8",
"index": 247,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nap.add_argument('-D', '--dir', required=False, help='Directory to sort')\n<mask token>\nif args['dir'] == None:\n DIR = os.getcwd()\nelif os.path.exists(args['dir']):\n DIR = args['dir']\nfor file in os.listdir(DIR):\n if not os.path.isdir(os.path.join(DIR, file)):\n name, ext = os.path.splitext(file)\n ext = ext[::-1][:-1][::-1]\n if os.path.exists(os.path.join(DIR, ext.upper())):\n shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.\n upper(), file))\n else:\n os.mkdir(os.path.join(DIR, ext.upper()))\n shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.\n upper(), file))\n",
"step-3": "<mask token>\nap = argparse.ArgumentParser()\nap.add_argument('-D', '--dir', required=False, help='Directory to sort')\nargs = vars(ap.parse_args())\nif args['dir'] == None:\n DIR = os.getcwd()\nelif os.path.exists(args['dir']):\n DIR = args['dir']\nfor file in os.listdir(DIR):\n if not os.path.isdir(os.path.join(DIR, file)):\n name, ext = os.path.splitext(file)\n ext = ext[::-1][:-1][::-1]\n if os.path.exists(os.path.join(DIR, ext.upper())):\n shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.\n upper(), file))\n else:\n os.mkdir(os.path.join(DIR, ext.upper()))\n shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.\n upper(), file))\n",
"step-4": "import os\nimport shutil\nimport argparse\nap = argparse.ArgumentParser()\nap.add_argument('-D', '--dir', required=False, help='Directory to sort')\nargs = vars(ap.parse_args())\nif args['dir'] == None:\n DIR = os.getcwd()\nelif os.path.exists(args['dir']):\n DIR = args['dir']\nfor file in os.listdir(DIR):\n if not os.path.isdir(os.path.join(DIR, file)):\n name, ext = os.path.splitext(file)\n ext = ext[::-1][:-1][::-1]\n if os.path.exists(os.path.join(DIR, ext.upper())):\n shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.\n upper(), file))\n else:\n os.mkdir(os.path.join(DIR, ext.upper()))\n shutil.move(os.path.join(DIR, file), os.path.join(DIR, ext.\n upper(), file))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
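Usage note for the script above: run it with no arguments to organise the current working directory, or pass a target folder, e.g. python sort_by_extension.py --dir ~/Downloads (the file name sort_by_extension.py is a hypothetical choice; the record never names its module). Each file is moved into a subfolder named after its upper-cased extension, created on first use.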
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyResponse
import tf
from math import radians, degrees, fabs
class MovementNullifier:
def __init__(self):
rospy.Subscriber("odom", Odometry, self.OdomCallback)
rospy.Subscriber("cmd_vel", Twist, self.TwistCallback)
self.cmd_vel_publisher = rospy.Publisher("cmd_vel", Twist, queue_size=10)
self.first = True
self.start_yaw = 0
        self.threshold = 0.01
self.distance = 0.0
self.prev_distance = 0.0
self.angle = 0.0
self.turn = False
self.move = False
self.cruise_velocity = 0.01
self.velocity = 0
self.lin_velocity = 0
self.cmd_is_commanding = False
self.twist_time = rospy.Time.now()
self.stop_service = rospy.Service("stop_nullify", Empty, self.StopListening)
self.start_service = rospy.Service("start_nullify", Empty, self.StartListening)
self.keep_running = True
def StopListening(self, data):
self.keep_running = False
return EmptyResponse()
def StartListening(self, data):
self.keep_running = True
#self.Zero()
self.turn = False
self.move = False
self.cmd_is_commanding = False
self.first = True
return EmptyResponse()
def Turn(self):
#print "Turning with velocity: %f" % (self.velocity)
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = self.velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Move(self):
cmd_vel_msg = Twist()
cmd_vel_msg.linear.x = self.lin_velocity
self.cmd_vel_publisher.publish(cmd_vel_msg)
def Zero(self):
cmd_vel_msg = Twist()
cmd_vel_msg.angular.z = 0
cmd_vel_msg.linear.x = 0
self.cmd_vel_publisher.publish(cmd_vel_msg)
def TwistCallback(self, data):
self.twist_time = rospy.Time.now()
eps = 0.002
if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.linear.x) > self.cruise_velocity + eps:
self.cmd_is_commanding = True
else:
self.cmd_is_commanding = False
def OdomCallback(self, data):
if not self.keep_running:
return
twist = data.twist
if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):
self.cmd_is_commanding = False
if not self.cmd_is_commanding: # lets counter react movement
pose = data.pose
quaternion = (pose.pose.orientation.x,
pose.pose.orientation.y,
pose.pose.orientation.z,
pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
yaw = euler[2]
x_position = pose.pose.position.x
#print "Yaw: %f deg, Position x: %f" % (degrees(euler[2]), pose.pose.position.x)
#print "Turn: %r, Move: %r, First: %r" % (self.turn, self.move, self.first)
if self.turn:
self.Turn()
if self.move:
self.Move()
if self.first:
self.start_yaw = euler[2]
self.start_x = x_position
self.first = False
self.turn = False
self.prev_time = data.header.stamp
self.Zero()
#print "Start yaw: %f" % (self.start_yaw)
#print "Start x: %f" % (self.start_x)
else:
self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))
self.distance = fabs(self.start_x - x_position)
#print "Distance %f, prev distance: %f" % (self.distance, self.prev_distance)
if self.angle >= 0.5:
self.turn = True
if self.start_yaw > yaw:
self.velocity = self.cruise_velocity
else:
self.velocity = -self.cruise_velocity
#print "Angle: %f" % self.angle
if self.turn and self.angle < 0.01:
self.turn = False
self.Zero()
#print "Yaw: start %f, new %f" % (self.start_yaw, yaw)
if self.move and self.distance < 0.001:
self.move = False
self.Zero()
#print "Position: start %f, new %f" % (self.start_x, x_position)
if self.move and (self.distance > self.prev_distance):
self.move = False
self.Zero()
if self.distance >= 0.01:
self.move = True
if self.start_x > x_position:
self.lin_velocity = self.cruise_velocity
else:
self.lin_velocity = -self.cruise_velocity
self.prev_distance = self.distance
else:
#print 'Resetting...'
self.first = True
self.angle = 0.0
if __name__ == "__main__":
rospy.init_node("keep_yaw")
movement_nullifier = MovementNullifier()
rospy.spin()
|
normal
|
{
"blob_id": "c349fa484476e3195e0932e425cbe93d7a7e5394",
"index": 1225,
"step-1": "<mask token>\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n <mask token>\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n <mask token>\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n\n def StartListening(self, data):\n self.keep_running = True\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n <mask token>\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n\n def StartListening(self, data):\n self.keep_running = True\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Move(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.lin_velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\nif __name__ == '__main__':\n rospy.init_node('keep_yaw')\n movement_nullifier = MovementNullifier()\n rospy.spin()\n",
"step-4": "import rospy\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\nfrom std_srvs.srv import Empty, EmptyResponse\nimport tf\nfrom math import radians, degrees, fabs\n\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber('odom', Odometry, self.OdomCallback)\n rospy.Subscriber('cmd_vel', Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist,\n queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service('stop_nullify', Empty, self.\n StopListening)\n self.start_service = rospy.Service('start_nullify', Empty, self.\n StartListening)\n self.keep_running = True\n\n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n\n def StartListening(self, data):\n self.keep_running = True\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n\n def Turn(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Move(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.lin_velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n\n def TwistCallback(self, data):\n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.\n linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False\n\n def OdomCallback(self, data):\n if not self.keep_running:\n return\n twist = data.twist\n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n if not self.cmd_is_commanding:\n pose = data.pose\n quaternion = (pose.pose.orientation.x, pose.pose.orientation.y,\n pose.pose.orientation.z, pose.pose.orientation.w)\n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n x_position = pose.pose.position.x\n if self.turn:\n self.Turn()\n if self.move:\n self.Move()\n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp\n self.Zero()\n else:\n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n if self.angle >= 0.5:\n self.turn = True\n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n if self.move and self.distance > self.prev_distance:\n self.move = False\n self.Zero()\n if self.distance >= 0.01:\n self.move = True\n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n self.prev_distance = self.distance\n else:\n self.first = True\n self.angle = 0.0\n\n\nif __name__ == '__main__':\n rospy.init_node('keep_yaw')\n movement_nullifier = MovementNullifier()\n rospy.spin()\n",
"step-5": "#!/usr/bin/env python\nimport rospy\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\nfrom std_srvs.srv import Empty, EmptyResponse\nimport tf\nfrom math import radians, degrees, fabs\n\nclass MovementNullifier:\n\n def __init__(self):\n rospy.Subscriber(\"odom\", Odometry, self.OdomCallback)\n rospy.Subscriber(\"cmd_vel\", Twist, self.TwistCallback)\n self.cmd_vel_publisher = rospy.Publisher(\"cmd_vel\", Twist, queue_size=10)\n self.first = True\n self.start_yaw = 0\n self.threshold = 0.01;\n self.distance = 0.0\n self.prev_distance = 0.0\n self.angle = 0.0\n self.turn = False\n self.move = False\n self.cruise_velocity = 0.01\n self.velocity = 0\n self.lin_velocity = 0\n self.cmd_is_commanding = False\n self.twist_time = rospy.Time.now()\n self.stop_service = rospy.Service(\"stop_nullify\", Empty, self.StopListening)\n self.start_service = rospy.Service(\"start_nullify\", Empty, self.StartListening)\n self.keep_running = True\n \n def StopListening(self, data):\n self.keep_running = False\n return EmptyResponse()\n \n def StartListening(self, data):\n self.keep_running = True\n #self.Zero()\n self.turn = False\n self.move = False\n self.cmd_is_commanding = False\n self.first = True\n return EmptyResponse()\n \n def Turn(self):\n #print \"Turning with velocity: %f\" % (self.velocity)\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = self.velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n \n def Move(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.lin_velocity\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n \n \n def Zero(self):\n cmd_vel_msg = Twist()\n cmd_vel_msg.angular.z = 0\n cmd_vel_msg.linear.x = 0\n self.cmd_vel_publisher.publish(cmd_vel_msg)\n \n def TwistCallback(self, data):\n \n self.twist_time = rospy.Time.now()\n eps = 0.002\n if fabs(data.angular.z) > self.cruise_velocity + eps or fabs(data.linear.x) > self.cruise_velocity + eps:\n self.cmd_is_commanding = True\n else:\n self.cmd_is_commanding = False \n \n def OdomCallback(self, data):\n \n if not self.keep_running:\n return\n \n twist = data.twist\n \n if rospy.Time.now() - self.twist_time > rospy.Duration(0.5):\n self.cmd_is_commanding = False\n \n if not self.cmd_is_commanding: # lets counter react movement\n pose = data.pose\n quaternion = (pose.pose.orientation.x,\n pose.pose.orientation.y,\n pose.pose.orientation.z,\n pose.pose.orientation.w)\n \n euler = tf.transformations.euler_from_quaternion(quaternion)\n yaw = euler[2]\n \n x_position = pose.pose.position.x\n #print \"Yaw: %f deg, Position x: %f\" % (degrees(euler[2]), pose.pose.position.x)\n \n #print \"Turn: %r, Move: %r, First: %r\" % (self.turn, self.move, self.first)\n \n if self.turn:\n self.Turn()\n \n if self.move:\n self.Move()\n \n if self.first:\n self.start_yaw = euler[2]\n self.start_x = x_position\n self.first = False\n self.turn = False\n self.prev_time = data.header.stamp \n self.Zero() \n #print \"Start yaw: %f\" % (self.start_yaw) \n #print \"Start x: %f\" % (self.start_x) \n else: \n self.angle = fabs(degrees(self.start_yaw) - degrees(yaw))\n self.distance = fabs(self.start_x - x_position)\n #print \"Distance %f, prev distance: %f\" % (self.distance, self.prev_distance)\n \n if self.angle >= 0.5: \n self.turn = True\n \n if self.start_yaw > yaw:\n self.velocity = self.cruise_velocity\n else:\n self.velocity = -self.cruise_velocity\n \n #print \"Angle: %f\" % self.angle\n if self.turn and self.angle < 0.01:\n self.turn = False\n self.Zero()\n #print \"Yaw: start %f, new %f\" % (self.start_yaw, 
yaw)\n \n if self.move and self.distance < 0.001:\n self.move = False\n self.Zero()\n #print \"Position: start %f, new %f\" % (self.start_x, x_position)\n \n if self.move and (self.distance > self.prev_distance):\n self.move = False\n self.Zero()\n \n if self.distance >= 0.01:\n self.move = True\n \n if self.start_x > x_position:\n self.lin_velocity = self.cruise_velocity\n else:\n self.lin_velocity = -self.cruise_velocity\n \n self.prev_distance = self.distance\n \n else:\n #print 'Resetting...'\n self.first = True\n self.angle = 0.0\n \n \n \nif __name__ == \"__main__\":\n rospy.init_node(\"keep_yaw\")\n \n movement_nullifier = MovementNullifier()\n \n rospy.spin()",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
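The node above exposes two std_srvs/Empty services so other code can pause and resume the drift correction. A hedged sketch of toggling it from a separate Python node (package layout and the client node name are assumptions; the service names match the record):

import rospy
from std_srvs.srv import Empty

rospy.init_node('nullifier_toggle', anonymous=True)
rospy.wait_for_service('stop_nullify')
stop_nullify = rospy.ServiceProxy('stop_nullify', Empty)
start_nullify = rospy.ServiceProxy('start_nullify', Empty)

stop_nullify()    # pause the odometry-based correction, e.g. before a planned manoeuvre
# ... command the intentional motion here ...
start_nullify()   # resume; the node re-latches onto the new pose as its reference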
# list: to replace an element, assign by index (e.g. a[2] = 'new'); to add an element, use append()
a=['somesh','aakash','sarika','datta','rudra','4mridula']
a[2] = 'nandini'
a.append('sarika')
print(a[2])
print(a)
|
normal
|
{
"blob_id": "5c643dfce9cf7a9f774957ff4819d3be8ac4f1da",
"index": 7376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\na.append('sarika')\nprint(a[2])\nprint(a)\n",
"step-3": "a = ['somesh', 'aakash', 'sarika', 'datta', 'rudra', '4mridula']\na[2] = 'nandini'\na.append('sarika')\nprint(a[2])\nprint(a)\n",
"step-4": "#list,for replacing element we use {a[0='']}:for adding element we use {append()}\r\na=['somesh','aakash','sarika','datta','rudra','4mridula']\r\na[2] = 'nandini'\r\na.append('sarika')\r\nprint(a[2])\r\nprint(a)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from PyQt5.QtWidgets import *
from select_substituents_table import *
from save_selection_dialog import *
class SelectSubsDialog(QDialog):
def __init__(self, r_group):
super().__init__()
self.r_group = r_group
self.substituents = None
self.new_set_saved = False
self.setWindowTitle(f"Select Substituents for {self.r_group}")
self.instructions_label = QLabel("Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.")
self.select_subs_table = SelectSubsTable()
self.confirm_button = QPushButton("Confirm Selection")
self.confirm_button.setEnabled(False)
self.save_as_set_button = QPushButton("Save Selection as Set")
self.save_as_set_button.setEnabled(False)
self.cancel_button = QPushButton("Cancel")
self.select_subs_button_layout = QHBoxLayout()
self.select_subs_button_layout.addWidget(self.confirm_button)
self.select_subs_button_layout.addWidget(self.save_as_set_button)
self.select_subs_button_layout.addWidget(self.cancel_button)
self.select_subs_layout = QVBoxLayout()
self.select_subs_layout.addWidget(self.instructions_label)
self.select_subs_layout.addWidget(self.select_subs_table)
self.select_subs_layout.addLayout(self.select_subs_button_layout)
self.setLayout(self.select_subs_layout)
self.select_subs_table.itemSelectionChanged.connect(self.enable_save_buttons)
self.confirm_button.clicked.connect(self.save_substituents)
self.save_as_set_button.clicked.connect(self.save_selection)
self.cancel_button.clicked.connect(self.close)
def enable_save_buttons(self):
self.confirm_button.setEnabled(True)
self.save_as_set_button.setEnabled(True)
def get_substituents(self):
self.substituents = list(dict.fromkeys([item.text() for item in self.select_subs_table.selectedItems()]))
def save_substituents(self):
self.get_substituents()
self.close()
def save_selection(self):
self.get_substituents()
save_selection_dialog = SaveSelectionDialog(self.substituents)
save_selection_dialog.exec_()
if save_selection_dialog.new_set_saved:
self.new_set_saved = True
self.close()
class SelectSubsForNewSetDialog(SelectSubsDialog):
def __init__(self):
super().__init__(r_group = "New Set")
self.confirm_button.setVisible(False)
class SelectSubsEditSetDialog(SelectSubsDialog):
def __init__(self, set_name):
super().__init__(r_group = None)
self.set_name = set_name
self.setWindowTitle(f"Select Groups for {self.set_name}")
self.save_as_set_button.setVisible(False)
|
normal
|
{
"blob_id": "849db3a92e0544661dd465b3e7f6949f8de5633b",
"index": 5099,
"step-1": "<mask token>\n\n\nclass SelectSubsDialog(QDialog):\n <mask token>\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n <mask token>\n <mask token>\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-2": "<mask token>\n\n\nclass SelectSubsDialog(QDialog):\n <mask token>\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n\n def save_substituents(self):\n self.get_substituents()\n self.close()\n <mask token>\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-3": "<mask token>\n\n\nclass SelectSubsDialog(QDialog):\n\n def __init__(self, r_group):\n super().__init__()\n self.r_group = r_group\n self.substituents = None\n self.new_set_saved = False\n self.setWindowTitle(f'Select Substituents for {self.r_group}')\n self.instructions_label = QLabel(\n 'Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.'\n )\n self.select_subs_table = SelectSubsTable()\n self.confirm_button = QPushButton('Confirm Selection')\n self.confirm_button.setEnabled(False)\n self.save_as_set_button = QPushButton('Save Selection as Set')\n self.save_as_set_button.setEnabled(False)\n self.cancel_button = QPushButton('Cancel')\n self.select_subs_button_layout = QHBoxLayout()\n self.select_subs_button_layout.addWidget(self.confirm_button)\n self.select_subs_button_layout.addWidget(self.save_as_set_button)\n self.select_subs_button_layout.addWidget(self.cancel_button)\n self.select_subs_layout = QVBoxLayout()\n self.select_subs_layout.addWidget(self.instructions_label)\n self.select_subs_layout.addWidget(self.select_subs_table)\n self.select_subs_layout.addLayout(self.select_subs_button_layout)\n self.setLayout(self.select_subs_layout)\n self.select_subs_table.itemSelectionChanged.connect(self.\n enable_save_buttons)\n self.confirm_button.clicked.connect(self.save_substituents)\n self.save_as_set_button.clicked.connect(self.save_selection)\n self.cancel_button.clicked.connect(self.close)\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n\n def save_substituents(self):\n self.get_substituents()\n self.close()\n <mask token>\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-4": "from PyQt5.QtWidgets import *\nfrom select_substituents_table import *\nfrom save_selection_dialog import *\n\n\nclass SelectSubsDialog(QDialog):\n\n def __init__(self, r_group):\n super().__init__()\n self.r_group = r_group\n self.substituents = None\n self.new_set_saved = False\n self.setWindowTitle(f'Select Substituents for {self.r_group}')\n self.instructions_label = QLabel(\n 'Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.'\n )\n self.select_subs_table = SelectSubsTable()\n self.confirm_button = QPushButton('Confirm Selection')\n self.confirm_button.setEnabled(False)\n self.save_as_set_button = QPushButton('Save Selection as Set')\n self.save_as_set_button.setEnabled(False)\n self.cancel_button = QPushButton('Cancel')\n self.select_subs_button_layout = QHBoxLayout()\n self.select_subs_button_layout.addWidget(self.confirm_button)\n self.select_subs_button_layout.addWidget(self.save_as_set_button)\n self.select_subs_button_layout.addWidget(self.cancel_button)\n self.select_subs_layout = QVBoxLayout()\n self.select_subs_layout.addWidget(self.instructions_label)\n self.select_subs_layout.addWidget(self.select_subs_table)\n self.select_subs_layout.addLayout(self.select_subs_button_layout)\n self.setLayout(self.select_subs_layout)\n self.select_subs_table.itemSelectionChanged.connect(self.\n enable_save_buttons)\n self.confirm_button.clicked.connect(self.save_substituents)\n self.save_as_set_button.clicked.connect(self.save_selection)\n self.cancel_button.clicked.connect(self.close)\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n\n def save_substituents(self):\n self.get_substituents()\n self.close()\n\n def save_selection(self):\n self.get_substituents()\n save_selection_dialog = SaveSelectionDialog(self.substituents)\n save_selection_dialog.exec_()\n if save_selection_dialog.new_set_saved:\n self.new_set_saved = True\n self.close()\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-5": "from PyQt5.QtWidgets import *\n\nfrom select_substituents_table import *\nfrom save_selection_dialog import *\n\nclass SelectSubsDialog(QDialog):\n\n def __init__(self, r_group):\n super().__init__()\n self.r_group = r_group\n self.substituents = None\n self.new_set_saved = False\n\n self.setWindowTitle(f\"Select Substituents for {self.r_group}\")\n\n self.instructions_label = QLabel(\"Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.\")\n\n self.select_subs_table = SelectSubsTable()\n\n self.confirm_button = QPushButton(\"Confirm Selection\")\n self.confirm_button.setEnabled(False)\n self.save_as_set_button = QPushButton(\"Save Selection as Set\")\n self.save_as_set_button.setEnabled(False)\n self.cancel_button = QPushButton(\"Cancel\")\n\n self.select_subs_button_layout = QHBoxLayout()\n self.select_subs_button_layout.addWidget(self.confirm_button)\n self.select_subs_button_layout.addWidget(self.save_as_set_button)\n self.select_subs_button_layout.addWidget(self.cancel_button)\n\n self.select_subs_layout = QVBoxLayout()\n self.select_subs_layout.addWidget(self.instructions_label)\n self.select_subs_layout.addWidget(self.select_subs_table)\n self.select_subs_layout.addLayout(self.select_subs_button_layout)\n self.setLayout(self.select_subs_layout)\n\n self.select_subs_table.itemSelectionChanged.connect(self.enable_save_buttons)\n self.confirm_button.clicked.connect(self.save_substituents)\n self.save_as_set_button.clicked.connect(self.save_selection)\n self.cancel_button.clicked.connect(self.close)\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in self.select_subs_table.selectedItems()]))\n \n def save_substituents(self):\n self.get_substituents()\n self.close()\n\n def save_selection(self):\n self.get_substituents()\n save_selection_dialog = SaveSelectionDialog(self.substituents)\n save_selection_dialog.exec_()\n if save_selection_dialog.new_set_saved:\n self.new_set_saved = True\n self.close()\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group = \"New Set\")\n\n self.confirm_button.setVisible(False)\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group = None)\n self.set_name = set_name\n \n self.setWindowTitle(f\"Select Groups for {self.set_name}\")\n\n self.save_as_set_button.setVisible(False)\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
sort(list)
<|reserved_special_token_1|>
list = [3, 1, 2, 5, 4, 7, 6]
def sort(list):
for i in range(len(list) - 1):
if list[i] > list[i + 1]:
a = list[i]
list[i] = list[i + 1]
list[i + 1] = a
print(list)
sort(list)
|
flexible
|
{
"blob_id": "219929d52b5f1a0690590e83b41d2b4f0b2b3a51",
"index": 336,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\nsort(list)\n",
"step-4": "list = [3, 1, 2, 5, 4, 7, 6]\n\n\ndef sort(list):\n for i in range(len(list) - 1):\n if list[i] > list[i + 1]:\n a = list[i]\n list[i] = list[i + 1]\n list[i + 1] = a\n print(list)\n\n\nsort(list)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#-*- coding: utf8 -*-
#Programa: 04-palindromo
#Objetivo:Un Numero Palindromo es aquel numero que se lee igual, de izquierda a derecha y viceversa
#El palindromo mas grande que se pued obtener por el producto de dos numeos de dos digitos
# es: 9009 que es igual a 91x99.
#Encuentre el palindromo mas grande que se pueda encontrar por el producto de numeo de tres digitos.
#Recomendacion: tratar de hacerlo con el ejemplo siempre.
#Autor: Fernando Martinez
#Fecha: 28 enero de 2020
def obtener_palindromo(valor):
"""
Funcion que verifica si un numero es palindromo
"""
#Luego de convertirlo a str, los vamos a insertar en una lista para luego verificar
palindromo = list(str(valor))
#lo insertamos en una nueva lista
palindromo_final = palindromo
#Luego aplicaremos la verificacion para comprobar si es un palindromo
if palindromo [:: -1] == palindromo_final:
return True
#print('El numero es un palindromo')
def multiplicaciones(): #906609 tiene que darme
"""
Funcion se encargara de crear las multiplicaciones entre 999 y 100
mediante dos ciclos for.
"""
ultimo_palindromo = 0
total = 0
for primer_numero in range(100, 1000):
for segundo_numero in range(100, 1000):
#total se encarga de hacer la multiplicacion entre los numeros
total = primer_numero * segundo_numero
# llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo
if obtener_palindromo(total):
#luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo
#entre 100 y 1000
if ultimo_palindromo < total:
ultimo_palindromo = total
return ultimo_palindromo
#Llamamos a la funcion
if __name__ == "__main__":
print(multiplicaciones())
|
normal
|
{
"blob_id": "45f9d5ac0fa7d9259c1d53b92c030559f3bfda89",
"index": 7161,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef multiplicaciones():\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n total = primer_numero * segundo_numero\n if obtener_palindromo(total):\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n\n<mask token>\n",
"step-3": "def obtener_palindromo(valor):\n \"\"\"\n Funcion que verifica si un numero es palindromo\n \n \"\"\"\n palindromo = list(str(valor))\n palindromo_final = palindromo\n if palindromo[::-1] == palindromo_final:\n return True\n\n\ndef multiplicaciones():\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n total = primer_numero * segundo_numero\n if obtener_palindromo(total):\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n\n<mask token>\n",
"step-4": "def obtener_palindromo(valor):\n \"\"\"\n Funcion que verifica si un numero es palindromo\n \n \"\"\"\n palindromo = list(str(valor))\n palindromo_final = palindromo\n if palindromo[::-1] == palindromo_final:\n return True\n\n\ndef multiplicaciones():\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n total = primer_numero * segundo_numero\n if obtener_palindromo(total):\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n\nif __name__ == '__main__':\n print(multiplicaciones())\n",
"step-5": "#-*- coding: utf8 -*-\n#Programa: 04-palindromo\n#Objetivo:Un Numero Palindromo es aquel numero que se lee igual, de izquierda a derecha y viceversa\n #El palindromo mas grande que se pued obtener por el producto de dos numeos de dos digitos \n # es: 9009 que es igual a 91x99.\n #Encuentre el palindromo mas grande que se pueda encontrar por el producto de numeo de tres digitos.\n\n #Recomendacion: tratar de hacerlo con el ejemplo siempre.\n#Autor: Fernando Martinez\n#Fecha: 28 enero de 2020\n\ndef obtener_palindromo(valor):\n \"\"\"\n Funcion que verifica si un numero es palindromo\n \n \"\"\"\n \n #Luego de convertirlo a str, los vamos a insertar en una lista para luego verificar\n palindromo = list(str(valor))\n #lo insertamos en una nueva lista\n palindromo_final = palindromo\n \n #Luego aplicaremos la verificacion para comprobar si es un palindromo\n if palindromo [:: -1] == palindromo_final:\n return True\n\n\n \n #print('El numero es un palindromo')\n\ndef multiplicaciones(): #906609 tiene que darme\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n#Llamamos a la funcion\n\nif __name__ == \"__main__\":\n print(multiplicaciones())\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env conda-execute
# conda execute
# env:
# - python >=3
# - requests
# run_with: python
from configparser import NoOptionError
from configparser import SafeConfigParser
import argparse
import base64
import inspect
import ipaddress
import json
import logging
import logging.config
import os
import socket
import sys
import time
import requests
requests.packages.urllib3.disable_warnings()
""" McAfee ESM <=> ServiceNow
This script can be called as an alarm action on the McAfee ESM to send data
to ServiceNow via the API to create tickets. Optionally, ticket data is
transmitted back to the ESM via syslog and referenced as an event. The event
allows for contextual linking directly to the ticket from the ESM.
The script requires Python 3 and was tested with 3.5.2 for Windows and Linux.
Other modules, requests and configparser, are also required.
The script requires a config.ini file for the credentials. The filename and
path can be set from the command line.
An example config.ini is available at:
https://raw.githubusercontent.com/andywalden/mfe2snow/config.ini
Example:
$ python mfe2snow.py alarm="This is my alarm" severity="50"
This is intended to be called as an alarm action to Execute a Script. In the ESM,
go to System Properties | Profile Management | Remote Commands and add a profile for
"Create ServiceNow Ticket". The script can be called using any combination of fields and
values however 'alarm', 'eventdescription', 'severity', 'sourceip' and 'destip' are
mapped to ServiceNow fields. Remaining fields=values are mapped to SNOW field
"Additional Info".
This is an example of the script being called:
mfe2snow.py alarm="[$Alarm Name]" eventdescription="[$Rule Message]" severity="[$Average Severity]"
devicename="[$Device Name]" message_key="[$Event ID]" category="[$Normalized Rule]" sourceip="[$Source IP]"
destip="[$Destination IP]" sourceport="[$Source Port]" destport="[$Destination Port]" host="[$%HostID]"
domain="[$%DomainID]" command="[$%CommandID]" object="[$%ObjectID]" application="[$%AppID]"
deviceaction="[$%Device_Action]" targetuser="[$%UserIDDst]" threatcategory="[$%Threat_Category]"
threathandled="[$%Threat_Handled]" geosrc="[$Geolocation Source]" geodest="[$Geolocation Destination]"
The output is also written to a file that is overwritten each time the script is run.
Make sure the permissions on the config.ini file are secure as not to expose any credentials.
"""
__author__ = "Andy Walden"
__version__ = "1.2"
class Args(object):
"""
Handles any args and passes them back as a dict
"""
def __init__(self, args):
self.log_levels = ["quiet", "error", "warning", "info", "debug"]
self.formatter_class = argparse.RawDescriptionHelpFormatter
self.parser = argparse.ArgumentParser(
formatter_class=self.formatter_class,
description="Send McAfee ESM Alarm data to ServiceNow"
)
self.args = args
self.parser.add_argument("-v", "--version",
action="version",
help="Show version",
version="%(prog)s {}".format(__version__))
self.parser.add_argument("-l", "--level",
default=None, dest="level",
choices=self.log_levels, metavar='',
help="Logging output level. Default: warning")
self.parser.add_argument("-c", "--config",
default=None, dest="cfgfile", metavar='',
help="Path to config file. Default: config.ini")
self.parser.add_argument("fields", nargs='*', metavar='',
help="Key=Values for the query. Example: \n \
alarm=\"The milk has spilled\" sourceip=\"1.1.1.1\", destip=\"2.2.2.2\" \
The following keys are mapped to fields in SNOW: \
alarm - Description \
sourceip/destip - Node \
severity - Severity,
recordid = Message_Key")
self.pargs = self.parser.parse_args()
def get_args(self):
return self.pargs
class Config(object):
""" Creates object for provided configfile/section settings """
def __init__(self, filename, header):
config = SafeConfigParser()
cfgfile = config.read(filename)
if not cfgfile:
raise ValueError('Config file not found:', filename)
self.__dict__.update(config.items(header))
def logging_init():
filename = get_filename()
logfile = filename + ".log"
hostname = socket.gethostname()
formatter = logging.Formatter('%(asctime)s {} %(module)s: %(message)s'.format(hostname),
datefmt='%b %d %H:%M:%S')
logger = logging.getLogger()
fh = logging.FileHandler(logfile, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_filename():
filename = (inspect.getfile(inspect.currentframe()).split("\\", -1)[-1]).rsplit(".", 1)[0]
return filename
class Syslog(object):
"""
Open TCP socket using supplied server IP and port.
Returns socket or None on failure
"""
def __init__(self,
server,
port=514):
logging.debug("Function: open_socket: %s: %s", server, port)
self.server = server
self.port = int(port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect((self.server, self.port))
def send(self, data):
"""
Sends data to the established connection
"""
self.data = data
self.sock.sendall(data.encode())
logging.info("Syslog feedback sent")
class SNOW(object):
"""
Send to ServiceNow API
Initialize with host, user and passwd to create connection.
send() sends JSON query to SNOW.
"""
def __init__(self, host, user, passwd):
self.host = host
self.user = user
self.passwd = passwd
self.url = "https://" + host
self.auth_string = '{}'.format(base64.b64encode('{}:{}'
.format(user,passwd)
.encode('utf-8'))
.decode('ascii'))
self.headers = {'Authorization':'Basic '+ self.auth_string, 'Content-Type': 'application/json'}
def send(self, query_conf, uri_string):
"""
Sends URI method and JSON query string
Runs query and returns result object.
"""
self.query_conf = query_conf
self.uri_string = uri_string
result = requests.post(self.url + self.uri_string,
headers=self.headers,
data=query_conf, verify=False)
if result.status_code != 200:
logging.error("SNOW said: Status Code: %s, Headers: %s, \
Mesg: %s", result.status_code, result.headers,
result.json())
sys.exit(1)
return result
class Query(object):
"""
Returns JSON query from provided dict
"""
def __init__(self):
self.qconf = []
def create(self, **kwargs):
self.query_dict = kwargs
self.alarm = self.query_dict.pop('alarm', 'McAfee ESM Alarm')
self.node = self.query_dict.pop('node', '0.0.0.0')
self.severity = self.query_dict.pop('severity', '25')
self.id = self.query_dict.pop('id', "No key")
self.info = ", ".join(["=".join([key, str(val)])
for key, val in self.query_dict.items()])
self.qconf = {
"active" : "false",
"classification" : "1",
"description" : self.alarm,
"source" : "McAfee ESM",
"node" : self.node,
"type" : "Security" ,
"message_key" : "id",
"additional_info" : self.info,
"severity" : self.severity,
"state" : "Ready",
"sys_class_name" : "em_event",
"sys_created_by" : "mcafee.integration"
}
return(json.dumps(self.qconf))
def main():
""" Main function """
# Process any command line args
args = Args(sys.argv)
pargs = args.get_args()
logging_init()
if pargs.level:
logging.getLogger().setLevel(getattr(logging, pargs.level.upper()))
try:
fields = dict(x.split('=', 1) for x in pargs.fields)
except ValueError:
logging.error("Invalid input. Format is field=value")
sys.exit(1)
configfile = pargs.cfgfile if pargs.cfgfile else 'config.ini'
try:
c = Config(configfile, "DEFAULT")
except ValueError:
logging.error("Config file not found: %s", configfile)
sys.exit(1)
# Strip empty values
fields = {k:v for k,v in fields.items() if v is not None}
# Figure out which IP should be 'node'
destip = fields.get('destip', None)
sourceip = fields.get('sourceip', None)
if sourceip:
for subnet in homenet:
if ipaddress.ip_address(sourceip) in ipaddress.ip_network(subnet):
fields['node'] = sourceip
elif ipaddress.ip_address(destip) in ipaddress.ip_network(subnet):
fields['node'] = destip
else:
fields['node'] = sourceip
# Check for severity in arguments. Map ESM severity (1-100) to SNOW (1-5)
s = int(fields.get('severity', 25))
if 90 <= s <= 100: fields['severity'] = 1 # Critical
if 75 <= s <= 89: fields['severity'] = 2 # Major
if 65 <= s <= 74: fields['severity'] = 3 # Minor
if 50 <= s <= 64: fields['severity'] = 4 # Warning
if 0 <= s <= 49: fields['severity'] = 5 # Info
try:
snowhost = SNOW(c.snowhost, c.snowuser, c.snowpass)
except AttributeError:
print("{} is missing a required field:".format(configfile))
raise
sys.exit(1)
new_ticket = Query()
new_ticket_q = new_ticket.create(**fields)
result = snowhost.send(new_ticket_q, '/api/now/table/em_event')
# Syslog feedback to ESM
try:
syslog_host = c.get('sysloghost')
syslog_port = c.get('syslogport')
syslog = Syslog(syslog_host, syslog_port)
syslog.send(result.text)
except NoOptionError:
logging.debug("Syslog feedback disabled. Settings not detected.")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
logging.warning("Control-C Pressed, stopping...")
sys.exit()
|
normal
|
{
"blob_id": "dd91ba13177aefacc24ef4a004acae0bffafadf0",
"index": 8889,
"step-1": "#!/usr/bin/env conda-execute\r\n\r\n# conda execute\r\n# env:\r\n# - python >=3\r\n# - requests\r\n# run_with: python\r\n\r\nfrom configparser import NoOptionError\r\nfrom configparser import SafeConfigParser\r\nimport argparse\r\nimport base64\r\nimport inspect\r\nimport ipaddress\r\nimport json\r\nimport logging\r\nimport logging.config\r\nimport os\r\nimport socket\r\nimport sys\r\nimport time\r\nimport requests\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\n\"\"\" McAfee ESM <=> ServiceNow\r\n\r\nThis script can be called as an alarm action on the McAfee ESM to send data\r\nto ServiceNow via the API to create tickets. Optionally, ticket data is\r\ntransmitted back to the ESM via syslog and referenced as an event. The event\r\nallows for contextual linking directly to the ticket from the ESM.\r\n\r\nThe script requires Python 3 and was tested with 3.5.2 for Windows and Linux.\r\n\r\nOther modules, requests and configparser, are also required.\r\n\r\nThe script requires a config.ini file for the credentials. The filename and\r\npath can be set from the command line.\r\n\r\n\r\nAn example config.ini is available at:\r\nhttps://raw.githubusercontent.com/andywalden/mfe2snow/config.ini\r\n\r\nExample:\r\n\r\n $ python mfe2snow.py alarm=\"This is my alarm\" severity=\"50\"\r\n\r\nThis is intended to be called as an alarm action to Execute a Script. In the ESM,\r\ngo to System Properties | Profile Management | Remote Commands and add a profile for\r\n\"Create ServiceNow Ticket\". The script can be called using any combination of fields and\r\nvalues however 'alarm', 'eventdescription', 'severity', 'sourceip' and 'destip' are\r\nmapped to ServiceNow fields. Remaining fields=values are mapped to SNOW field\r\n\"Additional Info\".\r\n\r\nThis is an example of the script being called:\r\n\r\nmfe2snow.py alarm=\"[$Alarm Name]\" eventdescription=\"[$Rule Message]\" severity=\"[$Average Severity]\"\r\ndevicename=\"[$Device Name]\" message_key=\"[$Event ID]\" category=\"[$Normalized Rule]\" sourceip=\"[$Source IP]\"\r\ndestip=\"[$Destination IP]\" sourceport=\"[$Source Port]\" destport=\"[$Destination Port]\" host=\"[$%HostID]\"\r\ndomain=\"[$%DomainID]\" command=\"[$%CommandID]\" object=\"[$%ObjectID]\" application=\"[$%AppID]\"\r\ndeviceaction=\"[$%Device_Action]\" targetuser=\"[$%UserIDDst]\" threatcategory=\"[$%Threat_Category]\"\r\nthreathandled=\"[$%Threat_Handled]\" geosrc=\"[$Geolocation Source]\" geodest=\"[$Geolocation Destination]\"\r\n\r\nThe output is also written to a file that is overwritten each time the script is run.\r\n\r\nMake sure the permissions on the config.ini file are secure as not to expose any credentials.\r\n\r\n\"\"\"\r\n\r\n__author__ = \"Andy Walden\"\r\n__version__ = \"1.2\"\r\n\r\nclass Args(object):\r\n \"\"\"\r\n Handles any args and passes them back as a dict\r\n \"\"\"\r\n\r\n def __init__(self, args):\r\n self.log_levels = [\"quiet\", \"error\", \"warning\", \"info\", \"debug\"]\r\n self.formatter_class = argparse.RawDescriptionHelpFormatter\r\n self.parser = argparse.ArgumentParser(\r\n formatter_class=self.formatter_class,\r\n description=\"Send McAfee ESM Alarm data to ServiceNow\"\r\n )\r\n self.args = args\r\n\r\n self.parser.add_argument(\"-v\", \"--version\",\r\n action=\"version\",\r\n help=\"Show version\",\r\n version=\"%(prog)s {}\".format(__version__))\r\n\r\n self.parser.add_argument(\"-l\", \"--level\",\r\n default=None, dest=\"level\",\r\n choices=self.log_levels, metavar='',\r\n help=\"Logging output level. 
Default: warning\")\r\n\r\n self.parser.add_argument(\"-c\", \"--config\",\r\n default=None, dest=\"cfgfile\", metavar='',\r\n help=\"Path to config file. Default: config.ini\")\r\n\r\n self.parser.add_argument(\"fields\", nargs='*', metavar='',\r\n\r\n help=\"Key=Values for the query. Example: \\n \\\r\n alarm=\\\"The milk has spilled\\\" sourceip=\\\"1.1.1.1\\\", destip=\\\"2.2.2.2\\\" \\\r\n The following keys are mapped to fields in SNOW: \\\r\n alarm - Description \\\r\n sourceip/destip - Node \\\r\n severity - Severity,\r\n recordid = Message_Key\")\r\n\r\n self.pargs = self.parser.parse_args()\r\n\r\n def get_args(self):\r\n return self.pargs\r\n\r\n\r\nclass Config(object):\r\n \"\"\" Creates object for provided configfile/section settings \"\"\"\r\n\r\n def __init__(self, filename, header):\r\n config = SafeConfigParser()\r\n cfgfile = config.read(filename)\r\n if not cfgfile:\r\n raise ValueError('Config file not found:', filename)\r\n self.__dict__.update(config.items(header))\r\n\r\n\r\ndef logging_init():\r\n filename = get_filename()\r\n logfile = filename + \".log\"\r\n hostname = socket.gethostname()\r\n formatter = logging.Formatter('%(asctime)s {} %(module)s: %(message)s'.format(hostname),\r\n datefmt='%b %d %H:%M:%S')\r\n logger = logging.getLogger()\r\n fh = logging.FileHandler(logfile, mode='w')\r\n fh.setFormatter(formatter)\r\n logger.addHandler(fh)\r\n ch = logging.StreamHandler()\r\n ch.setFormatter(formatter)\r\n logger.addHandler(ch)\r\n\r\ndef get_filename():\r\n filename = (inspect.getfile(inspect.currentframe()).split(\"\\\\\", -1)[-1]).rsplit(\".\", 1)[0]\r\n return filename\r\n\r\n\r\nclass Syslog(object):\r\n \"\"\"\r\n Open TCP socket using supplied server IP and port.\r\n\r\n Returns socket or None on failure\r\n \"\"\"\r\n\r\n def __init__(self,\r\n server,\r\n port=514):\r\n logging.debug(\"Function: open_socket: %s: %s\", server, port)\r\n self.server = server\r\n self.port = int(port)\r\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n self.sock.connect((self.server, self.port))\r\n\r\n def send(self, data):\r\n \"\"\"\r\n Sends data to the established connection\r\n \"\"\"\r\n\r\n self.data = data\r\n self.sock.sendall(data.encode())\r\n logging.info(\"Syslog feedback sent\")\r\n\r\n\r\nclass SNOW(object):\r\n \"\"\"\r\n Send to ServiceNow API\r\n Initialize with host, user and passwd to create connection.\r\n send() sends JSON query to SNOW.\r\n \"\"\"\r\n\r\n def __init__(self, host, user, passwd):\r\n self.host = host\r\n self.user = user\r\n self.passwd = passwd\r\n self.url = \"https://\" + host\r\n\r\n self.auth_string = '{}'.format(base64.b64encode('{}:{}'\r\n .format(user,passwd)\r\n .encode('utf-8'))\r\n .decode('ascii'))\r\n\r\n self.headers = {'Authorization':'Basic '+ self.auth_string, 'Content-Type': 'application/json'}\r\n\r\n\r\n def send(self, query_conf, uri_string):\r\n \"\"\"\r\n Sends URI method and JSON query string\r\n Runs query and returns result object.\r\n \"\"\"\r\n\r\n self.query_conf = query_conf\r\n self.uri_string = uri_string\r\n result = requests.post(self.url + self.uri_string,\r\n headers=self.headers,\r\n data=query_conf, verify=False)\r\n\r\n if result.status_code != 200:\r\n logging.error(\"SNOW said: Status Code: %s, Headers: %s, \\\r\n Mesg: %s\", result.status_code, result.headers,\r\n result.json())\r\n sys.exit(1)\r\n return result\r\n\r\nclass Query(object):\r\n \"\"\"\r\n Returns JSON query from provided dict\r\n 
\"\"\"\r\n\r\n def __init__(self):\r\n self.qconf = []\r\n\r\n def create(self, **kwargs):\r\n self.query_dict = kwargs\r\n self.alarm = self.query_dict.pop('alarm', 'McAfee ESM Alarm')\r\n self.node = self.query_dict.pop('node', '0.0.0.0')\r\n self.severity = self.query_dict.pop('severity', '25')\r\n self.id = self.query_dict.pop('id', \"No key\")\r\n self.info = \", \".join([\"=\".join([key, str(val)])\r\n for key, val in self.query_dict.items()])\r\n\r\n self.qconf = {\r\n \"active\" : \"false\",\r\n \"classification\" : \"1\",\r\n \"description\" : self.alarm,\r\n \"source\" : \"McAfee ESM\",\r\n \"node\" : self.node,\r\n \"type\" : \"Security\" ,\r\n \"message_key\" : \"id\",\r\n \"additional_info\" : self.info,\r\n \"severity\" : self.severity,\r\n \"state\" : \"Ready\",\r\n \"sys_class_name\" : \"em_event\",\r\n \"sys_created_by\" : \"mcafee.integration\"\r\n }\r\n\r\n return(json.dumps(self.qconf))\r\n\r\n\r\n\r\ndef main():\r\n \"\"\" Main function \"\"\"\r\n\r\n # Process any command line args\r\n args = Args(sys.argv)\r\n pargs = args.get_args()\r\n\r\n logging_init()\r\n\r\n if pargs.level:\r\n logging.getLogger().setLevel(getattr(logging, pargs.level.upper()))\r\n\r\n try:\r\n fields = dict(x.split('=', 1) for x in pargs.fields)\r\n except ValueError:\r\n logging.error(\"Invalid input. Format is field=value\")\r\n sys.exit(1)\r\n\r\n configfile = pargs.cfgfile if pargs.cfgfile else 'config.ini'\r\n try:\r\n c = Config(configfile, \"DEFAULT\")\r\n except ValueError:\r\n logging.error(\"Config file not found: %s\", configfile)\r\n sys.exit(1)\r\n\r\n # Strip empty values\r\n fields = {k:v for k,v in fields.items() if v is not None}\r\n\r\n # Figure out which IP should be 'node'\r\n destip = fields.get('destip', None)\r\n sourceip = fields.get('sourceip', None)\r\n if sourceip:\r\n for subnet in homenet:\r\n if ipaddress.ip_address(sourceip) in ipaddress.ip_network(subnet):\r\n fields['node'] = sourceip\r\n elif ipaddress.ip_address(destip) in ipaddress.ip_network(subnet):\r\n fields['node'] = destip\r\n else:\r\n fields['node'] = sourceip\r\n\r\n # Check for severity in arguments. Map ESM severity (1-100) to SNOW (1-5)\r\n s = int(fields.get('severity', 25))\r\n if 90 <= s <= 100: fields['severity'] = 1 # Critical\r\n if 75 <= s <= 89: fields['severity'] = 2 # Major\r\n if 65 <= s <= 74: fields['severity'] = 3 # Minor\r\n if 50 <= s <= 64: fields['severity'] = 4 # Warning\r\n if 0 <= s <= 49: fields['severity'] = 5 # Info\r\n\r\n try:\r\n snowhost = SNOW(c.snowhost, c.snowuser, c.snowpass)\r\n except AttributeError:\r\n print(\"{} is missing a required field:\".format(configfile))\r\n raise\r\n sys.exit(1)\r\n\r\n new_ticket = Query()\r\n new_ticket_q = new_ticket.create(**fields)\r\n result = snowhost.send(new_ticket_q, '/api/now/table/em_event')\r\n\r\n # Syslog feedback to ESM\r\n try:\r\n syslog_host = c.get('sysloghost')\r\n syslog_port = c.get('syslogport')\r\n syslog = Syslog(syslog_host, syslog_port)\r\n\r\n syslog.send(result.text)\r\n except NoOptionError:\r\n logging.debug(\"Syslog feedback disabled. Settings not detected.\")\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n main()\r\n except KeyboardInterrupt:\r\n logging.warning(\"Control-C Pressed, stopping...\")\r\n sys.exit()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Closes the specified poll for voting'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('poll_id', nargs='+', type=int)
# Named (optional arguments)
parser.add_argument(
'--add',
action='store_true',
dest='add',
default=False,
help='add'
)
parser.add_argument(
'--substract',
action='store_true',
dest='substract',
default=False,
help='substract'
)
parser.add_argument(
'--multiply',
action='store_true',
dest='multiply',
default=False,
help='multiply'
)
parser.add_argument(
'--divide',
action='store_true',
dest='divide',
default=False,
help='divide'
)
def handle(self, *args, **options):
s = ''
result = 0
tag = sum([options[i] for i in ['add', 'substract', 'multiply', 'divide']])
if options['add'] or not tag:
for poll_id in options['poll_id']:
s += '{} + '.format(poll_id)
result += poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))
elif options['substract']:
result += options['poll_id'][0]
s = '{} - '.format(options['poll_id'][0])
for poll_id in options['poll_id'][1:]:
s += '{} - '.format(poll_id)
result -= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))
elif options['multiply']:
result = 1
for poll_id in options['poll_id']:
s += '{} × '.format(poll_id)
result *= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))
elif options['divide']:
result = options['poll_id'][0]
s = '{} ÷ '.format(options['poll_id'][0])
for poll_id in options['poll_id'][1:]:
s += '{} ÷ '.format(poll_id)
result /= poll_id
self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))
|
normal
|
{
"blob_id": "b2d5b16c287dc76a088f6e20eca4a16dd0aad00f",
"index": 8797,
"step-1": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n <mask token>\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-2": "<mask token>\n\n\nclass Command(BaseCommand):\n <mask token>\n\n def add_arguments(self, parser):\n parser.add_argument('poll_id', nargs='+', type=int)\n parser.add_argument('--add', action='store_true', dest='add',\n default=False, help='add')\n parser.add_argument('--substract', action='store_true', dest=\n 'substract', default=False, help='substract')\n parser.add_argument('--multiply', action='store_true', dest=\n 'multiply', default=False, help='multiply')\n parser.add_argument('--divide', action='store_true', dest='divide',\n default=False, help='divide')\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-3": "<mask token>\n\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n parser.add_argument('poll_id', nargs='+', type=int)\n parser.add_argument('--add', action='store_true', dest='add',\n default=False, help='add')\n parser.add_argument('--substract', action='store_true', dest=\n 'substract', default=False, help='substract')\n parser.add_argument('--multiply', action='store_true', dest=\n 'multiply', default=False, help='multiply')\n parser.add_argument('--divide', action='store_true', dest='divide',\n default=False, help='divide')\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-4": "from django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n parser.add_argument('poll_id', nargs='+', type=int)\n parser.add_argument('--add', action='store_true', dest='add',\n default=False, help='add')\n parser.add_argument('--substract', action='store_true', dest=\n 'substract', default=False, help='substract')\n parser.add_argument('--multiply', action='store_true', dest=\n 'multiply', default=False, help='multiply')\n parser.add_argument('--divide', action='store_true', dest='divide',\n default=False, help='divide')\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply',\n 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2],\n result)))\n",
"step-5": "from django.core.management.base import BaseCommand, CommandError\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n # Positional arguments\n parser.add_argument('poll_id', nargs='+', type=int)\n\n # Named (optional arguments)\n parser.add_argument(\n '--add',\n action='store_true',\n dest='add',\n default=False,\n help='add'\n )\n parser.add_argument(\n '--substract',\n action='store_true',\n dest='substract',\n default=False,\n help='substract'\n )\n parser.add_argument(\n '--multiply',\n action='store_true',\n dest='multiply',\n default=False,\n help='multiply'\n )\n parser.add_argument(\n '--divide',\n action='store_true',\n dest='divide',\n default=False,\n help='divide'\n )\n\n\n def handle(self, *args, **options):\n s = ''\n result = 0\n tag = sum([options[i] for i in ['add', 'substract', 'multiply', 'divide']])\n if options['add'] or not tag:\n for poll_id in options['poll_id']:\n s += '{} + '.format(poll_id)\n result += poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n elif options['substract']:\n result += options['poll_id'][0]\n s = '{} - '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} - '.format(poll_id)\n result -= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n elif options['multiply']:\n result = 1\n for poll_id in options['poll_id']:\n s += '{} × '.format(poll_id)\n result *= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n elif options['divide']:\n result = options['poll_id'][0]\n s = '{} ÷ '.format(options['poll_id'][0])\n for poll_id in options['poll_id'][1:]:\n s += '{} ÷ '.format(poll_id)\n result /= poll_id\n self.stdout.write(self.style.SUCCESS('{}= {}'.format(s[:-2], result)))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |