code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
def f(p_arg, *s_args, **kw_args):
    # Combine the first extra positional, the 'py' keyword value, and the
    # leading positional argument (in that order, so non-commutative types
    # such as strings behave the same way).
    return s_args[0] + kw_args['py'] + p_arg
r = f(3, 2, py=1)  # r == 6
|
normal
|
{
"blob_id": "4a913cfdbddb2f6b5098395814f5fc1203192b9a",
"index": 4847,
"step-1": "<mask token>\n",
"step-2": "def f(p_arg, *s_args, **kw_args):\n return s_args[0] + kw_args['py'] + p_arg\n\n\n<mask token>\n",
"step-3": "def f(p_arg, *s_args, **kw_args):\n return s_args[0] + kw_args['py'] + p_arg\n\n\nr = f(3, 2, py=1)\n",
"step-4": "\r\ndef f(p_arg, *s_args, **kw_args):\r\n return (s_args[0] + kw_args['py'])+p_arg\r\nr = f(3, 2, py = 1) ## value r => 6\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python
from taskHandler import Location, Task, TaskFactory
import roslib; roslib.load_manifest('smart_stool')
import rospy
from geometry_msgs.msg import PoseStamped, Twist, Vector3
from nav_msgs.msg import Odometry
from kobuki_msgs.msg import BumperEvent
from move_base_msgs.msg import MoveBaseActionResult
from tf.transformations import quaternion_about_axis, euler_from_quaternion
z_axis = (0,0,1)  # unit z-axis; used as the rotation axis when building goal-orientation quaternions
from math import pi
class SmartStool:
    """Task-driven mobile-robot controller ("smart stool").

    Repeatedly takes the highest-priority active task from a TaskFactory,
    drives to the task's location via move_base, then performs the task's
    action ('sit', 'bump' or 'wiggle') by publishing Twist commands.

    NOTE: the methods use the module-level ``rate`` object created in the
    ``__main__`` section of this script.
    """

    def __init__(self):
        # state of the smart stool
        self.odomPose = Location(0, 0, 0)  # latest odometry pose (x, y, theta)
        self.bumperTriggered = False       # set by the readBumper callback
        self.atTaskLocation = False        # set by the goalReached callback

        # defining the tasks (name, priority, location, action)
        stool = Task('stool', 1, Location(0, 0, 0), 'sit')
        getMail = Task('get_mail', 2, Location(4, -3, 0), 'bump')
        chasePets = Task('chase_pets', 3, Location(0, 0, 0), 'wiggle')
        charge = Task('charge_battery', 4, Location(1, 0, 0), 'sit')
        charge.activate()  # charging should always be an active task

        # populate the task list and set up the task factory
        taskList = [stool, getMail, chasePets, charge]
        self.factory = TaskFactory(taskList)

        # set up the current task
        self.task = self.factory.getNextTask()

        # set up the subscribers
        self.odom_sub = rospy.Subscriber('/odom', Odometry, self.readOdometry, queue_size=1)
        self.bumper_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.readBumper, queue_size=1)
        self.goalReached_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult, self.goalReached, queue_size=1)

        # set up the publishers
        self.moveBase_pub = rospy.Publisher('/move_base_simple/goal', PoseStamped)
        self.action_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist)

    def goToTask(self):
        """Publish a move_base goal at the current task's location."""
        current_task_location = self.task.location.copy()
        goal = PoseStamped()
        goal.header.frame_id = 'map'
        goal.header.seq = 1
        now = rospy.Time.now()
        goal.header.stamp.secs = now.secs
        goal.header.stamp.nsecs = now.nsecs
        goal.pose.position.x = current_task_location.x
        goal.pose.position.y = current_task_location.y
        goal.pose.position.z = 0
        quat = quaternion_about_axis(current_task_location.theta, z_axis)
        # NOTE(review): ROS tf.transformations returns quaternions ordered
        # (x, y, z, w); assigning quat[0] to w looks transposed. It matches
        # the ordering used in readOdometry, so confirm the intended
        # convention before changing either.
        goal.pose.orientation.w = quat[0]
        goal.pose.orientation.x = quat[1]
        goal.pose.orientation.y = quat[2]
        goal.pose.orientation.z = quat[3]
        self.moveBase_pub.publish(goal)

    def publishTwist(self, cmd_linvel, cmd_angvel):
        """Publish a Twist (linear m/s, angular rad/s) to /cmd_vel_mux/input/teleop."""
        self.action_pub.publish(Twist(Vector3(cmd_linvel, 0, 0), Vector3(0, 0, cmd_angvel)))

    def actionHandler(self, actionName):
        """Perform the named action ('sit', 'bump' or 'wiggle'), then stop.

        Blocks until the action completes or the active task changes.

        :param str actionName: name of the action to perform
        """
        ####
        #### TODO: a change of task priority doesn't necessarily mean that the task was deactivated. Need to check
        #### if original task is still in list of active tasks. if it is, do not deactivate it. if it's not, deactivate it.
        #### Also need to check for other general silly mistakes
        ####
        current_task = self.task.copy()
        startLocation = self.odomPose.copy()
        driveSpeed = 0.1        # m/s forward speed for 'bump'
        spinSpeed = 0.5         # rad/s rotation speed for 'wiggle'
        wiggle_rotate = pi/2    # rotation amplitude for 'wiggle'
        timeout = 10            # seconds before 'wiggle' gives up
        startTime = rospy.get_time()
        print(actionName)
        # 'sit': stay put, polling for a higher-priority task to take over
        if actionName == 'sit':
            while (not rospy.is_shutdown()) and (self.task == current_task):
                self.publishTwist(0, 0)
                rate.sleep()
                self.task = self.factory.getNextTask()
                ##### TEMP #####
                self.factory.activateTask('get_mail')
        # 'bump': drive forward until the bumper fires, back up for 1 s
        elif actionName == 'bump':
            self.bumperTriggered = False
            while not rospy.is_shutdown() and not self.bumperTriggered:
                self.publishTwist(driveSpeed, 0)
                rate.sleep()
            startTime = rospy.get_time()
            while not rospy.is_shutdown() and (rospy.get_time() - startTime < 1):
                self.publishTwist(-driveSpeed, 0)
                rate.sleep()
            self.factory.deactivateTask(current_task.name)
        # 'wiggle': rotate back and forth until the task changes
        elif actionName == 'wiggle':
            # TODO(review): 'or ... > timeout' keeps looping AFTER the
            # timeout elapses; 'and ... < timeout' was probably intended --
            # confirm before changing the behavior.
            while self.task == current_task or (rospy.get_time() - startTime > timeout):
                while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation, -wiggle_rotate):
                    self.publishTwist(0, -spinSpeed)
                    rate.sleep()
                while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation, wiggle_rotate):
                    self.publishTwist(0, spinSpeed)
                    rate.sleep()
                self.task = self.factory.getNextTask()
            self.factory.deactivateTask(current_task.name)
        # warn that the specified action is not implemented
        else:
            print('Action not implemented!')
            print(actionName)
        # stop the robot
        self.publishTwist(0, 0)

    def execute(self):
        """Run one task cycle: drive to the current task, then do its action."""
        if self.task is None:
            return  # fixed: was 'break', a SyntaxError outside a loop
        current_task = self.task.copy()
        self.goToTask()
        # wait for the robot to be at its goal position
        print('going to task:' + current_task.name)
        while not self.atTaskLocation:
            rate.sleep()
            self.task = self.factory.getNextTask()
            # if the task has changed, abandon this one
            if not (current_task == self.task):
                return
        # reset for the next task
        self.atTaskLocation = False
        print('doing action')
        self.actionHandler(self.task.getAction())

    def readOdometry(self, msg):
        """Odometry callback: cache the robot's pose as a Location."""
        odom_position = msg.pose.pose.position
        odom_rotation = msg.pose.pose.orientation
        # NOTE(review): euler_from_quaternion expects (x, y, z, w); the
        # (w, x, y, z) ordering here mirrors goToTask but looks transposed --
        # confirm the intended convention.
        self.odomPose = Location(odom_position.x, odom_position.y, euler_from_quaternion((odom_rotation.w, odom_rotation.x, odom_rotation.y, odom_rotation.z))[2])

    def readBumper(self, msg):
        """Bumper callback: flag that the bumper was hit."""
        self.bumperTriggered = True

    def goalReached(self, msg):
        """move_base result callback: status 3 (SUCCEEDED) means we arrived."""
        if msg.status.status == 3:
            self.atTaskLocation = True
if __name__ == '__main__':
    # bring up the ROS node and its loop rate
    rospy.init_node('smart_stool')
    freq = 30  # Hz
    rate = rospy.Rate(freq)  # module-level: also used inside SmartStool methods

    # create the controller
    mySmartStool = SmartStool()

    # let subscribers/publishers settle for one second
    for _ in range(freq):
        rate.sleep()

    # main control loop: execute tasks until shutdown
    while not rospy.is_shutdown():
        mySmartStool.execute()
        rate.sleep()
|
normal
|
{
"blob_id": "234112ec16af39b79849dd08769597771fa2c38f",
"index": 3425,
"step-1": "#! /usr/bin/env python\n\nfrom taskHandler import Location, Task, TaskFactory\nimport roslib; roslib.load_manifest('smart_stool')\nimport rospy\nfrom geometry_msgs.msg import PoseStamped, Twist, Vector3\nfrom nav_msgs.msg import Odometry\nfrom kobuki_msgs.msg import BumperEvent\nfrom move_base_msgs.msg import MoveBaseActionResult\nfrom tf.transformations import quaternion_about_axis, euler_from_quaternion\nz_axis = (0,0,1)\nfrom math import pi\n\nclass SmartStool:\n def __init__(self):\n # state of the smart stool\n self.odomPose = Location(0,0,0)\n self.bumperTriggered = False\n self.atTaskLocation = False\n\n # defining the tasks\n stool = Task('stool', 1, Location(0,0,0), 'sit')\n getMail = Task('get_mail', 2, Location(4,-3,0), 'bump')\n chasePets = Task('chase_pets', 3, Location(0,0,0), 'wiggle')\n charge = Task('charge_battery', 4, Location(1,0,0), 'sit')\n charge.activate() # charging should always be an active task\n\n # populate the task list and set up the task factory\n taskList = [stool, getMail, chasePets, charge]\n self.factory = TaskFactory(taskList)\n\n # set up the current task\n self.task = self.factory.getNextTask()\n\n # set up the subscribers\n self.odom_sub = rospy.Subscriber('/odom', Odometry, self.readOdometry, queue_size=1)\n self.bumper_sub = rospy.Subscriber('/mobile_base/events/bumper', BumperEvent, self.readBumper, queue_size=1)\n self.goalReached_sub = rospy.Subscriber('/move_base/result', MoveBaseActionResult, self.goalReached, queue_size=1)\n\n # set up the publishers\n self.moveBase_pub = rospy.Publisher('/move_base_simple/goal', PoseStamped)\n self.action_pub = rospy.Publisher('/cmd_vel_mux/input/teleop', Twist)\n\n def goToTask(self):\n # send the smart stool to the location of its current task\n current_task_location = self.task.location.copy()\n goal = PoseStamped()\n goal.header.frame_id = 'map'\n goal.header.seq = 1\n now = rospy.Time.now()\n goal.header.stamp.secs = now.secs\n goal.header.stamp.nsecs = now.nsecs\n 
goal.pose.position.x = current_task_location.x\n goal.pose.position.y = current_task_location.y\n goal.pose.position.z = 0\n quat = quaternion_about_axis(current_task_location.theta,z_axis)\n goal.pose.orientation.w = quat[0]\n goal.pose.orientation.x = quat[1]\n goal.pose.orientation.y = quat[2]\n goal.pose.orientation.z = quat[3]\n self.moveBase_pub.publish(goal)\n\n def publishTwist(self, cmd_linvel, cmd_angvel):\n # publishes a Twist message to /cmd_vel_mux/input/teleop to perform custom motion actions\n self.action_pub.publish(Twist(Vector3(cmd_linvel,0,0),Vector3(0,0,cmd_angvel)))\n\n def actionHandler(self,actionName):\n ####\n #### TODO: a change of task priority doesn't necessarily mean that the task was deactivated. Need to check\n #### if original task is still in list of active tasks. if it is, do not deactivate it. if it's not, deactivate it.\n #### Also need to check for other general silly mistakes\n ####\n current_task = self.task.copy()\n startLocation = self.odomPose.copy()\n driveSpeed = 0.1\n spinSpeed = 0.5\n close_enough = 0.1\n wiggle_rotate = pi/2\n timeout = 10\n startTime = rospy.get_time()\n\n # execute the sit action\n print actionName\n if actionName == 'sit':\n while (not rospy.is_shutdown()) and (self.task == current_task):\n self.publishTwist(0,0)\n rate.sleep()\n self.task = self.factory.getNextTask()\n ##### TEMP #####\n self.factory.activateTask('get_mail')\n\n # execute the bump action\n elif actionName == 'bump':\n self.bumperTriggered = False\n while not rospy.is_shutdown() and not self.bumperTriggered:\n self.publishTwist(driveSpeed,0)\n rate.sleep()\n startTime = rospy.get_time()\n while not rospy.is_shutdown() and (rospy.get_time() - startTime < 1):\n self.publishTwist(-driveSpeed,0)\n rate.sleep()\n self.factory.deactivateTask(current_task.name)\n\n # execute the wiggle action\n elif actionName == 'wiggle':\n while self.task == current_task or (rospy.get_time() - startTime > timeout):\n while not rospy.is_shutdown() and not 
self.odomPose.compareAngle(startLocation,-wiggle_rotate):\n self.publishTwist(0,-spinSpeed)\n rate.sleep()\n while not rospy.is_shutdown() and not self.odomPose.compareAngle(startLocation,wiggle_rotate):\n self.publishTwist(0,spinSpeed)\n rate.sleep()\n self.task = self.factory.getNextTask()\n self.factory.deactivateTask(current_task.name)\n\n # warn that the specified action is not implemented\n else:\n print 'Action not implemented!'\n print actionName\n\n # stop the robot:\n self.publishTwist(0,0)\n\n def execute(self):\n if self.task is None: break\n current_task = self.task.copy()\n self.goToTask()\n # wait for the robot to be at its goal position\n print 'going to task:' + current_task.name\n while not self.atTaskLocation:\n rate.sleep()\n self.task = self.factory.getNextTask()\n # if that task has changed, exit this function\n if not(current_task == self.task):\n return\n # reset for the next task\n self.atTaskLocation = False\n print 'doing action'\n self.actionHandler(self.task.getAction())\n\n def readOdometry(self,msg):\n # callback function to read the robot's current odometry position\n odom_position = msg.pose.pose.position\n odom_rotation = msg.pose.pose.orientation\n self.odomPose = Location(odom_position.x,odom_position.y,euler_from_quaternion((odom_rotation.w, odom_rotation.x, odom_rotation.y, odom_rotation.z))[2])\n\n def readBumper(self,msg):\n # callback function to set the bumperTriggered flag if the bumper was hit\n self.bumperTriggered = True\n\n def goalReached(self,msg):\n # callback function to determine if the current task location was reached\n if msg.status.status == 3:\n self.atTaskLocation = True\n\nif __name__ == '__main__':\n # initialize the node:\n rospy.init_node('smart_stool')\n freq = 30 # hz\n rate = rospy.Rate(freq)\n # set up the smart stool object\n mySmartStool = SmartStool()\n\n # wait for one second\n for i in range(freq):\n rate.sleep()\n\n while not rospy.is_shutdown():\n mySmartStool.execute()\n 
rate.sleep()\n\n\n\n#top = factory.getNextTask()\n#all = factory.getAllTasks()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def SetCu2Wave():
    """Set the parameters to the two-line Cu K alpha 1+2 spectrum
    """
    # index each tuple by wavelength number (0, 1) in the module parmDict
    presets = {
        'wave': (1.540596, 1.544493),
        'int': (0.653817, 0.346183),
        'lwidth': (0.501844, 0.626579),
    }
    for key, pair in presets.items():
        parmDict[key] = dict(enumerate(pair))
<|reserved_special_token_0|>
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
    """Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.

    :param G2frame: parent GSAS-II frame; passed through to the plotting calls
    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param SetButtonStatus: callback invoked when the dialog is confirmed/closed
    :param dict parmDict: dict to place parameters. If empty, default values from
      globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in
      the array.
    :returns: a sizer with the GUI controls
    """
    def _onOK(event):
        # transfer the GUI values into NISTparms, then close the dialog
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True)
        FPdlg.Destroy()
    def _onClose(event):
        # close without transferring values
        SetButtonStatus()
        FPdlg.Destroy()
    def _onAddWave(event):
        # add one wavelength column and rebuild the GUI
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onRemWave(event):
        # remove one wavelength column and rebuild the GUI
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu5Wave(event):
        # load the five-line Cu K-alpha spectrum and rebuild the GUI
        parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
            1.541058, 1.54441, 1.544721))}
        parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
            0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
            0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu2Wave(event):
        # load the two-line Cu K-alpha spectrum and rebuild the GUI
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetPoint(event):
        # switch to Bragg-Brentano point-detector mode
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
            SetButtonStatus)
    def _onSetPSD(event):
        # switch to position-sensitive-detector mode
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
            SetButtonStatus)
    def PlotTopasFPA(event):
        # simulate a single peak at simParms['plotpos'] and plot it
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'
            ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
                'plotpos'], simParms['calcwid'], simParms['step'])
        except Exception as err:
            msg = 'Error computing convolution, revise input'
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame, NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx - pkPts // 2
        # add the peak (normalized to a max of 10000) into intArr, clipping
        # where the computed peak extends past either end of the grid
        if startInd < 0:
            intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                ] / pkMax
        elif startInd > len(intArr):
            return
        elif startInd + pkPts >= len(intArr):
            offset = pkPts - len(intArr[startInd:])
            intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
                :-offset] / pkMax
        else:
            intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
            labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
            lines=True)
    # rebuild the dialog contents from scratch (this function is re-entered
    # via wx.CallAfter whenever the wave count or detector mode changes)
    if FPdlg.GetSizer():
        FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    # choose the parameter list for the current detector mode
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms + BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms + BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1, 5))
    # grid of wavelength / intensity / Lorentz-width entries, one column per wave
    waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
    for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
        u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
        ):
        text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text, 0, wx.EXPAND)
        if prm not in parmDict:
            parmDict[prm] = {}
        for i in range(numWave):
            # supply a default for any wavelength not yet in parmDict
            if i not in parmDict[prm]:
                parmDict[prm][i] = defVal
            ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
            waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1, 5))
    # buttons to change the number of wavelength columns or load presets
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # buttons to switch detector mode (the active mode's button is disabled)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # three-column grid: parameter label, entry field, explanation text
    prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
        )
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for lbl, defVal, text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
            ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
        if lbl not in parmDict:
            parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # 'Plot peak' row with an entry field for the simulation position
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
    if 'plotpos' not in simParms:
        simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # OK / Cancel buttons
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    # resize the dialog to fit the rebuilt contents
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """convert Topas-type parameters to SI units for NIST and place in a dict sorted
    according to use in each convoluter

    Populates the module-level ``NISTparms`` dict in place. Note that
    ``InpParms`` may also be modified (``receiving_slit_length`` is nudged
    when it equals ``filament_length``).

    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`
    :returns: a nested dict with global parameters and those for each convolution
    """
    wavenums = range(InpParms['numWave'])
    # wavelengths: Å -> m
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    # normalize intensities so the strongest line is 1.0
    la = [InpParms['int'][i] for i in wavenums]
    source_intensities = np.array(la) / max(la)
    # widths are entered in Å/1000 -> m
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    # Gaussian emission width is fixed at 0.001 Å/1000 per line
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # equal source/target slit lengths break the axial convolver; nudge one
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    # each optional convolver is added when its inputs are set, and removed
    # from NISTparms when they are not (so repeat calls stay consistent)
    if InpParms.get('LAC_cm', 0) > 0:
        # linear absorption coefficient: 1/cm -> 1/m; thickness: mm -> m
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
        'lpsd_th2_angular_range', 0) > 0:
        # NOTE(review): np.arcsin of (pi * angle / 180) looks suspect here --
        # an arc length would be R * angle_in_radians; confirm intent.
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    if InpParms.get('Specimen_Displacement'):
        # displacement: mm -> m
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    if InpParms.get('receiving_slit_width'):
        # slit width: mm -> m
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
        'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # the '' key holds the global (non-convolver) parameters
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
    """Create a peak profile object using the NIST XRD Fundamental
    Parameters Code.

    :returns: a profile object that can provide information on
      each convolution or compute the composite peak shape.
    """
    profile = FP.FP_profile(
        anglemode='twotheta',
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling', 10))
    profile.debug_cache = False
    # the '' entry in NISTparms holds the global (non-convolver) settings
    for convolver, settings in NISTparms.items():
        if convolver:
            profile.set_parameters(convolver=convolver, **settings)
        else:
            profile.set_parameters(**settings)
    return profile
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
    """Compute a single peak using a NIST profile object

    :param object NISTpk: a peak profile computational object from the
      NIST XRD Fundamental Parameters Code, typically established from
      a call to :func:`SetupFPAcalc`
    :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
    :param float twotheta: nominal center of peak (degrees)
    :param float calcwid: width to perform convolution (degrees)
    :param float step: step size
    :returns: (center_bin_idx, profile) -- the index of the grid bin holding
      the peak center and the computed line-profile object
    """
    # clamp the peak-center bin onto the grid (searchsorted can return len(ttArr))
    center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)
    NISTpk.set_optimized_window(
        twotheta_window_center_deg=ttArr[center_bin_idx],
        twotheta_approx_window_fullwidth_deg=calcwid,
        twotheta_exact_bin_spacing_deg=step)
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    profile = NISTpk.compute_line_profile()
    return center_bin_idx, profile
def MakeSimSizer(G2frame, dlg):
"""Create a GUI to get simulation with parameters for Fundamental
Parameters fitting.
:param wx.Window dlg: Frame or Dialog where GUI will appear
:returns: a sizer with the GUI controls
"""
def _onOK(event):
msg = ''
if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
msg += 'First peak minus half the calc width is too low'
if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
if msg:
msg += '\n'
msg += 'Last peak plus half the calc width is too high'
if simParms['npeaks'] < 8:
if msg:
msg += '\n'
msg += 'At least 8 peaks are needed'
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
return
ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
'step'])
intArr = np.zeros_like(ttArr)
peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
simParms['npeaks'], endpoint=True)
peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
NISTpk = setupFPAcalc()
minPtsHM = len(intArr)
maxPtsHM = 0
for num, twoth_peak in enumerate(peaklist):
try:
center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
twoth_peak, simParms['calcwid'], simParms['step'])
except:
if msg:
msg += '\n'
msg = 'Error computing convolution, revise input'
continue
if num == 0:
G2plt.PlotFPAconvolutors(G2frame, NISTpk)
pkMax = peakObj.peak.max()
pkPts = len(peakObj.peak)
minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
startInd = center_bin_idx - pkPts // 2
if startInd < 0:
intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
] / pkMax
elif startInd > len(intArr):
break
elif startInd + pkPts >= len(intArr):
offset = pkPts - len(intArr[startInd:])
intArr[startInd:startInd + pkPts - offset
] += 10000 * peakObj.peak[:-offset] / pkMax
else:
intArr[startInd:startInd + pkPts
] += 10000 * peakObj.peak / pkMax
if maxPtsHM * simParms['step'] > peakSpacing / 4:
if msg:
msg += '\n'
msg += (
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
.format(maxPtsHM * simParms['step'], peakSpacing))
if minPtsHM < 10:
if msg:
msg += '\n'
msg += (
'There are only {} points above the half-max. 10 are needed. Dropping step size.'
.format(minPtsHM))
simParms['step'] *= 0.5
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
wx.CallAfter(MakeSimSizer, G2frame, dlg)
return
dlg.Destroy()
wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
"""Perform a peak fit to the FP simulated pattern
"""
plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
RESIZE_BORDER)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
txt = wx.StaticText(plswait, wx.ID_ANY,
'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
plswait.SetSizer(vbox)
plswait.Layout()
plswait.CenterOnParent()
plswait.Show()
wx.BeginBusyCursor()
ints = list(NISTparms['emission']['emiss_intensities'])
Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
if len(ints) > 1:
ints[np.argmax(ints)] = -1
Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
else:
Lam2 = None
histId = G2frame.AddSimulatedPowder(ttArr, intArr,
'NIST Fundamental Parameters simulation', Lam1, Lam2)
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, G2frame.root, 'Controls'))
controldat = controls.get('data', {'deriv type': 'analytic',
'min dM/M': 0.001})
Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
(G2frame, histId, 'Instrument Parameters'))
peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Peak List'))
bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
GetGPXtreeItemId(G2frame, histId, 'Background'))
bkg1[1] = False
bkg1[2] = 0
bkg1[3] = 0.0
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Limits'))
try:
Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
NISTparms['axial']['slit_length_source']) / NISTparms[''][
'diffractometer_radius']
except:
pass
for pos in peaklist:
i = ttArr.searchsorted(pos)
area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
maxPtsHM)])
peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
area))
histData = G2frame.GPXtree.GetItemPyData(histId)
bxye = np.zeros(len(histData[1][1]))
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False,
controldat, None)[0]
for pk in peakData['peaks']:
pk[1] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in ('U', 'V', 'W', 'X', 'Y'):
Parms[p][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
Parms['SH/L'][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in Parms:
if len(Parms[p]) == 3:
Parms[p][0] = Parms[p][1]
Parms[p][2] = False
wx.EndBusyCursor()
plswait.Destroy()
pth = G2G.GetExportPath(G2frame)
fldlg = wx.FileDialog(G2frame,
'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
wx.FD_OVERWRITE_PROMPT)
try:
if fldlg.ShowModal() == wx.ID_OK:
filename = fldlg.GetPath()
filename = os.path.splitext(filename)[0] + '.instprm'
File = open(filename, 'w')
File.write(
'#GSAS-II instrument parameter file; do not add/delete items!\n'
)
for item in Parms:
File.write(item + ':' + str(Parms[item][1]) + '\n')
File.close()
print('Instrument parameters saved to: ' + filename)
finally:
fldlg.Destroy()
def _onClose(event):
dlg.Destroy()
def SetButtonStatus(done=False):
OKbtn.Enable(bool(NISTparms))
saveBtn.Enable(bool(NISTparms))
if done:
_onOK(None)
def _onSetFPA(event):
FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
FPdlg.CenterOnParent()
FPdlg.Raise()
FPdlg.Show()
def _onSaveFPA(event):
filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
'dict of NIST FPA values', dlg)
if not filename:
return
fp = open(filename, 'w')
fp.write(
'# parameters to be used in the NIST XRD Fundamental Parameters program\n'
)
fp.write('{\n')
for key in sorted(NISTparms):
fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
if not key:
fp.write(' # global parameters')
fp.write('\n')
fp.write('}\n')
fp.close()
def _onReadFPA(event):
filename = G2G.GetImportFile(G2frame, message=
'Read file with dict of values for NIST Fundamental Parameters',
parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
if not filename:
return
if not filename[0]:
return
try:
txt = open(filename[0], 'r').read()
NISTparms.clear()
array = np.array
d = eval(txt)
NISTparms.update(d)
except Exception as err:
G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
filename, err), 'Bad dict input')
SetButtonStatus()
if dlg.GetSizer():
dlg.GetSizer().Clear(True)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
'Fit Profile Parameters to Peaks from Fundamental Parameters',
style=wx.ALIGN_CENTER), 0, wx.EXPAND)
MainSizer.Add((-1, 5))
prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
for key, defVal, text in (('minTT', 3.0,
'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
'Location of last peak in 2theta (deg)'), ('step', 0.01,
'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
'Number of peaks'), ('calcwid', 2.0,
'Range to compute each peak (deg 2theta)')):
if key not in simParms:
simParms[key] = defVal
ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
txt.Wrap(280)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetFPA)
saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
btnsizer.Add(saveBtn)
saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
btnsizer.Add(readBtn)
readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
Citation, size=(350, -1))
txt.Wrap(340)
MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(dlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
OKbtn.Bind(wx.EVT_BUTTON, _onOK)
Cbtn.Bind(wx.EVT_BUTTON, _onClose)
SetButtonStatus()
dlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(dlg)
dlg.SetMinSize(dlg.GetSize())
dlg.SendSizeEvent()
dlg.Raise()
def GetFPAInput(G2frame):
dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.
DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
MakeSimSizer(G2frame, dlg)
dlg.CenterOnParent()
dlg.Show()
return
# ======================================================================
def SetCu2Wave():
    '''Set the parameters to the two-line Cu K alpha 1+2 spectrum
    '''
    # each entry maps wavelength-index -> value, matching the GUI columns
    for key, values in (
            ('wave',   (1.540596, 1.544493)),
            ('int',    (0.653817, 0.346183)),
            ('lwidth', (0.501844, 0.626579))):
        parmDict[key] = dict(enumerate(values))
SetCu2Wave()  # populate parmDict with the default Cu K-alpha 1+2 doublet at import time
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
    """Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.

    Values are read into and edited in place in the module-level
    :data:`parmDict`; if entries are missing there, default values from
    globals BraggBrentanoParms, BBPointDetector & BBPSDDetector are used.

    :param wx.Frame G2frame: main GSAS-II window
    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param function SetButtonStatus: callback used to enable/disable the
      caller's buttons; called with ``done=True`` when OK is pressed
    :returns: None; the sizer is installed directly on FPdlg
    """
    def _onOK(event):
        # accept: convert the Topas-style values to SI and stash in NISTparms
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True)  # done=True triggers the next dialog
        FPdlg.Destroy()
    def _onClose(event):
        SetButtonStatus()
        FPdlg.Destroy()
    def _onAddWave(event):
        # add one emission-line column; rebuild the dialog in place
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onRemWave(event):
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu5Wave(event):
        # five-line Cu K-alpha emission model (wavelengths in Angstroms,
        # relative intensities, Lorentzian widths in milli-Angstroms)
        parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
            1.541058, 1.54441, 1.544721))}
        parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
            0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
            0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu2Wave(event):
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetPoint(event):
        # switch detector model; rebuilds the dialog with the other item list
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
            SetButtonStatus)
    def _onSetPSD(event):
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
            SetButtonStatus)
    def PlotTopasFPA(event):
        # compute and plot a single FPA peak at simParms['plotpos']
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'
            ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
                'plotpos'], simParms['calcwid'], simParms['step'])
        except Exception as err:
            msg = 'Error computing convolution, revise input'
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame, NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx - pkPts // 2
        # splice the computed profile into intArr, clipping at either edge;
        # profile is scaled so its maximum is 10000
        if startInd < 0:
            intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                ] / pkMax
        elif startInd > len(intArr):
            return
        elif startInd + pkPts >= len(intArr):
            offset = pkPts - len(intArr[startInd:])
            intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
                :-offset] / pkMax
        else:
            intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
            labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
            lines=True)
    # ---- rebuild GUI: clear any previous sizer (dialog is reused on mode
    # or wavelength-count changes) ----
    if FPdlg.GetSizer():
        FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms + BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms + BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1, 5))
    # grid of emission-line entries: one column of labels + one per wavelength
    waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
    for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
        u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
        ):
        text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text, 0, wx.EXPAND)
        if prm not in parmDict:
            parmDict[prm] = {}
        for i in range(numWave):
            if i not in parmDict[prm]:
                parmDict[prm][i] = defVal
            ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
            waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1, 5))
    # buttons to change the number of emission lines / load presets
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # detector-type selector; the current mode's button is disabled
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # table of instrument parameters (label / editable value / help text)
    prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
        )
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for lbl, defVal, text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
            ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
        if lbl not in parmDict:
            parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # single-peak preview plot controls
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
    if 'plotpos' not in simParms:
        simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """convert Topas-type parameters to SI units for NIST and place in a dict sorted
    according to use in each convoluter

    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`

    Results are written into the module-level :data:`NISTparms` nested dict,
    with one entry per convolver plus a '' entry holding global parameters
    (the function itself returns None).
    """
    wavenums = range(InpParms['numWave'])
    # wavelengths are entered in Angstroms; convert to meters
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    # normalize so the strongest emission line has intensity 1.0
    source_intensities = np.array(la) / max(la)
    # Lorentzian widths are entered in milli-Angstroms (Ang/1000); to meters
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    # Gaussian emission widths are fixed at 0.001 milli-Angstrom per line
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # NOTE(review): equal lengths are nudged apart here, presumably to avoid
    # a degenerate case in the axial convolver -- confirm against NIST docs
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    # optional convolvers below: each is added when its inputs are non-zero,
    # and any stale entry from a previous transfer is deleted otherwise
    if InpParms.get('LAC_cm', 0) > 0:
        # LAC entered in cm^-1 -> m^-1; thickness mm -> m
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
        'lpsd_th2_angular_range', 0) > 0:
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    if InpParms.get('Specimen_Displacement'):
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    if InpParms.get('receiving_slit_width'):
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
        'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # global parameters live under the '' key; wavelength of strongest line
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
    """Create a peak profile object using the NIST XRD Fundamental
    Parameters Code.

    :returns: a profile object that can provide information on
      each convolution or compute the composite peak shape.
    """
    profile = FP.FP_profile(
        anglemode='twotheta',
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling', 10))
    profile.debug_cache = False
    # the '' key holds global settings; every other key names one convolver
    for convolver, settings in NISTparms.items():
        if convolver:
            profile.set_parameters(convolver=convolver, **settings)
        else:
            profile.set_parameters(**settings)
    return profile
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
    """Compute a single peak using a NIST profile object

    :param object NISTpk: a peak profile computational object from the
      NIST XRD Fundamental Parameters Code, typically established from
      a call to :func:`SetupFPAcalc`
    :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
    :param float twotheta: nominal center of peak (degrees)
    :param float calcwid: width to perform convolution (degrees)
    :param float step: step size
    :returns: a 2-tuple ``(center_bin_idx, profile)`` where center_bin_idx is
      the index of the ttArr bin nearest to twotheta and profile is the
      result of ``NISTpk.compute_line_profile()``
    """
    # locate the grid bin for the peak center, clamped to the last bin
    center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)
    # window the computation around the peak; the window is centered on the
    # actual grid point so the returned profile aligns with ttArr bins
    NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,
        twotheta_window_center_deg=ttArr[center_bin_idx],
        twotheta_approx_window_fullwidth_deg=calcwid)
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    return center_bin_idx, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
    """Create a GUI to get simulation with parameters for Fundamental
    Parameters fitting.

    :param wx.Frame G2frame: main GSAS-II window
    :param wx.Window dlg: Frame or Dialog where GUI will appear
    :returns: None; the sizer is installed directly on dlg
    """
    def _onOK(event):
        # validate the simulation range, simulate a pattern of FPA peaks,
        # then chain into FitFPApeaks via wx.CallAfter
        msg = ''
        if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
            msg += 'First peak minus half the calc width is too low'
        if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
            if msg:
                msg += '\n'
            msg += 'Last peak plus half the calc width is too high'
        if simParms['npeaks'] < 8:
            if msg:
                msg += '\n'
            msg += 'At least 8 peaks are needed'
        if msg:
            G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
            return
        # grid of 2theta values covering all peaks plus calc-width margins
        ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
            1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
            'step'])
        intArr = np.zeros_like(ttArr)
        # evenly-spaced peak positions across the requested range
        peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
            simParms['npeaks'], endpoint=True)
        peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
        NISTpk = setupFPAcalc()
        # track min/max number of points above half-max over all peaks
        minPtsHM = len(intArr)
        maxPtsHM = 0
        for num, twoth_peak in enumerate(peaklist):
            try:
                center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
                    twoth_peak, simParms['calcwid'], simParms['step'])
            except:
                if msg:
                    msg += '\n'
                # NOTE(review): assignment (not +=) discards any earlier
                # message text -- confirm whether += was intended
                msg = 'Error computing convolution, revise input'
                continue
            if num == 0:
                # show the individual convolver contributions for one peak
                G2plt.PlotFPAconvolutors(G2frame, NISTpk)
            pkMax = peakObj.peak.max()
            pkPts = len(peakObj.peak)
            minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
            maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
            # splice the profile into intArr, clipping at either edge;
            # profiles are scaled so their maximum is 10000
            startInd = center_bin_idx - pkPts // 2
            if startInd < 0:
                intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                    ] / pkMax
            elif startInd > len(intArr):
                break
            elif startInd + pkPts >= len(intArr):
                offset = pkPts - len(intArr[startInd:])
                intArr[startInd:startInd + pkPts - offset
                    ] += 10000 * peakObj.peak[:-offset] / pkMax
            else:
                intArr[startInd:startInd + pkPts
                    ] += 10000 * peakObj.peak / pkMax
        # sanity checks on peak widths vs. spacing and step size
        if maxPtsHM * simParms['step'] > peakSpacing / 4:
            if msg:
                msg += '\n'
            msg += (
                'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
                .format(maxPtsHM * simParms['step'], peakSpacing))
        if minPtsHM < 10:
            if msg:
                msg += '\n'
            msg += (
                'There are only {} points above the half-max. 10 are needed. Dropping step size.'
                .format(minPtsHM))
            simParms['step'] *= 0.5  # halve the step for the next attempt
        if msg:
            G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
            wx.CallAfter(MakeSimSizer, G2frame, dlg)  # rebuild and retry
            return
        dlg.Destroy()
        wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
    def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
        """Perform a peak fit to the FP simulated pattern
        """
        # modal-looking "please wait" box shown during the fits
        plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
            RESIZE_BORDER)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
        txt = wx.StaticText(plswait, wx.ID_ANY,
            'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
        vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
        vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
        plswait.SetSizer(vbox)
        plswait.Layout()
        plswait.CenterOnParent()
        plswait.Show()
        wx.BeginBusyCursor()
        # Lam1 = strongest emission line, Lam2 = second strongest (if any),
        # both converted from meters to Angstroms (factor 1e10)
        ints = list(NISTparms['emission']['emiss_intensities'])
        Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
            ] * 10000000000.0
        if len(ints) > 1:
            ints[np.argmax(ints)] = -1  # mask the strongest to find the next
            Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
                ] * 10000000000.0
        else:
            Lam2 = None
        histId = G2frame.AddSimulatedPowder(ttArr, intArr,
            'NIST Fundamental Parameters simulation', Lam1, Lam2)
        controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, G2frame.root, 'Controls'))
        controldat = controls.get('data', {'deriv type': 'analytic',
            'min dM/M': 0.001})
        Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
            (G2frame, histId, 'Instrument Parameters'))
        peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, histId, 'Peak List'))
        # turn off background refinement: fixed flat zero background
        bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
            GetGPXtreeItemId(G2frame, histId, 'Background'))
        bkg1[1] = False
        bkg1[2] = 0
        bkg1[3] = 0.0
        limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, histId, 'Limits'))
        # seed SH/L from the axial-divergence geometry; best-effort only
        # (KeyError if the axial convolver was not configured)
        try:
            Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
                NISTparms['axial']['slit_length_source']) / NISTparms[''][
                'diffractometer_radius']
        except:
            pass
        # seed one peak entry per simulated position, with area integrated
        # over +/- maxPtsHM bins around the peak
        for pos in peaklist:
            i = ttArr.searchsorted(pos)
            area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
                maxPtsHM)])
            peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
                area))
        histData = G2frame.GPXtree.GetItemPyData(histId)
        bxye = np.zeros(len(histData[1][1]))
        # staged least-squares refinement: (1) intensities only,
        # (2) + peak positions, (3) + U,V,W,X,Y profile terms, (4) + SH/L
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False,
            controldat, None)[0]
        for pk in peakData['peaks']:
            pk[1] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        for p in ('U', 'V', 'W', 'X', 'Y'):
            Parms[p][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        Parms['SH/L'][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        # copy refined values into the "default" slot and clear refine flags
        for p in Parms:
            if len(Parms[p]) == 3:
                Parms[p][0] = Parms[p][1]
                Parms[p][2] = False
        wx.EndBusyCursor()
        plswait.Destroy()
        # offer to save the fitted values as a .instprm file
        pth = G2G.GetExportPath(G2frame)
        fldlg = wx.FileDialog(G2frame,
            'Set name to save GSAS-II instrument parameters file', pth, '',
            'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
            wx.FD_OVERWRITE_PROMPT)
        try:
            if fldlg.ShowModal() == wx.ID_OK:
                filename = fldlg.GetPath()
                filename = os.path.splitext(filename)[0] + '.instprm'
                File = open(filename, 'w')
                File.write(
                    '#GSAS-II instrument parameter file; do not add/delete items!\n'
                    )
                for item in Parms:
                    File.write(item + ':' + str(Parms[item][1]) + '\n')
                File.close()
                print('Instrument parameters saved to: ' + filename)
        finally:
            fldlg.Destroy()
    def _onClose(event):
        dlg.Destroy()
    def SetButtonStatus(done=False):
        # OK/Save are only usable once FPA values have been transferred
        OKbtn.Enable(bool(NISTparms))
        saveBtn.Enable(bool(NISTparms))
        if done:
            _onOK(None)
    def _onSetFPA(event):
        # open the Topas-style FPA parameter dialog (point detector default)
        FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
            DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
        FPdlg.CenterOnParent()
        FPdlg.Raise()
        FPdlg.Show()
    def _onSaveFPA(event):
        # write NISTparms as a Python-literal dict for later re-reading
        filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
            'dict of NIST FPA values', dlg)
        if not filename:
            return
        fp = open(filename, 'w')
        fp.write(
            '# parameters to be used in the NIST XRD Fundamental Parameters program\n'
            )
        fp.write('{\n')
        for key in sorted(NISTparms):
            fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
            if not key:
                fp.write(' # global parameters')
            fp.write('\n')
        fp.write('}\n')
        fp.close()
    def _onReadFPA(event):
        filename = G2G.GetImportFile(G2frame, message=
            'Read file with dict of values for NIST Fundamental Parameters',
            parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
        if not filename:
            return
        if not filename[0]:
            return
        try:
            txt = open(filename[0], 'r').read()
            NISTparms.clear()
            array = np.array  # name needed so eval can rebuild np arrays
            # SECURITY: eval of file contents -- only open trusted .NISTfpa
            # files written by _onSaveFPA
            d = eval(txt)
            NISTparms.update(d)
        except Exception as err:
            G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
                filename, err), 'Bad dict input')
        SetButtonStatus()
    # ---- build the dialog contents (dialog may be rebuilt after an error) --
    if dlg.GetSizer():
        dlg.GetSizer().Clear(True)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
        'Fit Profile Parameters to Peaks from Fundamental Parameters',
        style=wx.ALIGN_CENTER), 0, wx.EXPAND)
    MainSizer.Add((-1, 5))
    # simulation-range entries (defaults applied on first display)
    prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
    text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for key, defVal, text in (('minTT', 3.0,
        'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
        'Location of last peak in 2theta (deg)'), ('step', 0.01,
        'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
        'Number of peaks'), ('calcwid', 2.0,
        'Range to compute each peak (deg 2theta)')):
        if key not in simParms:
            simParms[key] = defVal
        ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
        txt.Wrap(280)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetFPA)
    saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
    btnsizer.Add(saveBtn)
    saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
    readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
    btnsizer.Add(readBtn)
    readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
        Citation, size=(350, -1))
    txt.Wrap(340)
    MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(dlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    SetButtonStatus()
    dlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(dlg)
    dlg.SetMinSize(dlg.GetSize())
    dlg.SendSizeEvent()
    dlg.Raise()
def GetFPAInput(G2frame):
    '''Open a modeless dialog that collects Fundamental Parameters
    simulation settings from the user.
    '''
    style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
    dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=style)
    MakeSimSizer(G2frame, dlg)
    dlg.CenterOnParent()
    dlg.Show()
# ======================================================================
simParms = {}
'''Parameters used to set the range for pattern simulation
'''
parmDict = {'numWave': 2}
'''Parameter dict used for reading Topas-style values. These are
converted to SI units and placed into :data:`NISTparms`
'''
NISTparms = {}
'''Parameters in a nested dict, with an entry for each convolver. Entries in
those dicts have values in SI units. NISTparms can be input directly or
can be created from :data:`parmDict` by :func:`XferFPAsettings`
'''
BraggBrentanoParms = [('divergence', 0.5,
'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,
'Soller slit axial divergence (degrees)'), ('Rs', 220,
'Diffractometer radius (mm)'), ('filament_length', 12.0,
'X-ray tube line focus length (mm)'), ('sample_length', 12.0,
'Illuminated sample length in axial direction (mm)'), (
'receiving_slit_length', 12.0,
'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,
'Linear absorption coef. adjusted for packing density (cm-1)'), (
'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',
8, 'Number of Fourier-space bins per two-theta step'), (
'tube-tails_width', 0.04,
'Tube filament width, in projection at takeoff angle (mm)'), (
'tube-tails_L-tail', -1.0,
'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',
1.0, 'Right-side tube tails width, in projection (mm)'), (
'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]
'''FPA dict entries used in :func:`MakeTopasFPASizer`. Each tuple contains
a dict key, a default value and a description. These are the parameters
needed for all Bragg-Brentano instruments.
'''
BBPointDetector = [('receiving_slit_width', 0.2,
'Width of receiving slit (mm)')]
'''Additional FPA dict entries used in :func:`MakeTopasFPASizer`,
needed for Bragg-Brentano instruments with point detectors.
'''
BBPSDDetector = [('lpsd_th2_angular_range', 3.0,
'Angular range observed by PSD (degrees 2Theta)'), (
'lpsd_equitorial_divergence', 0.1,
'Equatorial divergence of the primary beam (degrees)')]
'''Additional FPA dict entries used in :func:`MakeTopasFPASizer`,
needed for Bragg-Brentano instruments with linear (1-D) PSD detectors.
'''
Citation = """MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.
"""
def SetCu2Wave():
    '''Set the parameters to the two-line Cu K alpha 1+2 spectrum
    '''
    # each entry maps wavelength-index -> value, matching the GUI columns
    for key, values in (
            ('wave',   (1.540596, 1.544493)),
            ('int',    (0.653817, 0.346183)),
            ('lwidth', (0.501844, 0.626579))):
        parmDict[key] = dict(enumerate(values))
SetCu2Wave()  # populate parmDict with the default Cu K-alpha 1+2 doublet at import time
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
    """Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.

    Values are read into and edited in place in the module-level
    :data:`parmDict`; if entries are missing there, default values from
    globals BraggBrentanoParms, BBPointDetector & BBPSDDetector are used.

    :param wx.Frame G2frame: main GSAS-II window
    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param function SetButtonStatus: callback used to enable/disable the
      caller's buttons; called with ``done=True`` when OK is pressed
    :returns: None; the sizer is installed directly on FPdlg
    """
    def _onOK(event):
        # accept: convert the Topas-style values to SI and stash in NISTparms
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True)  # done=True triggers the next dialog
        FPdlg.Destroy()
    def _onClose(event):
        SetButtonStatus()
        FPdlg.Destroy()
    def _onAddWave(event):
        # add one emission-line column; rebuild the dialog in place
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onRemWave(event):
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu5Wave(event):
        # five-line Cu K-alpha emission model (wavelengths in Angstroms,
        # relative intensities, Lorentzian widths in milli-Angstroms)
        parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
            1.541058, 1.54441, 1.544721))}
        parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
            0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
            0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetCu2Wave(event):
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)
    def _onSetPoint(event):
        # switch detector model; rebuilds the dialog with the other item list
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
            SetButtonStatus)
    def _onSetPSD(event):
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
            SetButtonStatus)
    def PlotTopasFPA(event):
        # compute and plot a single FPA peak at simParms['plotpos']
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'
            ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
                'plotpos'], simParms['calcwid'], simParms['step'])
        except Exception as err:
            msg = 'Error computing convolution, revise input'
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame, NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx - pkPts // 2
        # splice the computed profile into intArr, clipping at either edge;
        # profile is scaled so its maximum is 10000
        if startInd < 0:
            intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                ] / pkMax
        elif startInd > len(intArr):
            return
        elif startInd + pkPts >= len(intArr):
            offset = pkPts - len(intArr[startInd:])
            intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
                :-offset] / pkMax
        else:
            intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
            labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
            lines=True)
    # ---- rebuild GUI: clear any previous sizer (dialog is reused on mode
    # or wavelength-count changes) ----
    if FPdlg.GetSizer():
        FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms + BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms + BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1, 5))
    # grid of emission-line entries: one column of labels + one per wavelength
    waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
    for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
        u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
        ):
        text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text, 0, wx.EXPAND)
        if prm not in parmDict:
            parmDict[prm] = {}
        for i in range(numWave):
            if i not in parmDict[prm]:
                parmDict[prm][i] = defVal
            ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
            waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1, 5))
    # buttons to change the number of emission lines / load presets
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # detector-type selector; the current mode's button is disabled
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # table of instrument parameters (label / editable value / help text)
    prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
        )
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for lbl, defVal, text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
            ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
        if lbl not in parmDict:
            parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # single-peak preview plot controls
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
    if 'plotpos' not in simParms:
        simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """convert Topas-type parameters to SI units for NIST and place in a dict sorted
    according to use in each convoluter.

    Results are stored in the module-level dict ``NISTparms``, keyed by
    convolver name; the entry with an empty-string key holds the global
    (non-convolver) parameters. Optional convolvers are added only when
    their inputs are meaningful, and stale entries are removed otherwise.

    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`
    :returns: a nested dict with global parameters and those for each convolution
    """
    wavenums = range(InpParms['numWave'])
    # wavelengths are entered in Angstroms; convert to meters
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    # normalize intensities so the strongest emission line is 1.0
    source_intensities = np.array(la) / max(la)
    # Lorentz widths are entered in milli-Angstroms; convert to meters
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    # Gaussian emission widths are fixed at 0.001 milli-Angstroms
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # the axial convolver misbehaves with exactly equal lengths; nudge one
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    if InpParms.get('LAC_cm', 0) > 0:
        # absorption coefficient: cm^-1 -> m^-1; thickness: mm -> m
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
        'lpsd_th2_angular_range', 0) > 0:
        # NOTE(review): arcsin() of a degrees->radians conversion looks odd
        # (sin() would be the usual geometric form) — values agree for small
        # angles; confirm against the NIST FPA documentation
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    if InpParms.get('Specimen_Displacement'):
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    if InpParms.get('receiving_slit_width'):
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
        'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # global parameters are stored under an empty key
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
    """Build and configure a peak-profile object from the NIST XRD
    Fundamental Parameters Code, transferring both the global settings
    (stored under the empty-string key of ``NISTparms``) and the
    per-convolver parameter sets onto it.

    :returns: a profile object that can provide information on
      each convolution or compute the composite peak shape.
    """
    profile = FP.FP_profile(
        anglemode='twotheta',
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling', 10))
    profile.debug_cache = False
    # an empty key holds the global (non-convolver) settings
    for convolver, parms in NISTparms.items():
        if convolver:
            profile.set_parameters(convolver=convolver, **parms)
        else:
            profile.set_parameters(**parms)
    return profile
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
    """Compute a single peak using a NIST profile object

    :param object NISTpk: a peak profile computational object from the
      NIST XRD Fundamental Parameters Code, typically established from
      a call to :func:`SetupFPAcalc`
    :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
    :param float twotheta: nominal center of peak (degrees)
    :param float calcwid: width to perform convolution (degrees)
    :param float step: step size
    :returns: a (center_bin_index, line_profile) pair
    """
    # locate the grid bin holding the peak center; searchsorted can return
    # len(ttArr) for a position past the grid, so clamp to the last bin
    idx = ttArr.searchsorted(twotheta)
    if idx > len(ttArr) - 1:
        idx = len(ttArr) - 1
    NISTpk.set_optimized_window(
        twotheta_exact_bin_spacing_deg=step,
        twotheta_window_center_deg=ttArr[idx],
        twotheta_approx_window_fullwidth_deg=calcwid)
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    return idx, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
    """Create a GUI to get simulation with parameters for Fundamental
    Parameters fitting.

    :param wx.Window dlg: Frame or Dialog where GUI will appear
    :returns: a sizer with the GUI controls
    """
    def _onOK(event):
        """Validate the simulation settings, compute a simulated pattern
        from the FPA values and (via wx.CallAfter) hand it to
        :func:`FitFPApeaks`. On bad input, show the accumulated messages
        and re-post the dialog.
        """
        msg = ''
        # sanity-check the requested 2theta range and peak count
        if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
            msg += 'First peak minus half the calc width is too low'
        if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
            if msg:
                msg += '\n'
            msg += 'Last peak plus half the calc width is too high'
        if simParms['npeaks'] < 8:
            if msg:
                msg += '\n'
            msg += 'At least 8 peaks are needed'
        if msg:
            G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
            return
        # create an evenly-spaced 2theta grid and a zeroed intensity array
        ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
            1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
            'step'])
        intArr = np.zeros_like(ttArr)
        peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
            simParms['npeaks'], endpoint=True)
        peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
        NISTpk = setupFPAcalc()
        minPtsHM = len(intArr)  # smallest # of points above half-max
        maxPtsHM = 0            # largest # of points above half-max
        for num, twoth_peak in enumerate(peaklist):
            try:
                center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
                    twoth_peak, simParms['calcwid'], simParms['step'])
            except Exception:
                # was a bare "except:", which also traps KeyboardInterrupt
                if msg:
                    msg += '\n'
                # bug fix: this previously used "msg = ...", which discarded
                # any messages (and the newline) accumulated above
                msg += 'Error computing convolution, revise input'
                continue
            if num == 0:
                # show the individual convolvers for the first peak
                G2plt.PlotFPAconvolutors(G2frame, NISTpk)
            pkMax = peakObj.peak.max()
            pkPts = len(peakObj.peak)
            minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
            maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
            startInd = center_bin_idx - pkPts // 2
            # add the peak, scaled so its maximum is 10000, clipping any
            # part that falls off either end of the pattern
            if startInd < 0:
                intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                    ] / pkMax
            elif startInd > len(intArr):
                break
            elif startInd + pkPts >= len(intArr):
                offset = pkPts - len(intArr[startInd:])
                intArr[startInd:startInd + pkPts - offset
                    ] += 10000 * peakObj.peak[:-offset] / pkMax
            else:
                intArr[startInd:startInd + pkPts
                    ] += 10000 * peakObj.peak / pkMax
        # require well-separated, well-sampled peaks
        if maxPtsHM * simParms['step'] > peakSpacing / 4:
            if msg:
                msg += '\n'
            msg += (
                'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
                .format(maxPtsHM * simParms['step'], peakSpacing))
        if minPtsHM < 10:
            if msg:
                msg += '\n'
            msg += (
                'There are only {} points above the half-max. 10 are needed. Dropping step size.'
                .format(minPtsHM))
            simParms['step'] *= 0.5
        if msg:
            G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
            wx.CallAfter(MakeSimSizer, G2frame, dlg)
            return
        dlg.Destroy()
        wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)

    def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
        """Perform a peak fit to the FP simulated pattern, then save the
        fitted profile terms as a GSAS-II instrument parameter (.instprm)
        file.
        """
        # modeless "please wait" box shown while the fit runs
        plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
            RESIZE_BORDER)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
        txt = wx.StaticText(plswait, wx.ID_ANY,
            'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
        vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
        vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
        plswait.SetSizer(vbox)
        plswait.Layout()
        plswait.CenterOnParent()
        plswait.Show()
        wx.BeginBusyCursor()
        # Ka1 is the most intense line; Ka2 (if any) is the next strongest.
        # Wavelengths are converted from meters to Angstroms (factor 1e10).
        ints = list(NISTparms['emission']['emiss_intensities'])
        Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
            ] * 10000000000.0
        if len(ints) > 1:
            ints[np.argmax(ints)] = -1
            Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
                ] * 10000000000.0
        else:
            Lam2 = None
        histId = G2frame.AddSimulatedPowder(ttArr, intArr,
            'NIST Fundamental Parameters simulation', Lam1, Lam2)
        controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, G2frame.root, 'Controls'))
        controldat = controls.get('data', {'deriv type': 'analytic',
            'min dM/M': 0.001})
        Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
            (G2frame, histId, 'Instrument Parameters'))
        peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, histId, 'Peak List'))
        # turn off background refinement (the simulated pattern has none)
        bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
            GetGPXtreeItemId(G2frame, histId, 'Background'))
        bkg1[1] = False
        bkg1[2] = 0
        bkg1[3] = 0.0
        limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
            G2frame, histId, 'Limits'))
        # estimate the axial-asymmetry term from the FPA geometry
        try:
            Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
                NISTparms['axial']['slit_length_source']) / NISTparms[''][
                'diffractometer_radius']
        except Exception:
            # no axial entry (or missing key); keep the default SH/L
            pass
        # seed one peak at each simulated position, estimating its area
        # from the intensity within +/- maxPtsHM bins of the center
        for pos in peaklist:
            i = ttArr.searchsorted(pos)
            area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
                maxPtsHM)])
            peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
                area))
        histData = G2frame.GPXtree.GetItemPyData(histId)
        bxye = np.zeros(len(histData[1][1]))
        # refine in stages: positions/areas first, then peak widths,
        # then the U,V,W,X,Y profile terms, and finally SH/L
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False,
            controldat, None)[0]
        for pk in peakData['peaks']:
            pk[1] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        for p in ('U', 'V', 'W', 'X', 'Y'):
            Parms[p][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        Parms['SH/L'][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
            limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
            )[0]
        # accept the fitted values and clear the refinement flags
        for p in Parms:
            if len(Parms[p]) == 3:
                Parms[p][0] = Parms[p][1]
                Parms[p][2] = False
        wx.EndBusyCursor()
        plswait.Destroy()
        # prompt for a file name and save the fitted instrument parameters
        pth = G2G.GetExportPath(G2frame)
        fldlg = wx.FileDialog(G2frame,
            'Set name to save GSAS-II instrument parameters file', pth, '',
            'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
            wx.FD_OVERWRITE_PROMPT)
        try:
            if fldlg.ShowModal() == wx.ID_OK:
                filename = fldlg.GetPath()
                filename = os.path.splitext(filename)[0] + '.instprm'
                File = open(filename, 'w')
                File.write(
                    '#GSAS-II instrument parameter file; do not add/delete items!\n'
                    )
                for item in Parms:
                    File.write(item + ':' + str(Parms[item][1]) + '\n')
                File.close()
                print('Instrument parameters saved to: ' + filename)
        finally:
            fldlg.Destroy()

    def _onClose(event):
        dlg.Destroy()

    def SetButtonStatus(done=False):
        # OK/Save are enabled only once FPA values have been entered
        OKbtn.Enable(bool(NISTparms))
        saveBtn.Enable(bool(NISTparms))
        if done:
            _onOK(None)

    def _onSetFPA(event):
        # post the dialog used to enter the FPA (Topas-style) values
        FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
            DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
        FPdlg.CenterOnParent()
        FPdlg.Raise()
        FPdlg.Show()

    def _onSaveFPA(event):
        # write NISTparms out as a Python dict literal
        filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
            'dict of NIST FPA values', dlg)
        if not filename:
            return
        fp = open(filename, 'w')
        fp.write(
            '# parameters to be used in the NIST XRD Fundamental Parameters program\n'
            )
        fp.write('{\n')
        for key in sorted(NISTparms):
            fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
            if not key:
                fp.write(' # global parameters')
            fp.write('\n')
        fp.write('}\n')
        fp.close()

    def _onReadFPA(event):
        filename = G2G.GetImportFile(G2frame, message=
            'Read file with dict of values for NIST Fundamental Parameters',
            parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
        if not filename:
            return
        if not filename[0]:
            return
        try:
            txt = open(filename[0], 'r').read()
            NISTparms.clear()
            array = np.array  # so array(...) in the file evaluates
            # SECURITY NOTE: eval() of file contents runs arbitrary code;
            # only files the user selects (and trusts) should be read here
            d = eval(txt)
            NISTparms.update(d)
        except Exception as err:
            G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
                filename, err), 'Bad dict input')
        SetButtonStatus()
    # ---- build the simulation-settings GUI --------------------------------
    if dlg.GetSizer():
        dlg.GetSizer().Clear(True)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
        'Fit Profile Parameters to Peaks from Fundamental Parameters',
        style=wx.ALIGN_CENTER), 0, wx.EXPAND)
    MainSizer.Add((-1, 5))
    # table of simulation settings: entry / explanation
    prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
    text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for key, defVal, text in (('minTT', 3.0,
        'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
        'Location of last peak in 2theta (deg)'), ('step', 0.01,
        'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
        'Number of peaks'), ('calcwid', 2.0,
        'Range to compute each peak (deg 2theta)')):
        if key not in simParms:
            simParms[key] = defVal
        ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
        txt.Wrap(280)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    # buttons for entering, saving and reading the FPA values
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetFPA)
    saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
    btnsizer.Add(saveBtn)
    saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
    readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
    btnsizer.Add(readBtn)
    readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
        Citation, size=(350, -1))
    txt.Wrap(340)
    MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(dlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    SetButtonStatus()
    dlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(dlg)
    dlg.SetMinSize(dlg.GetSize())
    dlg.SendSizeEvent()
    dlg.Raise()
def GetFPAInput(G2frame):
    """Post the (non-modal) Fundamental Parameters input dialog."""
    fpaDlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input',
                       style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
    MakeSimSizer(G2frame, fpaDlg)
    fpaDlg.CenterOnParent()
    fpaDlg.Show()
# -----------------------------------------------------------------------
# module setup: imports and default parameter tables
# -----------------------------------------------------------------------
from __future__ import division, print_function
import wx
import os.path
import numpy as np
import NIST_profile as FP
import GSASIIpath
import GSASIIctrlGUI as G2G
import GSASIIdataGUI as G2gd
import GSASIIplot as G2plt
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
simParms = {}
# Topas-style FPA input values, keyed by parameter label (GUI-editable)
parmDict = {'numWave': 2}
# NIST FPA code parameters in SI units, keyed by convolver name ('' = global)
NISTparms = {}
# Bragg-Brentano instrument parameters: (label, default value, explanation)
BraggBrentanoParms = [('divergence', 0.5,
'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,
'Soller slit axial divergence (degrees)'), ('Rs', 220,
'Diffractometer radius (mm)'), ('filament_length', 12.0,
'X-ray tube line focus length (mm)'), ('sample_length', 12.0,
'Illuminated sample length in axial direction (mm)'), (
'receiving_slit_length', 12.0,
'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,
'Linear absorption coef. adjusted for packing density (cm-1)'), (
'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',
8, 'Number of Fourier-space bins per two-theta step'), (
'tube-tails_width', 0.04,
'Tube filament width, in projection at takeoff angle (mm)'), (
'tube-tails_L-tail', -1.0,
'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',
1.0, 'Right-side tube tails width, in projection (mm)'), (
'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]
# additional parameters when a point detector is used
BBPointDetector = [('receiving_slit_width', 0.2,
'Width of receiving slit (mm)')]
# additional parameters when a linear position-sensitive detector is used
BBPSDDetector = [('lpsd_th2_angular_range', 3.0,
'Angular range observed by PSD (degrees 2Theta)'), (
'lpsd_equitorial_divergence', 0.1,
'Equatorial divergence of the primary beam (degrees)')]
# citation for the NIST Fundamental Parameters code
Citation = """MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.
"""
def SetCu2Wave():
    """Load the classic two-line Cu K-alpha (Ka1 + Ka2) spectrum into the
    Topas-style parameter dict ``parmDict``.
    """
    # per line: (wavelength in Angstroms, relative intensity, Lorentz width)
    ka1 = (1.540596, 0.653817, 0.501844)
    ka2 = (1.544493, 0.346183, 0.626579)
    for name, values in zip(('wave', 'int', 'lwidth'), zip(ka1, ka2)):
        parmDict[name] = dict(enumerate(values))
SetCu2Wave()
def MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):
    """Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.

    Values are read from and stored into the module-level dict ``parmDict``
    (defaults come from the module-level tables ``BraggBrentanoParms``,
    ``BBPointDetector`` and ``BBPSDDetector``). The dialog rebuilds itself
    in place (via wx.CallAfter) when the wavelength count or detector mode
    changes.

    :param wx.Frame G2frame: the main GSAS-II window
    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param callable SetButtonStatus: updates the buttons of the parent
      dialog; called with ``done=True`` when input is complete
    :returns: a sizer with the GUI controls
    """
    def _onOK(event):
        # transfer the inputs into NISTparms & close; signals completion
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True)
        FPdlg.Destroy()

    def _onClose(event):
        SetButtonStatus()
        FPdlg.Destroy()

    def _onAddWave(event):
        # add one wavelength column and rebuild the dialog in place
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)

    def _onRemWave(event):
        # remove the last wavelength column and rebuild the dialog
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)

    def _onSetCu5Wave(event):
        # load a five-line Cu K-alpha spectrum
        parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,
            1.541058, 1.54441, 1.544721))}
        parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791,
            0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437,
            0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)

    def _onSetCu2Wave(event):
        # load the standard two-line Cu K-alpha spectrum
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)

    def _onSetPoint(event):
        # switch to point-detector mode (rebuilds the dialog)
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',
            SetButtonStatus)

    def _onSetPSD(event):
        # switch to position-sensitive-detector mode (rebuilds the dialog)
        wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',
            SetButtonStatus)

    def PlotTopasFPA(event):
        # compute & plot a single peak at simParms['plotpos'] from the
        # current input values
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'
            ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[
                'plotpos'], simParms['calcwid'], simParms['step'])
        except Exception as err:
            msg = 'Error computing convolution, revise input'
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame, NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx - pkPts // 2
        # add the peak scaled to a max of 10000, clipping at either edge
        if startInd < 0:
            intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
                ] / pkMax
        elif startInd > len(intArr):
            return
        elif startInd + pkPts >= len(intArr):
            offset = pkPts - len(intArr[startInd:])
            intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[
                :-offset] / pkMax
        else:
            intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\theta, deg$',
            labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,
            lines=True)
    # ---- (re)build the dialog contents ------------------------------------
    if FPdlg.GetSizer():
        FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms + BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms + BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1, 5))
    # grid of wavelength/intensity/width values, one column per wavelength
    waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)
    for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',
        u'Lorentz Width\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)
        ):
        text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text, 0, wx.EXPAND)
        if prm not in parmDict:
            parmDict[prm] = {}
        for i in range(numWave):
            if i not in parmDict[prm]:
                parmDict[prm][i] = defVal
            ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))
            waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1, 5))
    # buttons to change the wavelength count / load standard spectra
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # buttons to switch detector type; the active mode's button is disabled
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, _onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 5))
    # table of instrument parameters: label / entry / explanation
    prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER
        )
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text, 0, wx.EXPAND)
    for lbl, defVal, text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.
            ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)
        if lbl not in parmDict:
            parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))
        prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
        txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # single-peak plot button with a position entry next to it
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))
    if 'plotpos' not in simParms:
        simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    # OK/Cancel buttons
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
    OKbtn.Bind(wx.EVT_BUTTON, _onOK)
    Cbtn.Bind(wx.EVT_BUTTON, _onClose)
    # size the dialog to its contents
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    """convert Topas-type parameters to SI units for NIST and place in a dict sorted
    according to use in each convoluter.

    Results are stored in the module-level dict ``NISTparms``, keyed by
    convolver name; the entry with an empty-string key holds the global
    (non-convolver) parameters. Optional convolvers are added only when
    their inputs are meaningful, and stale entries are removed otherwise.

    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`
    :returns: a nested dict with global parameters and those for each convolution
    """
    wavenums = range(InpParms['numWave'])
    # wavelengths are entered in Angstroms; convert to meters
    source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in
        wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    # normalize intensities so the strongest emission line is 1.0
    source_intensities = np.array(la) / max(la)
    # Lorentz widths are entered in milli-Angstroms; convert to meters
    source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for
        i in wavenums])
    # Gaussian emission widths are fixed at 0.001 milli-Angstroms
    source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in
        wavenums])
    NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,
        'emiss_intensities': source_intensities, 'emiss_gauss_widths':
        source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,
        'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),
        'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}
    # the axial convolver misbehaves with exactly equal lengths; nudge one
    if InpParms['filament_length'] == InpParms['receiving_slit_length']:
        InpParms['receiving_slit_length'] *= 1.00001
    NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *
        InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms
        ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[
        'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[
        'soller_angle'], 'angD_deg': InpParms['soller_angle']}
    if InpParms.get('LAC_cm', 0) > 0:
        # absorption coefficient: cm^-1 -> m^-1; thickness: mm -> m
        NISTparms['absorption'] = {'absorption_coefficient': InpParms[
            'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[
            'sample_thickness']}
    elif 'absorption' in NISTparms:
        del NISTparms['absorption']
    if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(
        'lpsd_th2_angular_range', 0) > 0:
        # NOTE(review): arcsin() of a degrees->radians conversion looks odd
        # (sin() would be the usual geometric form) — values agree for small
        # angles; confirm against the NIST FPA documentation
        PSDdetector_length_mm = np.arcsin(np.pi * InpParms[
            'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']
        NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[
            'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0,
            PSDdetector_length_mm / 1000.0)}
    elif 'si_psd' in NISTparms:
        del NISTparms['si_psd']
    if InpParms.get('Specimen_Displacement'):
        NISTparms['displacement'] = {'specimen_displacement': 0.001 *
            InpParms['Specimen_Displacement']}
    elif 'displacement' in NISTparms:
        del NISTparms['displacement']
    if InpParms.get('receiving_slit_width'):
        NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[
            'receiving_slit_width']}
    elif 'receiver_slit' in NISTparms:
        del NISTparms['receiver_slit']
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
        'tube-tails_rel-I', 0) > 0:
        NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(
            'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(
            'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(
            'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(
            'tube-tails_rel-I', 0.0)}
    elif 'tube_tails' in NISTparms:
        del NISTparms['tube_tails']
    # global parameters are stored under an empty key
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],
        'dominant_wavelength': max_wavelength, 'diffractometer_radius':
        0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}
def setupFPAcalc():
    """Build and configure a peak-profile object from the NIST XRD
    Fundamental Parameters Code, transferring both the global settings
    (stored under the empty-string key of ``NISTparms``) and the
    per-convolver parameter sets onto it.

    :returns: a profile object that can provide information on
      each convolution or compute the composite peak shape.
    """
    profile = FP.FP_profile(
        anglemode='twotheta',
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling', 10))
    profile.debug_cache = False
    # an empty key holds the global (non-convolver) settings
    for convolver, parms in NISTparms.items():
        if convolver:
            profile.set_parameters(convolver=convolver, **parms)
        else:
            profile.set_parameters(**parms)
    return profile
def doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):
    """Compute a single peak using a NIST profile object

    :param object NISTpk: a peak profile computational object from the
      NIST XRD Fundamental Parameters Code, typically established from
      a call to :func:`SetupFPAcalc`
    :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
    :param float twotheta: nominal center of peak (degrees)
    :param float calcwid: width to perform convolution (degrees)
    :param float step: step size
    :returns: a (center_bin_index, line_profile) pair
    """
    # locate the grid bin holding the peak center; searchsorted can return
    # len(ttArr) for a position past the grid, so clamp to the last bin
    idx = ttArr.searchsorted(twotheta)
    if idx > len(ttArr) - 1:
        idx = len(ttArr) - 1
    NISTpk.set_optimized_window(
        twotheta_exact_bin_spacing_deg=step,
        twotheta_window_center_deg=ttArr[idx],
        twotheta_approx_window_fullwidth_deg=calcwid)
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    return idx, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
"""Create a GUI to get simulation with parameters for Fundamental
Parameters fitting.
:param wx.Window dlg: Frame or Dialog where GUI will appear
:returns: a sizer with the GUI controls
"""
def _onOK(event):
msg = ''
if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:
msg += 'First peak minus half the calc width is too low'
if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:
if msg:
msg += '\n'
msg += 'Last peak plus half the calc width is too high'
if simParms['npeaks'] < 8:
if msg:
msg += '\n'
msg += 'At least 8 peaks are needed'
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
return
ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /
1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[
'step'])
intArr = np.zeros_like(ttArr)
peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],
simParms['npeaks'], endpoint=True)
peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)
NISTpk = setupFPAcalc()
minPtsHM = len(intArr)
maxPtsHM = 0
for num, twoth_peak in enumerate(peaklist):
try:
center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,
twoth_peak, simParms['calcwid'], simParms['step'])
except:
if msg:
msg += '\n'
msg = 'Error computing convolution, revise input'
continue
if num == 0:
G2plt.PlotFPAconvolutors(G2frame, NISTpk)
pkMax = peakObj.peak.max()
pkPts = len(peakObj.peak)
minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))
startInd = center_bin_idx - pkPts // 2
if startInd < 0:
intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:
] / pkMax
elif startInd > len(intArr):
break
elif startInd + pkPts >= len(intArr):
offset = pkPts - len(intArr[startInd:])
intArr[startInd:startInd + pkPts - offset
] += 10000 * peakObj.peak[:-offset] / pkMax
else:
intArr[startInd:startInd + pkPts
] += 10000 * peakObj.peak / pkMax
if maxPtsHM * simParms['step'] > peakSpacing / 4:
if msg:
msg += '\n'
msg += (
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'
.format(maxPtsHM * simParms['step'], peakSpacing))
if minPtsHM < 10:
if msg:
msg += '\n'
msg += (
'There are only {} points above the half-max. 10 are needed. Dropping step size.'
.format(minPtsHM))
simParms['step'] *= 0.5
if msg:
G2G.G2MessageBox(dlg, msg, 'Bad input, try again')
wx.CallAfter(MakeSimSizer, G2frame, dlg)
return
dlg.Destroy()
wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)
def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
"""Perform a peak fit to the FP simulated pattern
"""
plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.
RESIZE_BORDER)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
txt = wx.StaticText(plswait, wx.ID_ANY,
'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER)
vbox.Add(txt, 0, wx.ALL | wx.EXPAND)
vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)
plswait.SetSizer(vbox)
plswait.Layout()
plswait.CenterOnParent()
plswait.Show()
wx.BeginBusyCursor()
ints = list(NISTparms['emission']['emiss_intensities'])
Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
if len(ints) > 1:
ints[np.argmax(ints)] = -1
Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)
] * 10000000000.0
else:
Lam2 = None
histId = G2frame.AddSimulatedPowder(ttArr, intArr,
'NIST Fundamental Parameters simulation', Lam1, Lam2)
controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, G2frame.root, 'Controls'))
controldat = controls.get('data', {'deriv type': 'analytic',
'min dM/M': 0.001})
Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId
(G2frame, histId, 'Instrument Parameters'))
peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Peak List'))
bkg1, bkg2 = bkg = G2frame.GPXtree.GetItemPyData(G2gd.
GetGPXtreeItemId(G2frame, histId, 'Background'))
bkg1[1] = False
bkg1[2] = 0
bkg1[3] = 0.0
limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(
G2frame, histId, 'Limits'))
try:
Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +
NISTparms['axial']['slit_length_source']) / NISTparms[''][
'diffractometer_radius']
except:
pass
for pos in peaklist:
i = ttArr.searchsorted(pos)
area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +
maxPtsHM)])
peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,
area))
histData = G2frame.GPXtree.GetItemPyData(histId)
bxye = np.zeros(len(histData[1][1]))
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False,
controldat, None)[0]
for pk in peakData['peaks']:
pk[1] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in ('U', 'V', 'W', 'X', 'Y'):
Parms[p][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
Parms['SH/L'][2] = True
peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,
limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat
)[0]
for p in Parms:
if len(Parms[p]) == 3:
Parms[p][0] = Parms[p][1]
Parms[p][2] = False
wx.EndBusyCursor()
plswait.Destroy()
pth = G2G.GetExportPath(G2frame)
fldlg = wx.FileDialog(G2frame,
'Set name to save GSAS-II instrument parameters file', pth, '',
'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |
wx.FD_OVERWRITE_PROMPT)
try:
if fldlg.ShowModal() == wx.ID_OK:
filename = fldlg.GetPath()
filename = os.path.splitext(filename)[0] + '.instprm'
File = open(filename, 'w')
File.write(
'#GSAS-II instrument parameter file; do not add/delete items!\n'
)
for item in Parms:
File.write(item + ':' + str(Parms[item][1]) + '\n')
File.close()
print('Instrument parameters saved to: ' + filename)
finally:
fldlg.Destroy()
    def _onClose(event):
        # Cancel button: discard input and dismiss the simulation dialog
        dlg.Destroy()
    def SetButtonStatus(done=False):
        # Enable OK/Save only after FPA values have been entered
        # (NISTparms is populated by XferFPAsettings). When the FPA
        # sub-dialog signals done, start the simulation immediately.
        OKbtn.Enable(bool(NISTparms))
        saveBtn.Enable(bool(NISTparms))
        if done:
            _onOK(None)
    def _onSetFPA(event):
        # Open a non-modal dialog for Topas-style FPA input;
        # SetButtonStatus is invoked as a callback once values are set.
        FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.
            DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)
        FPdlg.CenterOnParent()
        FPdlg.Raise()
        FPdlg.Show()
def _onSaveFPA(event):
filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',
'dict of NIST FPA values', dlg)
if not filename:
return
fp = open(filename, 'w')
fp.write(
'# parameters to be used in the NIST XRD Fundamental Parameters program\n'
)
fp.write('{\n')
for key in sorted(NISTparms):
fp.write(" '" + key + "' : " + str(NISTparms[key]) + ',')
if not key:
fp.write(' # global parameters')
fp.write('\n')
fp.write('}\n')
fp.close()
    def _onReadFPA(event):
        # Read a .NISTfpa file (a Python dict literal written by the
        # save routine) and replace the contents of NISTparms with it.
        filename = G2G.GetImportFile(G2frame, message=
            'Read file with dict of values for NIST Fundamental Parameters',
            parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')
        if not filename:
            return
        if not filename[0]:
            return
        try:
            txt = open(filename[0], 'r').read()
            NISTparms.clear()
            # 'array' must be in scope: saved dicts can contain repr'd
            # numpy arrays such as array([...])
            array = np.array
            # SECURITY NOTE: eval() executes arbitrary code from the
            # file; only open .NISTfpa files from trusted sources
            d = eval(txt)
            NISTparms.update(d)
        except Exception as err:
            G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(
                filename, err), 'Bad dict input')
        SetButtonStatus()
if dlg.GetSizer():
dlg.GetSizer().Clear(True)
MainSizer = wx.BoxSizer(wx.VERTICAL)
MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,
'Fit Profile Parameters to Peaks from Fundamental Parameters',
style=wx.ALIGN_CENTER), 0, wx.EXPAND)
MainSizer.Add((-1, 5))
prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)
text = wx.StaticText(dlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)
text.SetBackgroundColour(wx.WHITE)
prmSizer.Add(text, 0, wx.EXPAND)
for key, defVal, text in (('minTT', 3.0,
'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,
'Location of last peak in 2theta (deg)'), ('step', 0.01,
'Pattern step size (deg 2theta)'), ('npeaks', 13.0,
'Number of peaks'), ('calcwid', 2.0,
'Range to compute each peak (deg 2theta)')):
if key not in simParms:
simParms[key] = defVal
ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))
prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))
txt.Wrap(280)
prmSizer.Add(txt)
MainSizer.Add(prmSizer)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')
btnsizer.Add(btn)
btn.Bind(wx.EVT_BUTTON, _onSetFPA)
saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')
btnsizer.Add(saveBtn)
saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)
readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')
btnsizer.Add(readBtn)
readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +
Citation, size=(350, -1))
txt.Wrap(340)
MainSizer.Add(txt, 0, wx.ALIGN_CENTER)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
OKbtn = wx.Button(dlg, wx.ID_OK)
OKbtn.SetDefault()
btnsizer.Add(OKbtn)
Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')
btnsizer.Add(Cbtn)
MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)
OKbtn.Bind(wx.EVT_BUTTON, _onOK)
Cbtn.Bind(wx.EVT_BUTTON, _onClose)
SetButtonStatus()
dlg.SetSizer(MainSizer)
MainSizer.Layout()
MainSizer.Fit(dlg)
dlg.SetMinSize(dlg.GetSize())
dlg.SendSizeEvent()
dlg.Raise()
def GetFPAInput(G2frame):
    '''Post the (modeless) dialog that drives the FPA simulation and fit.

    :param wx.Frame G2frame: the main GSAS-II frame, used as parent
    '''
    dlgStyle = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
    simDlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=dlgStyle)
    MakeSimSizer(G2frame, simDlg)
    simDlg.CenterOnParent()
    simDlg.Show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
########### SVN repository information ###################
# $Date: $
# $Author: $
# $Revision: $
# $URL: $
# $Id: $
########### SVN repository information ###################
'''
*GSASIIfpaGUI: Fundamental Parameters Routines*
===============================================
This module contains routines for getting Fundamental Parameters
Approach (FPA) input, setting up for running the NIST XRD Fundamental
Parameters Code, plotting the convolutors and computing a set of peaks
generated by that code.
'''
from __future__ import division, print_function
import wx
import os.path
import numpy as np
import NIST_profile as FP
import GSASIIpath
import GSASIIctrlGUI as G2G
import GSASIIdataGUI as G2gd
import GSASIIplot as G2plt
import GSASIImath as G2mth
import GSASIIpwd as G2pwd
simParms = {}
'''Parameters to set range for pattern simulation
'''
parmDict = {'numWave':2}
'''Parameter dict used for reading Topas-style values. These are
converted to SI units and placed into :data:`NISTparms`
'''
NISTparms = {}
'''Parameters in a nested dict, with an entry for each concolutor. Entries in
those dicts have values in SI units (of course). NISTparms can be
can be input directly or can be from created from :data:`parmDict`
by :func:`XferFPAsettings`
'''
BraggBrentanoParms = [
('divergence', 0.5, 'Bragg-Brentano divergence angle (degrees)'),
('soller_angle', 2.0, 'Soller slit axial divergence (degrees)'),
('Rs', 220, 'Diffractometer radius (mm)'),
('filament_length', 12., 'X-ray tube line focus length (mm)'),
('sample_length', 12., 'Illuminated sample length in axial direction (mm)'),
('receiving_slit_length', 12., 'Length of receiving slit in axial direction (mm)'),
('LAC_cm', 0.,'Linear absorption coef. adjusted for packing density (cm-1)'),
('sample_thickness', 1., 'Depth of sample (mm)'),
('convolution_steps', 8, 'Number of Fourier-space bins per two-theta step'),
('tube-tails_width', 0.04,'Tube filament width, in projection at takeoff angle (mm)'),
('tube-tails_L-tail', -1.,'Left-side tube tails width, in projection (mm)'),
('tube-tails_R-tail', 1.,'Right-side tube tails width, in projection (mm)'),
('tube-tails_rel-I', 0.001,'Tube tails fractional intensity (no units)'),
]
'''FPA dict entries used in :func:`MakeTopasFPASizer`. Tuple contains
a dict key, a default value and a description. These are the parameters
needed for all Bragg Brentano instruments
'''
BBPointDetector = [
('receiving_slit_width', 0.2, 'Width of receiving slit (mm)'),]
'''Additional FPA dict entries used in :func:`MakeTopasFPASizer`
needed for Bragg Brentano instruments with point detectors.
'''
BBPSDDetector = [
('lpsd_th2_angular_range', 3.0, 'Angular range observed by PSD (degrees 2Theta)'),
('lpsd_equitorial_divergence', 0.1, 'Equatorial divergence of the primary beam (degrees)'),]
'''Additional FPA dict entries used in :func:`MakeTopasFPASizer`
needed for Bragg Brentano instruments with linear (1-D) PSD detectors.
'''
Citation = '''MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.
'''
def SetCu2Wave():
    '''Load the default Cu K-alpha 1+2 two-line emission spectrum into
    :data:`parmDict`: wavelengths (Angstroms), relative intensities and
    Lorentzian widths, each keyed by line number.
    '''
    parmDict['wave'] = dict(enumerate((1.540596, 1.544493)))
    parmDict['int'] = dict(enumerate((0.653817, 0.346183)))
    parmDict['lwidth'] = dict(enumerate((0.501844, 0.626579)))
SetCu2Wave() # use these as default
def MakeTopasFPASizer(G2frame,FPdlg,mode,SetButtonStatus):
    '''Create a GUI with parameters for the NIST XRD Fundamental Parameters Code.
    Parameter input is modeled after Topas input parameters.

    Values are edited in place in the module-level :data:`parmDict`;
    pressing OK transfers them to :data:`NISTparms` via
    :func:`XferFPAsettings`.

    :param wx.Window FPdlg: Frame or Dialog where GUI will appear
    :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or
      (linear) position sensitive detector
    :param function SetButtonStatus: callback invoked when the dialog state
      changes; called with done=True to trigger the simulation
    '''
    def _onOK(event):
        # transfer values to NISTparms, then trigger the simulation
        XferFPAsettings(parmDict)
        SetButtonStatus(done=True) # done=True triggers the simulation
        FPdlg.Destroy()
    def _onClose(event):
        # Cancel: update button state on the parent, discard this dialog
        SetButtonStatus()
        FPdlg.Destroy()
    def _onAddWave(event):
        # add one emission line column and rebuild the GUI
        parmDict['numWave'] += 1
        wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
    def _onRemWave(event):
        # remove the last emission line column and rebuild the GUI
        parmDict['numWave'] -= 1
        wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
    def _onSetCu5Wave(event):
        # load the 5-line Cu K-alpha spectrum and rebuild the GUI
        parmDict['wave'] = {i:v for i,v in enumerate((1.534753,1.540596,1.541058,1.54441,1.544721))}
        parmDict['int'] = {i:v for i,v in enumerate((0.0159, 0.5791, 0.0762, 0.2417, 0.0871))}
        parmDict['lwidth'] = {i:v for i,v in enumerate((3.6854, 0.437, 0.6, 0.52, 0.62))}
        parmDict['numWave'] = 5
        wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
    def _onSetCu2Wave(event):
        # load the 2-line Cu K-alpha spectrum and rebuild the GUI
        SetCu2Wave()
        parmDict['numWave'] = 2
        wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)
    def _onSetPoint(event):
        # switch to point-detector mode (rebuilds the GUI)
        wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBpoint',SetButtonStatus)
    def _onSetPSD(event):
        # switch to position-sensitive-detector mode (rebuilds the GUI)
        wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBPSD',SetButtonStatus)
    def PlotTopasFPA(event):
        # compute and plot a single FPA peak at simParms['plotpos']
        XferFPAsettings(parmDict)
        ttArr = np.arange(max(0.5,
                            simParms['plotpos']-simParms['calcwid']),
                            simParms['plotpos']+simParms['calcwid'],
                            simParms['step'])
        intArr = np.zeros_like(ttArr)
        NISTpk = setupFPAcalc()
        try:
            center_bin_idx,peakObj = doFPAcalc(
                NISTpk,ttArr,simParms['plotpos'],simParms['calcwid'],
                simParms['step'])
        except Exception as err:
            msg = "Error computing convolution, revise input"
            print(msg)
            print(err)
            return
        G2plt.PlotFPAconvolutors(G2frame,NISTpk)
        pkPts = len(peakObj.peak)
        pkMax = peakObj.peak.max()
        startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data
        # scale peak so max I=10,000 and add into intensity array
        if startInd < 0:
            intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax
        elif startInd > len(intArr):
            return
        elif startInd+pkPts >= len(intArr):
            offset = pkPts - len( intArr[startInd:] )
            intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax
        else:
            intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax
        G2plt.PlotXY(G2frame, [(ttArr, intArr)],
                     labelX=r'$2\theta, deg$',
                     labelY=r'Intensity (arbitrary)',
                     Title='FPA peak', newPlot=True, lines=True)
    # (re)build the dialog contents from scratch
    if FPdlg.GetSizer(): FPdlg.GetSizer().Clear(True)
    numWave = parmDict['numWave']
    if mode == 'BBpoint':
        itemList = BraggBrentanoParms+BBPointDetector
    elif mode == 'BBPSD':
        itemList = BraggBrentanoParms+BBPSDDetector
    else:
        raise Exception('Unknown mode in MakeTopasFPASizer: '+mode)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add((-1,5))
    # one column per emission line, plus a label column
    waveSizer = wx.FlexGridSizer(cols=numWave+1,hgap=3,vgap=5)
    for lbl,prm,defVal in zip(
            (u'Wavelength (\u212b)','Rel. Intensity',u'Lorentz Width\n(\u212b/1000)'),
            ('wave','int','lwidth'),
            (0.0, 1.0, 0.1),
            ):
        text = wx.StaticText(FPdlg,wx.ID_ANY,lbl,style=wx.ALIGN_CENTER)
        text.SetBackgroundColour(wx.WHITE)
        waveSizer.Add(text,0,wx.EXPAND)
        if prm not in parmDict: parmDict[prm] = {}
        for i in range(numWave):
            if i not in parmDict[prm]: parmDict[prm][i] = defVal
            ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict[prm],i,size=(90,-1))
            waveSizer.Add(ctrl,1,wx.ALIGN_CENTER_VERTICAL,1)
    MainSizer.Add(waveSizer)
    MainSizer.Add((-1,5))
    # buttons to change the number of emission lines / load presets
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY,'Add col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onAddWave)
    btn = wx.Button(FPdlg, wx.ID_ANY,'Remove col')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onRemWave)
    btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa1+2')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onSetCu2Wave)
    btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa-5wave')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onSetCu5Wave)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1,5))
    # buttons to switch detector type (current mode's button is disabled)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY,'Point Dect.')
    btn.Enable(not mode == 'BBpoint')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onSetPoint)
    btn = wx.Button(FPdlg, wx.ID_ANY,'PSD')
    btn.Enable(not mode == 'BBPSD')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onSetPSD)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1,5))
    # table of instrument parameters (label / entry / explanation)
    prmSizer = wx.FlexGridSizer(cols=3,hgap=3,vgap=5)
    text = wx.StaticText(FPdlg,wx.ID_ANY,'label',style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text,0,wx.EXPAND)
    text = wx.StaticText(FPdlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text,0,wx.EXPAND)
    text = wx.StaticText(FPdlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text,0,wx.EXPAND)
    for lbl,defVal,text in itemList:
        prmSizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,lbl),1,wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL,1)
        if lbl not in parmDict: parmDict[lbl] = defVal
        ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict,lbl,size=(70,-1))
        prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)
        txt = wx.StaticText(FPdlg,wx.ID_ANY,text,size=(400,-1))
        txt.Wrap(380)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    MainSizer.Add((-1,4),1,wx.EXPAND,1)
    # single-peak plot control
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,PlotTopasFPA)
    btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' at '))
    if 'plotpos' not in simParms: simParms['plotpos'] = simParms['minTT']
    ctrl = G2G.ValidatedTxtCtrl(FPdlg,simParms,'plotpos',size=(70,-1))
    btnsizer.Add(ctrl)
    btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' deg.'))
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1,4),1,wx.EXPAND,1)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(FPdlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(FPdlg, wx.ID_CLOSE,"Cancel")
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1,4),1,wx.EXPAND,1)
    # bindings for close of window
    OKbtn.Bind(wx.EVT_BUTTON,_onOK)
    Cbtn.Bind(wx.EVT_BUTTON,_onClose)
    FPdlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(FPdlg)
    FPdlg.SetMinSize(FPdlg.GetSize())
    FPdlg.SendSizeEvent()
def XferFPAsettings(InpParms):
    '''Convert Topas-type parameters to SI units for NIST and place them in
    the module-level :data:`NISTparms` dict, grouped by the convolver that
    uses them (the entry with an empty-string key holds global values).

    Optional convolvers (absorption, si_psd, displacement, receiver_slit,
    tube_tails) are added when their inputs are set and removed from
    NISTparms otherwise.

    :param dict InpParms: a dict with Topas-like parameters, as set in
      :func:`MakeTopasFPASizer`. NOTE: may be mutated (see the
      slit-length workaround below).
    '''
    wavenums = range(InpParms['numWave'])
    # Topas wavelengths are in Angstroms -> meters
    source_wavelengths_m = 1.e-10 * np.array([InpParms['wave'][i] for i in wavenums])
    la = [InpParms['int'][i] for i in wavenums]
    # normalize so the strongest line has intensity 1
    source_intensities = np.array(la)/max(la)
    # widths entered in milli-Angstroms -> meters
    source_lor_widths_m = 1.e-10 * 1.e-3 * np.array([InpParms['lwidth'][i] for i in wavenums])
    source_gauss_widths_m = 1.e-10 * 1.e-3 * np.array([0.001 for i in wavenums])
    NISTparms["emission"] = {'emiss_wavelengths' : source_wavelengths_m,
                'emiss_intensities' : source_intensities,
                'emiss_gauss_widths' : source_gauss_widths_m,
                'emiss_lor_widths' : source_lor_widths_m,
                'crystallite_size_gauss' : 1.e-9 * InpParms.get('Size_G',1e6),
                'crystallite_size_lor' : 1.e-9 * InpParms.get('Size_L',1e6)}
    # NOTE: this mutates the caller's dict to dodge a failure in the NIST
    # axial convolver when both slit lengths are exactly equal
    if InpParms['filament_length'] == InpParms['receiving_slit_length']: # workaround:
        InpParms['receiving_slit_length'] *= 1.00001 # avoid bug when slit lengths are identical
    NISTparms["axial"] = {
            'axDiv':"full", 'slit_length_source' : 1e-3*InpParms['filament_length'],
            'slit_length_target' : 1e-3*InpParms['receiving_slit_length'],
            'length_sample' : 1e-3 * InpParms['sample_length'],
            'n_integral_points' : 10,
            'angI_deg' : InpParms['soller_angle'],
            'angD_deg': InpParms['soller_angle']
            }
    if InpParms.get('LAC_cm',0) > 0:
        NISTparms["absorption"] = {
            'absorption_coefficient': InpParms['LAC_cm']*100, #like LaB6, in m^(-1)
            'sample_thickness': 1e-3 * InpParms['sample_thickness'],
            }
    elif "absorption" in NISTparms:
        del NISTparms["absorption"]
    if InpParms.get('lpsd_equitorial_divergence',0) > 0 and InpParms.get(
            'lpsd_th2_angular_range',0) > 0:
        # NOTE(review): np.arcsin of an angle already in radians looks
        # suspicious for an arc length (Rs * angle_rad would be the usual
        # form) -- confirm against the NIST FPA reference before changing
        PSDdetector_length_mm=np.arcsin(np.pi*InpParms['lpsd_th2_angular_range']/180.
                                            )*InpParms['Rs'] # mm
        NISTparms["si_psd"] = {
            'equatorial_divergence_deg': InpParms['lpsd_equitorial_divergence'],
            'si_psd_window_bounds': (0.,PSDdetector_length_mm/1000.)
            }
    elif "si_psd" in NISTparms:
        del NISTparms["si_psd"]
    if InpParms.get('Specimen_Displacement'):
        NISTparms["displacement"] = {'specimen_displacement': 1e-3 * InpParms['Specimen_Displacement']}
    elif "displacement" in NISTparms:
        del NISTparms["displacement"]
    if InpParms.get('receiving_slit_width'):
        NISTparms["receiver_slit"] = {'slit_width':1e-3*InpParms['receiving_slit_width']}
    elif "receiver_slit" in NISTparms:
        del NISTparms["receiver_slit"]
    if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(
            'tube-tails_rel-I',0) > 0:
        NISTparms["tube_tails"] = {
            'main_width' : 1e-3 * InpParms.get('tube-tails_width', 0.),
            'tail_left' : -1e-3 * InpParms.get('tube-tails_L-tail',0.),
            'tail_right' : 1e-3 * InpParms.get('tube-tails_R-tail',0.),
            'tail_intens' : InpParms.get('tube-tails_rel-I',0.),}
    elif "tube_tails" in NISTparms:
        del NISTparms["tube_tails"]
    # set Global parameters (stored under the empty-string key)
    max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]
    NISTparms[""] = {
        'equatorial_divergence_deg' : InpParms['divergence'],
        'dominant_wavelength' : max_wavelength,
        'diffractometer_radius' : 1e-3* InpParms['Rs'],
        'oversampling' : InpParms['convolution_steps'],
        }
def setupFPAcalc():
    '''Build and configure a NIST FP_profile peak-profile object.

    The object is created in two-theta mode and then loaded with every
    convolver parameter set currently stored in :data:`NISTparms`.

    :returns: a configured profile object that can provide information on
      each convolution or compute the composite peak shape.
    '''
    profile = FP.FP_profile(
        anglemode="twotheta",
        output_gaussian_smoother_bins_sigma=1.0,
        oversampling=NISTparms.get('oversampling',10))
    profile.debug_cache = False
    # transfer each convolver's parameters; the '' key holds globals
    for convolver, parms in NISTparms.items():
        if convolver:
            profile.set_parameters(convolver=convolver, **parms)
        else:
            profile.set_parameters(**parms)
    return profile
def doFPAcalc(NISTpk,ttArr,twotheta,calcwid,step):
    '''Compute a single diffraction peak with a NIST profile object.

    :param object NISTpk: peak-profile object from the NIST XRD
      Fundamental Parameters Code, typically from :func:`setupFPAcalc`
    :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)
    :param float twotheta: nominal center of peak (degrees)
    :param float calcwid: width to perform convolution (degrees)
    :param float step: step size
    :returns: (index, profile) where index is the ttArr bin closest to
      twotheta (clamped to the last bin when twotheta is past the end of
      the grid) and profile is the computed line-profile result
    '''
    # locate the grid point nearest twotheta; clamp so a peak beyond the
    # end of the grid still maps onto the final bin
    nearest = ttArr.searchsorted(twotheta)
    last = len(ttArr) - 1
    center = nearest if nearest < last else last
    NISTpk.set_optimized_window(
        twotheta_exact_bin_spacing_deg=step,
        twotheta_window_center_deg=ttArr[center],
        twotheta_approx_window_fullwidth_deg=calcwid,
    )
    NISTpk.set_parameters(twotheta0_deg=twotheta)
    return center, NISTpk.compute_line_profile()
def MakeSimSizer(G2frame, dlg):
    '''Create a GUI to get simulation with parameters for Fundamental
    Parameters fitting.

    :param wx.Window dlg: Frame or Dialog where GUI will appear
    :returns: a sizer with the GUI controls
    '''
    def _onOK(event):
        # Validate the simulation range, compute a simulated "obs"
        # pattern of evenly-spaced FPA peaks, then hand off to
        # FitFPApeaks. On any problem, report and re-post the dialog.
        msg = ''
        if simParms['minTT']-simParms['calcwid']/1.5 < 0.1:
            msg += 'First peak minus half the calc width is too low'
        if simParms['maxTT']+simParms['calcwid']/1.5 > 175:
            if msg: msg += '\n'
            msg += 'Last peak plus half the calc width is too high'
        if simParms['npeaks'] < 8:
            if msg: msg += '\n'
            msg += 'At least 8 peaks are needed'
        if msg:
            G2G.G2MessageBox(dlg,msg,'Bad input, try again')
            return
        # compute "obs" pattern
        ttArr = np.arange(max(0.5,
                        simParms['minTT']-simParms['calcwid']/1.5),
                        simParms['maxTT']+simParms['calcwid']/1.5,
                        simParms['step'])
        intArr = np.zeros_like(ttArr)
        peaklist = np.linspace(simParms['minTT'],simParms['maxTT'],
                               simParms['npeaks'],endpoint=True)
        peakSpacing = (peaklist[-1]-peaklist[0])/(len(peaklist)-1)
        NISTpk = setupFPAcalc()
        minPtsHM = len(intArr) # initialize points above half-max
        maxPtsHM = 0
        for num,twoth_peak in enumerate(peaklist):
            try:
                center_bin_idx,peakObj = doFPAcalc(
                    NISTpk,ttArr,twoth_peak,simParms['calcwid'],
                    simParms['step'])
            except:
                if msg: msg += '\n'
                # NOTE(review): the assignment below overwrites the '\n'
                # just appended (and any prior text); probably intended
                # to be '+=' -- net effect is the same single message
                msg = "Error computing convolution, revise input"
                continue
            # plot the convolvers once, for the first peak only
            if num == 0: G2plt.PlotFPAconvolutors(G2frame,NISTpk)
            pkMax = peakObj.peak.max()
            pkPts = len(peakObj.peak)
            minPtsHM = min(minPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max
            maxPtsHM = max(maxPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max
            startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data
            # scale peak so max I=10,000 and add into intensity array
            if startInd < 0:
                intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax
            elif startInd > len(intArr):
                break
            elif startInd+pkPts >= len(intArr):
                offset = pkPts - len( intArr[startInd:] )
                intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax
            else:
                intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax
        # check if peaks are too closely spaced
        if maxPtsHM*simParms['step'] > peakSpacing/4:
            if msg: msg += '\n'
            msg += 'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'.format(
                maxPtsHM*simParms['step'], peakSpacing)
        # check if too few points across Hmax
        if minPtsHM < 10:
            if msg: msg += '\n'
            msg += 'There are only {} points above the half-max. 10 are needed. Dropping step size.'.format(minPtsHM)
            simParms['step'] *= 0.5
        if msg:
            G2G.G2MessageBox(dlg,msg,'Bad input, try again')
            wx.CallAfter(MakeSimSizer,G2frame, dlg)
            return
        # pattern has been computed successfully
        dlg.Destroy()
        wx.CallAfter(FitFPApeaks,ttArr, intArr, peaklist, maxPtsHM) # do peakfit outside event callback
    def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):
        '''Perform a peak fit to the FP simulated pattern: the pattern is
        loaded as a simulated histogram and the profile parameters
        (positions, areas, U/V/W/X/Y, then SH/L) are refined in stages;
        the result is offered for saving as a .instprm file.
        '''
        plswait = wx.Dialog(G2frame,style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)
        txt = wx.StaticText(plswait,wx.ID_ANY,
                                'Fitting peaks...\nPlease wait...',
                                style=wx.ALIGN_CENTER)
        vbox.Add(txt,0,wx.ALL|wx.EXPAND)
        vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)
        plswait.SetSizer(vbox)
        plswait.Layout()
        plswait.CenterOnParent()
        plswait.Show() # post "please wait"
        wx.BeginBusyCursor()
        # pick out one or two most intense wavelengths
        ints = list(NISTparms['emission']['emiss_intensities'])
        Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10
        if len(ints) > 1:
            ints[np.argmax(ints)] = -1
            Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10
        else:
            Lam2 = None
        histId = G2frame.AddSimulatedPowder(ttArr,intArr,
                                       'NIST Fundamental Parameters simulation',
                                       Lam1,Lam2)
        controls = G2frame.GPXtree.GetItemPyData(
            G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))
        # fall back to default fit controls if none are defined yet
        controldat = controls.get('data',
                            {'deriv type':'analytic','min dM/M':0.001,})
        Parms,Parms2 = G2frame.GPXtree.GetItemPyData(
            G2gd.GetGPXtreeItemId(G2frame,histId,'Instrument Parameters'))
        peakData = G2frame.GPXtree.GetItemPyData(
            G2gd.GetGPXtreeItemId(G2frame,histId,'Peak List'))
        # set background to 0 with one term = 0; disable refinement
        bkg1,bkg2 = bkg = G2frame.GPXtree.GetItemPyData(
            G2gd.GetGPXtreeItemId(G2frame,histId,'Background'))
        bkg1[1]=False
        bkg1[2]=0
        bkg1[3]=0.0
        limits = G2frame.GPXtree.GetItemPyData(
            G2gd.GetGPXtreeItemId(G2frame,histId,'Limits'))
        # approximate asym correction
        try:
            Parms['SH/L'][1] = 0.25 * (
                NISTparms['axial']['length_sample']+
                NISTparms['axial']['slit_length_source']
                ) / NISTparms['']['diffractometer_radius']
        except:
            pass
        # seed the peak list: one entry per simulated peak position
        for pos in peaklist:
            i = ttArr.searchsorted(pos)
            area = sum(intArr[max(0,i-maxPtsHM):min(len(intArr),i+maxPtsHM)])
            peakData['peaks'].append(G2mth.setPeakparms(Parms,Parms2,pos,area))
        histData = G2frame.GPXtree.GetItemPyData(histId)
        # refine peak positions only
        bxye = np.zeros(len(histData[1][1]))
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
                                            bkg,limits[1],
                                            Parms,Parms2,histData[1],bxye,[],
                                           False,controldat,None)[0]
        # refine peak areas as well
        for pk in peakData['peaks']:
            pk[1] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
                                            bkg,limits[1],
                                            Parms,Parms2,histData[1],bxye,[],
                                           False,controldat)[0]
        # refine profile function
        for p in ('U', 'V', 'W', 'X', 'Y'):
            Parms[p][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
                                            bkg,limits[1],
                                            Parms,Parms2,histData[1],bxye,[],
                                           False,controldat)[0]
        # add in asymmetry
        Parms['SH/L'][2] = True
        peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],
                                            bkg,limits[1],
                                            Parms,Parms2,histData[1],bxye,[],
                                           False,controldat)[0]
        # reset "initial" profile
        for p in Parms:
            if len(Parms[p]) == 3:
                Parms[p][0] = Parms[p][1]
                Parms[p][2] = False
        wx.EndBusyCursor()
        plswait.Destroy() # remove "please wait"
        # save Iparms
        pth = G2G.GetExportPath(G2frame)
        fldlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '',
            'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
        try:
            if fldlg.ShowModal() == wx.ID_OK:
                filename = fldlg.GetPath()
                # make sure extension is .instprm
                filename = os.path.splitext(filename)[0]+'.instprm'
                File = open(filename,'w')
                File.write("#GSAS-II instrument parameter file; do not add/delete items!\n")
                for item in Parms:
                    File.write(item+':'+str(Parms[item][1])+'\n')
                File.close()
                print ('Instrument parameters saved to: '+filename)
        finally:
            fldlg.Destroy()
    def _onClose(event):
        # Cancel: dismiss the simulation dialog
        dlg.Destroy()
    def SetButtonStatus(done=False):
        # OK/Save are only useful once NISTparms has been filled in
        OKbtn.Enable(bool(NISTparms))
        saveBtn.Enable(bool(NISTparms))
        if done: _onOK(None)
    def _onSetFPA(event):
        # Create a non-modal dialog for Topas-style FP input.
        FPdlg = wx.Dialog(dlg,wx.ID_ANY,'FPA parameters',
                          style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        MakeTopasFPASizer(G2frame,FPdlg,'BBpoint',SetButtonStatus)
        FPdlg.CenterOnParent()
        FPdlg.Raise()
        FPdlg.Show()
    def _onSaveFPA(event):
        # Dump NISTparms to a .NISTfpa file as a Python dict literal
        filename = G2G.askSaveFile(G2frame,'','.NISTfpa',
                                       'dict of NIST FPA values',dlg)
        if not filename: return
        fp = open(filename,'w')
        fp.write('# parameters to be used in the NIST XRD Fundamental Parameters program\n')
        fp.write('{\n')
        for key in sorted(NISTparms):
            fp.write(" '"+key+"' : "+str(NISTparms[key])+",")
            if not key: fp.write(' # global parameters')
            fp.write('\n')
        fp.write('}\n')
        fp.close()
    def _onReadFPA(event):
        # Read a .NISTfpa dict literal back into NISTparms
        filename = G2G.GetImportFile(G2frame,
            message='Read file with dict of values for NIST Fundamental Parameters',
            parent=dlg,
            wildcard='dict of NIST FPA values|*.NISTfpa')
        if not filename: return
        if not filename[0]: return
        try:
            txt = open(filename[0],'r').read()
            NISTparms.clear()
            # 'array' must be in scope: saved dicts may contain repr'd
            # numpy arrays. SECURITY NOTE: eval() runs arbitrary code
            # from the file; only read files from trusted sources.
            array = np.array
            d = eval(txt)
            NISTparms.update(d)
        except Exception as err:
            G2G.G2MessageBox(dlg,
                    u'Error reading file {}:{}\n'.format(filename,err),
                    'Bad dict input')
        SetButtonStatus()
    # (re)build the dialog contents from scratch
    if dlg.GetSizer(): dlg.GetSizer().Clear(True)
    MainSizer = wx.BoxSizer(wx.VERTICAL)
    MainSizer.Add(wx.StaticText(dlg,wx.ID_ANY,
            'Fit Profile Parameters to Peaks from Fundamental Parameters',
            style=wx.ALIGN_CENTER),0,wx.EXPAND)
    MainSizer.Add((-1,5))
    # table of simulation-range parameters (entry / explanation)
    prmSizer = wx.FlexGridSizer(cols=2,hgap=3,vgap=5)
    text = wx.StaticText(dlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text,0,wx.EXPAND)
    text = wx.StaticText(dlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)
    text.SetBackgroundColour(wx.WHITE)
    prmSizer.Add(text,0,wx.EXPAND)
    for key,defVal,text in (
            ('minTT',3.,'Location of first peak in 2theta (deg)'),
            ('maxTT',123.,'Location of last peak in 2theta (deg)'),
            ('step',0.01,'Pattern step size (deg 2theta)'),
            ('npeaks',13.,'Number of peaks'),
            ('calcwid',2.,'Range to compute each peak (deg 2theta)'),
            ):
        if key not in simParms: simParms[key] = defVal
        ctrl = G2G.ValidatedTxtCtrl(dlg,simParms,key,size=(70,-1))
        prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)
        txt = wx.StaticText(dlg,wx.ID_ANY,text,size=(300,-1))
        txt.Wrap(280)
        prmSizer.Add(txt)
    MainSizer.Add(prmSizer)
    # FPA input / save / read buttons
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    btn = wx.Button(dlg, wx.ID_ANY,'Input FP vals')
    btnsizer.Add(btn)
    btn.Bind(wx.EVT_BUTTON,_onSetFPA)
    saveBtn = wx.Button(dlg, wx.ID_ANY,'Save FPA dict')
    btnsizer.Add(saveBtn)
    saveBtn.Bind(wx.EVT_BUTTON,_onSaveFPA)
    readBtn = wx.Button(dlg, wx.ID_ANY,'Read FPA dict')
    btnsizer.Add(readBtn)
    readBtn.Bind(wx.EVT_BUTTON,_onReadFPA)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1,4),1,wx.EXPAND,1)
    txt = wx.StaticText(dlg,wx.ID_ANY,
                            'If you use this, please cite: '+Citation,
                            size=(350,-1))
    txt.Wrap(340)
    MainSizer.Add(txt,0,wx.ALIGN_CENTER)
    btnsizer = wx.BoxSizer(wx.HORIZONTAL)
    OKbtn = wx.Button(dlg, wx.ID_OK)
    OKbtn.SetDefault()
    btnsizer.Add(OKbtn)
    Cbtn = wx.Button(dlg, wx.ID_CLOSE,"Cancel")
    btnsizer.Add(Cbtn)
    MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)
    MainSizer.Add((-1,4),1,wx.EXPAND,1)
    # bindings for close of window
    OKbtn.Bind(wx.EVT_BUTTON,_onOK)
    Cbtn.Bind(wx.EVT_BUTTON,_onClose)
    SetButtonStatus()
    dlg.SetSizer(MainSizer)
    MainSizer.Layout()
    MainSizer.Fit(dlg)
    dlg.SetMinSize(dlg.GetSize())
    dlg.SendSizeEvent()
    dlg.Raise()
def GetFPAInput(G2frame):
    '''Create and display the Fundamental Parameters (FPA) input dialog.

    Builds a resizable modeless dialog, populates it via :func:`MakeSimSizer`,
    centers it over the parent frame and shows it.

    :param wx.Frame G2frame: the main GSAS-II frame, used as the dialog parent
    '''
    dlgStyle = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER
    fpaDlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=dlgStyle)
    MakeSimSizer(G2frame, fpaDlg)
    fpaDlg.CenterOnParent()
    fpaDlg.Show()
|
flexible
|
{
"blob_id": "3b1426e0f29093e1e462765bcf1d351a064b9639",
"index": 142,
"step-1": "<mask token>\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\n<mask token>\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetPoint(event):\n 
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. 
Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, 
text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n 
source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = {'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * 
InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n",
"step-2": "<mask token>\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\nSetCu2Wave()\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetPoint(event):\n 
wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. 
Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, 
text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n 
source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = {'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * 
InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n",
"step-3": "<mask token>\nsimParms = {}\n<mask token>\nparmDict = {'numWave': 2}\n<mask token>\nNISTparms = {}\n<mask token>\nBraggBrentanoParms = [('divergence', 0.5,\n 'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,\n 'Soller slit axial divergence (degrees)'), ('Rs', 220,\n 'Diffractometer radius (mm)'), ('filament_length', 12.0,\n 'X-ray tube line focus length (mm)'), ('sample_length', 12.0,\n 'Illuminated sample length in axial direction (mm)'), (\n 'receiving_slit_length', 12.0,\n 'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,\n 'Linear absorption coef. adjusted for packing density (cm-1)'), (\n 'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',\n 8, 'Number of Fourier-space bins per two-theta step'), (\n 'tube-tails_width', 0.04,\n 'Tube filament width, in projection at takeoff angle (mm)'), (\n 'tube-tails_L-tail', -1.0,\n 'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',\n 1.0, 'Right-side tube tails width, in projection (mm)'), (\n 'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]\n<mask token>\nBBPointDetector = [('receiving_slit_width', 0.2,\n 'Width of receiving slit (mm)')]\n<mask token>\nBBPSDDetector = [('lpsd_th2_angular_range', 3.0,\n 'Angular range observed by PSD (degrees 2Theta)'), (\n 'lpsd_equitorial_divergence', 0.1,\n 'Equatorial divergence of the primary beam (degrees)')]\n<mask token>\nCitation = \"\"\"MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. 
doi:10.6028/jres.120.014.\n\"\"\"\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\nSetCu2Wave()\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def 
_onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. 
Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, 
text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n 
source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = {'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * 
InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n",
"step-4": "<mask token>\nfrom __future__ import division, print_function\nimport wx\nimport os.path\nimport numpy as np\nimport NIST_profile as FP\nimport GSASIIpath\nimport GSASIIctrlGUI as G2G\nimport GSASIIdataGUI as G2gd\nimport GSASIIplot as G2plt\nimport GSASIImath as G2mth\nimport GSASIIpwd as G2pwd\nsimParms = {}\n<mask token>\nparmDict = {'numWave': 2}\n<mask token>\nNISTparms = {}\n<mask token>\nBraggBrentanoParms = [('divergence', 0.5,\n 'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,\n 'Soller slit axial divergence (degrees)'), ('Rs', 220,\n 'Diffractometer radius (mm)'), ('filament_length', 12.0,\n 'X-ray tube line focus length (mm)'), ('sample_length', 12.0,\n 'Illuminated sample length in axial direction (mm)'), (\n 'receiving_slit_length', 12.0,\n 'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,\n 'Linear absorption coef. adjusted for packing density (cm-1)'), (\n 'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',\n 8, 'Number of Fourier-space bins per two-theta step'), (\n 'tube-tails_width', 0.04,\n 'Tube filament width, in projection at takeoff angle (mm)'), (\n 'tube-tails_L-tail', -1.0,\n 'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',\n 1.0, 'Right-side tube tails width, in projection (mm)'), (\n 'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]\n<mask token>\nBBPointDetector = [('receiving_slit_width', 0.2,\n 'Width of receiving slit (mm)')]\n<mask token>\nBBPSDDetector = [('lpsd_th2_angular_range', 3.0,\n 'Angular range observed by PSD (degrees 2Theta)'), (\n 'lpsd_equitorial_divergence', 0.1,\n 'Equatorial divergence of the primary beam (degrees)')]\n<mask token>\nCitation = \"\"\"MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. 
doi:10.6028/jres.120.014.\n\"\"\"\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\nSetCu2Wave()\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def 
_onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. 
Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, 
text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n 
source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = {'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * 
InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n",
"step-5": "# -*- coding: utf-8 -*-\n########### SVN repository information ###################\n# $Date: $\n# $Author: $\n# $Revision: $\n# $URL: $\n# $Id: $\n########### SVN repository information ###################\n'''\n*GSASIIfpaGUI: Fundamental Parameters Routines*\n===============================================\n\nThis module contains routines for getting Fundamental Parameters \nApproach (FPA) input, setting up for running the NIST XRD Fundamental \nParameters Code, plotting the convolutors and computing a set of peaks\ngenerated by that code. \n\n'''\nfrom __future__ import division, print_function\nimport wx\nimport os.path\nimport numpy as np\n\nimport NIST_profile as FP\n\nimport GSASIIpath\nimport GSASIIctrlGUI as G2G\nimport GSASIIdataGUI as G2gd\nimport GSASIIplot as G2plt\nimport GSASIImath as G2mth\nimport GSASIIpwd as G2pwd\n\nsimParms = {}\n'''Parameters to set range for pattern simulation\n'''\n\nparmDict = {'numWave':2}\n'''Parameter dict used for reading Topas-style values. These are \nconverted to SI units and placed into :data:`NISTparms`\n'''\n\nNISTparms = {}\n'''Parameters in a nested dict, with an entry for each concolutor. Entries in \nthose dicts have values in SI units (of course). NISTparms can be \ncan be input directly or can be from created from :data:`parmDict`\nby :func:`XferFPAsettings`\n'''\n\nBraggBrentanoParms = [\n ('divergence', 0.5, 'Bragg-Brentano divergence angle (degrees)'),\n ('soller_angle', 2.0, 'Soller slit axial divergence (degrees)'),\n ('Rs', 220, 'Diffractometer radius (mm)'),\n ('filament_length', 12., 'X-ray tube line focus length (mm)'),\n ('sample_length', 12., 'Illuminated sample length in axial direction (mm)'),\n ('receiving_slit_length', 12., 'Length of receiving slit in axial direction (mm)'),\n ('LAC_cm', 0.,'Linear absorption coef. 
adjusted for packing density (cm-1)'),\n ('sample_thickness', 1., 'Depth of sample (mm)'),\n ('convolution_steps', 8, 'Number of Fourier-space bins per two-theta step'),\n ('tube-tails_width', 0.04,'Tube filament width, in projection at takeoff angle (mm)'),\n ('tube-tails_L-tail', -1.,'Left-side tube tails width, in projection (mm)'), \n ('tube-tails_R-tail', 1.,'Right-side tube tails width, in projection (mm)'),\n ('tube-tails_rel-I', 0.001,'Tube tails fractional intensity (no units)'),\n ]\n'''FPA dict entries used in :func:`MakeTopasFPASizer`. Tuple contains\na dict key, a default value and a description. These are the parameters\nneeded for all Bragg Brentano instruments\n'''\n\nBBPointDetector = [\n ('receiving_slit_width', 0.2, 'Width of receiving slit (mm)'),]\n'''Additional FPA dict entries used in :func:`MakeTopasFPASizer` \nneeded for Bragg Brentano instruments with point detectors.\n'''\n\nBBPSDDetector = [\n ('lpsd_th2_angular_range', 3.0, 'Angular range observed by PSD (degrees 2Theta)'),\n ('lpsd_equitorial_divergence', 0.1, 'Equatorial divergence of the primary beam (degrees)'),]\n'''Additional FPA dict entries used in :func:`MakeTopasFPASizer` \nneeded for Bragg Brentano instruments with linear (1-D) PSD detectors.\n'''\n\nCitation = '''MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.\n'''\n \ndef SetCu2Wave():\n '''Set the parameters to the two-line Cu K alpha 1+2 spectrum\n '''\n parmDict['wave'] = {i:v for i,v in enumerate((1.540596,1.544493))}\n parmDict['int'] = {i:v for i,v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i:v for i,v in enumerate((0.501844,0.626579))}\nSetCu2Wave() # use these as default\n\ndef MakeTopasFPASizer(G2frame,FPdlg,mode,SetButtonStatus):\n '''Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. 
\n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n '''\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True) # done=True triggers the simulation\n FPdlg.Destroy()\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n def _onAddWave(event):\n parmDict['numWave'] += 1 \n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onRemWave(event):\n parmDict['numWave'] -= 1 \n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i:v for i,v in enumerate((1.534753,1.540596,1.541058,1.54441,1.544721))}\n parmDict['int'] = {i:v for i,v in enumerate((0.0159, 0.5791, 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i:v for i,v in enumerate((3.6854, 0.437, 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBpoint',SetButtonStatus)\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBPSD',SetButtonStatus)\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5,\n simParms['plotpos']-simParms['calcwid']),\n simParms['plotpos']+simParms['calcwid'],\n simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx,peakObj = doFPAcalc(\n NISTpk,ttArr,simParms['plotpos'],simParms['calcwid'],\n 
simParms['step'])\n except Exception as err:\n msg = \"Error computing convolution, revise input\"\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame,NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data\n # scale peak so max I=10,000 and add into intensity array\n if startInd < 0:\n intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax\n elif startInd > len(intArr):\n return\n elif startInd+pkPts >= len(intArr):\n offset = pkPts - len( intArr[startInd:] )\n intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax\n else:\n intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)],\n labelX=r'$2\\theta, deg$',\n labelY=r'Intensity (arbitrary)',\n Title='FPA peak', newPlot=True, lines=True)\n\n if FPdlg.GetSizer(): FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms+BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms+BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: '+mode)\n \n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1,5))\n waveSizer = wx.FlexGridSizer(cols=numWave+1,hgap=3,vgap=5)\n for lbl,prm,defVal in zip(\n (u'Wavelength (\\u212b)','Rel. 
Intensity',u'Lorentz Width\\n(\\u212b/1000)'),\n ('wave','int','lwidth'),\n (0.0, 1.0, 0.1),\n ):\n text = wx.StaticText(FPdlg,wx.ID_ANY,lbl,style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text,0,wx.EXPAND)\n if prm not in parmDict: parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]: parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict[prm],i,size=(90,-1))\n waveSizer.Add(ctrl,1,wx.ALIGN_CENTER_VERTICAL,1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1,5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY,'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY,'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY,'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY,'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,5))\n \n prmSizer = wx.FlexGridSizer(cols=3,hgap=3,vgap=5)\n text = wx.StaticText(FPdlg,wx.ID_ANY,'label',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n text = wx.StaticText(FPdlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n text = wx.StaticText(FPdlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n for lbl,defVal,text in itemList:\n 
prmSizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,lbl),1,wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL,1)\n if lbl not in parmDict: parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict,lbl,size=(70,-1))\n prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)\n txt = wx.StaticText(FPdlg,wx.ID_ANY,text,size=(400,-1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' at '))\n if 'plotpos' not in simParms: simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg,simParms,'plotpos',size=(70,-1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' deg.')) \n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE,\"Cancel\") \n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n # bindings for close of window\n OKbtn.Bind(wx.EVT_BUTTON,_onOK)\n Cbtn.Bind(wx.EVT_BUTTON,_onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\ndef XferFPAsettings(InpParms):\n '''convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n '''\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1.e-10 * np.array([InpParms['wave'][i] for i in wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n source_intensities = np.array(la)/max(la)\n 
source_lor_widths_m = 1.e-10 * 1.e-3 * np.array([InpParms['lwidth'][i] for i in wavenums])\n source_gauss_widths_m = 1.e-10 * 1.e-3 * np.array([0.001 for i in wavenums])\n \n NISTparms[\"emission\"] = {'emiss_wavelengths' : source_wavelengths_m,\n 'emiss_intensities' : source_intensities,\n 'emiss_gauss_widths' : source_gauss_widths_m,\n 'emiss_lor_widths' : source_lor_widths_m,\n 'crystallite_size_gauss' : 1.e-9 * InpParms.get('Size_G',1e6),\n 'crystallite_size_lor' : 1.e-9 * InpParms.get('Size_L',1e6)}\n \n if InpParms['filament_length'] == InpParms['receiving_slit_length']: # workaround: \n InpParms['receiving_slit_length'] *= 1.00001 # avoid bug when slit lengths are identical\n NISTparms[\"axial\"] = {\n 'axDiv':\"full\", 'slit_length_source' : 1e-3*InpParms['filament_length'],\n 'slit_length_target' : 1e-3*InpParms['receiving_slit_length'],\n 'length_sample' : 1e-3 * InpParms['sample_length'], \n 'n_integral_points' : 10,\n 'angI_deg' : InpParms['soller_angle'],\n 'angD_deg': InpParms['soller_angle']\n }\n if InpParms.get('LAC_cm',0) > 0:\n NISTparms[\"absorption\"] = {\n 'absorption_coefficient': InpParms['LAC_cm']*100, #like LaB6, in m^(-1)\n 'sample_thickness': 1e-3 * InpParms['sample_thickness'],\n }\n elif \"absorption\" in NISTparms:\n del NISTparms[\"absorption\"]\n\n if InpParms.get('lpsd_equitorial_divergence',0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range',0) > 0:\n PSDdetector_length_mm=np.arcsin(np.pi*InpParms['lpsd_th2_angular_range']/180.\n )*InpParms['Rs'] # mm\n NISTparms[\"si_psd\"] = {\n 'equatorial_divergence_deg': InpParms['lpsd_equitorial_divergence'],\n 'si_psd_window_bounds': (0.,PSDdetector_length_mm/1000.)\n }\n elif \"si_psd\" in NISTparms:\n del NISTparms[\"si_psd\"]\n \n if InpParms.get('Specimen_Displacement'):\n NISTparms[\"displacement\"] = {'specimen_displacement': 1e-3 * InpParms['Specimen_Displacement']}\n elif \"displacement\" in NISTparms:\n del NISTparms[\"displacement\"]\n\n if InpParms.get('receiving_slit_width'):\n 
NISTparms[\"receiver_slit\"] = {'slit_width':1e-3*InpParms['receiving_slit_width']}\n elif \"receiver_slit\" in NISTparms:\n del NISTparms[\"receiver_slit\"]\n\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I',0) > 0:\n NISTparms[\"tube_tails\"] = {\n 'main_width' : 1e-3 * InpParms.get('tube-tails_width', 0.),\n 'tail_left' : -1e-3 * InpParms.get('tube-tails_L-tail',0.),\n 'tail_right' : 1e-3 * InpParms.get('tube-tails_R-tail',0.),\n 'tail_intens' : InpParms.get('tube-tails_rel-I',0.),}\n elif \"tube_tails\" in NISTparms:\n del NISTparms[\"tube_tails\"]\n\n # set Global parameters\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[\"\"] = {\n 'equatorial_divergence_deg' : InpParms['divergence'],\n 'dominant_wavelength' : max_wavelength,\n 'diffractometer_radius' : 1e-3* InpParms['Rs'],\n 'oversampling' : InpParms['convolution_steps'],\n }\ndef setupFPAcalc():\n '''Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n '''\n p=FP.FP_profile(anglemode=\"twotheta\",\n output_gaussian_smoother_bins_sigma=1.0,\n oversampling=NISTparms.get('oversampling',10))\n p.debug_cache=False\n #set parameters for each convolver\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key,**NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n \ndef doFPAcalc(NISTpk,ttArr,twotheta,calcwid,step):\n '''Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n '''\n # find closest point to twotheta (may be outside limits of the array)\n center_bin_idx=min(ttArr.searchsorted(twotheta),len(ttArr)-1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid,\n )\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx,NISTpk.compute_line_profile()\n\ndef MakeSimSizer(G2frame, dlg):\n '''Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n '''\n def _onOK(event):\n msg = ''\n if simParms['minTT']-simParms['calcwid']/1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT']+simParms['calcwid']/1.5 > 175:\n if msg: msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg: msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg,msg,'Bad input, try again')\n return\n # compute \"obs\" pattern\n ttArr = np.arange(max(0.5,\n simParms['minTT']-simParms['calcwid']/1.5),\n simParms['maxTT']+simParms['calcwid']/1.5,\n simParms['step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'],simParms['maxTT'],\n simParms['npeaks'],endpoint=True)\n peakSpacing = (peaklist[-1]-peaklist[0])/(len(peaklist)-1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr) # initialize points above half-max\n maxPtsHM = 0\n for num,twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx,peakObj = doFPAcalc(\n NISTpk,ttArr,twoth_peak,simParms['calcwid'],\n simParms['step'])\n except:\n if msg: msg += '\\n'\n msg = \"Error computing convolution, revise input\"\n continue\n if num == 0: G2plt.PlotFPAconvolutors(G2frame,NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max\n maxPtsHM = max(maxPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max\n startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data\n # scale peak so max I=10,000 and add into intensity array\n if startInd < 0:\n intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax\n elif startInd > len(intArr):\n break\n elif startInd+pkPts >= len(intArr):\n offset = pkPts - len( intArr[startInd:] )\n intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax\n else:\n 
intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax\n # check if peaks are too closely spaced\n if maxPtsHM*simParms['step'] > peakSpacing/4:\n if msg: msg += '\\n'\n msg += 'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'.format(\n maxPtsHM*simParms['step'], peakSpacing)\n # check if too few points across Hmax\n if minPtsHM < 10:\n if msg: msg += '\\n'\n msg += 'There are only {} points above the half-max. 10 are needed. Dropping step size.'.format(minPtsHM)\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg,msg,'Bad input, try again')\n wx.CallAfter(MakeSimSizer,G2frame, dlg)\n return\n # pattern has been computed successfully\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks,ttArr, intArr, peaklist, maxPtsHM) # do peakfit outside event callback\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n '''Perform a peak fit to the FP simulated pattern\n '''\n plswait = wx.Dialog(G2frame,style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)\n txt = wx.StaticText(plswait,wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...',\n style=wx.ALIGN_CENTER)\n vbox.Add(txt,0,wx.ALL|wx.EXPAND)\n vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show() # post \"please wait\"\n wx.BeginBusyCursor()\n # pick out one or two most intense wavelengths\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10\n if len(ints) > 1: \n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr,intArr,\n 'NIST Fundamental Parameters simulation',\n Lam1,Lam2)\n controls = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))\n controldat = controls.get('data',\n 
{'deriv type':'analytic','min dM/M':0.001,}) #fil\n Parms,Parms2 = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Peak List'))\n # set background to 0 with one term = 0; disable refinement\n bkg1,bkg2 = bkg = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Background'))\n bkg1[1]=False\n bkg1[2]=0\n bkg1[3]=0.0\n limits = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Limits'))\n # approximate asym correction\n try:\n Parms['SH/L'][1] = 0.25 * (\n NISTparms['axial']['length_sample']+\n NISTparms['axial']['slit_length_source']\n ) / NISTparms['']['diffractometer_radius']\n except:\n pass\n \n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0,i-maxPtsHM):min(len(intArr),i+maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms,Parms2,pos,area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n # refine peak positions only\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat,None)[0]\n # refine peak areas as well\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat)[0]\n # refine profile function\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat)[0]\n # add in asymmetry\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat)[0]\n # reset \"initial\" profile\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n 
wx.EndBusyCursor()\n plswait.Destroy() # remove \"please wait\"\n # save Iparms\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '', \n 'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n # make sure extension is .instprm\n filename = os.path.splitext(filename)[0]+'.instprm'\n File = open(filename,'w')\n File.write(\"#GSAS-II instrument parameter file; do not add/delete items!\\n\")\n for item in Parms:\n File.write(item+':'+str(Parms[item][1])+'\\n')\n File.close()\n print ('Instrument parameters saved to: '+filename)\n finally:\n fldlg.Destroy()\n #GSASIIpath.IPyBreak()\n \n def _onClose(event):\n dlg.Destroy()\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done: _onOK(None)\n def _onSetFPA(event):\n # Create a non-modal dialog for Topas-style FP input.\n FPdlg = wx.Dialog(dlg,wx.ID_ANY,'FPA parameters',\n style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame,FPdlg,'BBpoint',SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show() \n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame,'','.NISTfpa',\n 'dict of NIST FPA values',dlg)\n if not filename: return\n fp = open(filename,'w')\n fp.write('# parameters to be used in the NIST XRD Fundamental Parameters program\\n')\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\"+key+\"' : \"+str(NISTparms[key])+\",\")\n if not key: fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame,\n message='Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg,\n wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename: return\n if not filename[0]: return\n try:\n txt = open(filename[0],'r').read()\n 
NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg,\n u'Error reading file {}:{}\\n'.format(filename,err),\n 'Bad dict input')\n #GSASIIpath.IPyBreak()\n SetButtonStatus()\n\n if dlg.GetSizer(): dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg,wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER),0,wx.EXPAND)\n MainSizer.Add((-1,5))\n prmSizer = wx.FlexGridSizer(cols=2,hgap=3,vgap=5)\n text = wx.StaticText(dlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n text = wx.StaticText(dlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n for key,defVal,text in (\n ('minTT',3.,'Location of first peak in 2theta (deg)'),\n ('maxTT',123.,'Location of last peak in 2theta (deg)'),\n ('step',0.01,'Pattern step size (deg 2theta)'),\n ('npeaks',13.,'Number of peaks'),\n ('calcwid',2.,'Range to compute each peak (deg 2theta)'),\n ):\n if key not in simParms: simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg,simParms,key,size=(70,-1))\n prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)\n txt = wx.StaticText(dlg,wx.ID_ANY,text,size=(300,-1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY,'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY,'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON,_onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY,'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON,_onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n txt = wx.StaticText(dlg,wx.ID_ANY,\n 'If you use this, please cite: '+Citation,\n size=(350,-1))\n 
txt.Wrap(340)\n MainSizer.Add(txt,0,wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE,\"Cancel\") \n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n # bindings for close of window\n OKbtn.Bind(wx.EVT_BUTTON,_onOK)\n Cbtn.Bind(wx.EVT_BUTTON,_onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n dlg.Raise()\n \ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame,wx.ID_ANY,'FPA input',\n style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)\n MakeSimSizer(G2frame,dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n \n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
import random
# Candidate name pools used to build random player names.
# NOTE(review): "Williams" appears twice in lastNames, doubling its odds --
# confirm whether that weighting is intentional.
firstNames = ("Thomas", "Daniel", "James", "Aaron", "Tommy", "Terrell", "Jack", "Joseph", "Samuel", "Quinn", "Hunter", "Vince", "Young", "Ian", "Erving", "Leo")
lastNames = ("Smith", "Johnson", "Williams", "Kline","Brown", "Garcia", "Jones", "Miller", "Davis","Williams", "Alves", "Sobronsky", "Hall", "Murphy", "Morris")

def f(x):
    """Clamp a statistic at zero: return 0 if x is negative, else x unchanged.

    Replaces the original ``f = lambda x: ...`` assignment (PEP 8 E731);
    the name and one-argument interface are unchanged for existing callers.
    """
    return 0 if x < 0 else x
def improvementFunction(age, maxMu):
    """Mean yearly stat change for a player of the given age.

    A downward-opening parabola in ``age`` with roots at 17 and 30:
    improvement peaks mid-career (age 23.5, where it equals
    ``maxMu * 42.25 / 30``) and turns negative past age 30.
    """
    growth_phase = (age - 17) * (30 - age)
    return maxMu * growth_phase / 30
class profile:
    """A randomly generated basketball player with a per-season stat history.

    Stats (points, assists, rebounds, blocks, steals, turnovers, FG%, 3P%)
    are stored as lists: index 0 is the rookie season and the last element
    is the most recent season. All generated values are rounded to one
    decimal place and clamped at zero by the module-level helper f().
    """
    def __init__ (self):
        # Random "First Last" name from the module-level name pools.
        self.name = firstNames[random.randrange(0,len(firstNames))] + " " + lastNames[random.randrange(0,len(lastNames))]
        # Starting year. NOTE(review): never advanced anywhere in this
        # file -- confirm whether it is meant to track elapsed seasons.
        self.years = 2020
        # Rookie-season per-game averages drawn from Gaussians (mean, sd),
        # rounded to one decimal and clamped non-negative.
        self.ppg = [f(round( random.gauss(10.5, 2.4), 1))]  # points per game
        self.apg = [f(round(random.gauss(5.2, 2.4), 1))]  # assists per game
        self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]  # rebounds per game
        self.bpg = [f(round(random.gauss(1, .8), 1))]  # blocks per game
        self.spg = [f(round(random.gauss(.9, 1.2), 1))]  # steals per game
        self.tpg = [f(round(random.gauss(1.8, .5), 1))]  # turnovers per game
        self.age = random.randrange(18,24)  # starting age: 18-23 inclusive
        self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]  # field-goal %
        self.tpp = [f(round(random.gauss(28.7, 6), 1))]  # three-point %
    def getStats (self):
        """Return a dict of the player's age, name, and the most recent
        (last-element) value of every tracked statistic."""
        output = {"Age:" : self.age,
                  "name" : self.name,
                  "points per game" : self.ppg[-1],
                  "assists per game" : self.apg[-1],
                  "rebounds per game" : self.rpg[-1],
                  "blocks per game" : self.bpg[-1],
                  "steals per game" : self.spg[-1],
                  "turnovers per game" : self.tpg[-1],
                  "field goal percentage" : self.fgp[-1],
                  "three point percentage" : self.tpp[-1]}
        return output
    def incrementAge (self):
        """Age the player by one year (call once per simulated season)."""
        self.age += 1
    def updateStats (self):
        """Append a new season's value to every stat list.

        Each new value is the previous season's value plus Gaussian noise
        whose mean comes from improvementFunction(age, maxMu): positive
        through the mid-20s, negative past age 30. The maxMu expressions
        (e.g. ``5 - 2 * 1.8``) are hand-tuned peak-improvement constants;
        several scale with the player's current level, so stronger players
        change faster. Every result is rounded to one decimal and clamped
        at zero by f().
        """
        self.ppg.append(f(round(self.ppg[-1] + random.gauss(improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))
        self.apg.append(f(round(self.apg[-1] + random.gauss(improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))
        self.rpg.append(f(round(self.rpg[-1] + random.gauss(improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))
        self.bpg.append(f(round(self.bpg[-1] + random.gauss(improvementFunction(self.age, self.bpg[-1] * 2 - 1), .5), 1)))
        self.spg.append(f(round(self.spg[-1] + random.gauss(improvementFunction(self.age, self.spg[-1] * 2 - 1), .5), 1)))
        self.tpg.append(f(round(self.tpg[-1] + random.gauss(improvementFunction(self.age, 2.5 - .5), .5), 1)))
        self.fgp.append(f(round(self.fgp[-1] + random.gauss(improvementFunction(self.age, 10 - 3), 2.5), 1)))
        self.tpp.append(f(round(self.tpp[-1] + random.gauss(improvementFunction(self.age, 8 - 3), 1.9), 1)))
|
normal
|
{
"blob_id": "5607d4fea315fa7bf87337453fbef90a93a66516",
"index": 3968,
"step-1": "<mask token>\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n 
improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-2": "<mask token>\n\n\ndef improvementFunction(age, maxMu):\n return maxMu / -30 * (age - 17) * (age - 30)\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 
3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-3": "<mask token>\nfirstNames = ('Thomas', 'Daniel', 'James', 'Aaron', 'Tommy', 'Terrell',\n 'Jack', 'Joseph', 'Samuel', 'Quinn', 'Hunter', 'Vince', 'Young', 'Ian',\n 'Erving', 'Leo')\nlastNames = ('Smith', 'Johnson', 'Williams', 'Kline', 'Brown', 'Garcia',\n 'Jones', 'Miller', 'Davis', 'Williams', 'Alves', 'Sobronsky', 'Hall',\n 'Murphy', 'Morris')\nf = lambda x: 0 if x < 0 else x\n\n\ndef improvementFunction(age, maxMu):\n return maxMu / -30 * (age - 17) * (age - 30)\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n 
improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-4": "import random\nfirstNames = ('Thomas', 'Daniel', 'James', 'Aaron', 'Tommy', 'Terrell',\n 'Jack', 'Joseph', 'Samuel', 'Quinn', 'Hunter', 'Vince', 'Young', 'Ian',\n 'Erving', 'Leo')\nlastNames = ('Smith', 'Johnson', 'Williams', 'Kline', 'Brown', 'Garcia',\n 'Jones', 'Miller', 'Davis', 'Williams', 'Alves', 'Sobronsky', 'Hall',\n 'Murphy', 'Morris')\nf = lambda x: 0 if x < 0 else x\n\n\ndef improvementFunction(age, maxMu):\n return maxMu / -30 * (age - 17) * (age - 30)\n\n\nclass profile:\n\n def __init__(self):\n self.name = firstNames[random.randrange(0, len(firstNames))\n ] + ' ' + lastNames[random.randrange(0, len(lastNames))]\n self.years = 2020\n self.ppg = [f(round(random.gauss(10.5, 2.4), 1))]\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\n self.bpg = [f(round(random.gauss(1, 0.8), 1))]\n self.spg = [f(round(random.gauss(0.9, 1.2), 1))]\n self.tpg = [f(round(random.gauss(1.8, 0.5), 1))]\n self.age = random.randrange(18, 24)\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\n\n def getStats(self):\n output = {'Age:': self.age, 'name': self.name, 'points per game':\n self.ppg[-1], 'assists per game': self.apg[-1],\n 'rebounds per game': self.rpg[-1], 'blocks per game': self.bpg[\n -1], 'steals per game': self.spg[-1], 'turnovers per game':\n self.tpg[-1], 'field goal percentage': self.fgp[-1],\n 'three point percentage': self.tpp[-1]}\n return output\n\n def incrementAge(self):\n self.age += 1\n\n def updateStats(self):\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(\n improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\n self.apg.append(f(round(self.apg[-1] + random.gauss(\n improvementFunction(self.age, self.apg[-1] * 2 - 6), 1.5), 1)))\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(\n improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(\n 
improvementFunction(self.age, self.bpg[-1] * 2 - 1), 0.5), 1)))\n self.spg.append(f(round(self.spg[-1] + random.gauss(\n improvementFunction(self.age, self.spg[-1] * 2 - 1), 0.5), 1)))\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(\n improvementFunction(self.age, 2.5 - 0.5), 0.5), 1)))\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(\n improvementFunction(self.age, 10 - 3), 2.5), 1)))\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(\n improvementFunction(self.age, 8 - 3), 1.9), 1)))\n",
"step-5": "import random\r\n\r\n\r\nfirstNames = (\"Thomas\", \"Daniel\", \"James\", \"Aaron\", \"Tommy\", \"Terrell\", \"Jack\", \"Joseph\", \"Samuel\", \"Quinn\", \"Hunter\", \"Vince\", \"Young\", \"Ian\", \"Erving\", \"Leo\")\r\nlastNames = (\"Smith\", \"Johnson\", \"Williams\", \"Kline\",\"Brown\", \"Garcia\", \"Jones\", \"Miller\", \"Davis\",\"Williams\", \"Alves\", \"Sobronsky\", \"Hall\", \"Murphy\", \"Morris\")\r\n\r\n# Verifies statistics are not negative\r\nf = lambda x : 0 if (x < 0) else x\r\n\r\n\r\ndef improvementFunction(age, maxMu):\r\n return (maxMu/-30) * (age - 17) * (age - 30)\r\n\r\n\r\nclass profile:\r\n def __init__ (self):\r\n self.name = firstNames[random.randrange(0,len(firstNames))] + \" \" + lastNames[random.randrange(0,len(lastNames))]\r\n self.years = 2020\r\n self.ppg = [f(round( random.gauss(10.5, 2.4), 1))]\r\n self.apg = [f(round(random.gauss(5.2, 2.4), 1))]\r\n self.rpg = [f(round(random.gauss(4.7, 2.4), 1))]\r\n self.bpg = [f(round(random.gauss(1, .8), 1))]\r\n self.spg = [f(round(random.gauss(.9, 1.2), 1))]\r\n self.tpg = [f(round(random.gauss(1.8, .5), 1))]\r\n self.age = random.randrange(18,24)\r\n self.fgp = [f(round(random.gauss(39.2, 5.4), 1))]\r\n self.tpp = [f(round(random.gauss(28.7, 6), 1))]\r\n\r\n def getStats (self):\r\n output = {\"Age:\" : self.age,\r\n \"name\" : self.name,\r\n \"points per game\" : self.ppg[-1],\r\n \"assists per game\" : self.apg[-1],\r\n \"rebounds per game\" : self.rpg[-1],\r\n \"blocks per game\" : self.bpg[-1],\r\n \"steals per game\" : self.spg[-1],\r\n \"turnovers per game\" : self.tpg[-1],\r\n \"field goal percentage\" : self.fgp[-1],\r\n \"three point percentage\" : self.tpp[-1]}\r\n return output\r\n\r\n def incrementAge (self):\r\n self.age += 1\r\n\r\n def updateStats (self):\r\n self.ppg.append(f(round(self.ppg[-1] + random.gauss(improvementFunction(self.age, 5 - 2 * 1.8), 1.8), 1)))\r\n self.apg.append(f(round(self.apg[-1] + random.gauss(improvementFunction(self.age, self.apg[-1] * 
2 - 6), 1.5), 1)))\r\n self.rpg.append(f(round(self.rpg[-1] + random.gauss(improvementFunction(self.age, self.rpg[-1] * 1.5 - 3), 1.5), 1)))\r\n self.bpg.append(f(round(self.bpg[-1] + random.gauss(improvementFunction(self.age, self.bpg[-1] * 2 - 1), .5), 1)))\r\n self.spg.append(f(round(self.spg[-1] + random.gauss(improvementFunction(self.age, self.spg[-1] * 2 - 1), .5), 1)))\r\n self.tpg.append(f(round(self.tpg[-1] + random.gauss(improvementFunction(self.age, 2.5 - .5), .5), 1)))\r\n self.fgp.append(f(round(self.fgp[-1] + random.gauss(improvementFunction(self.age, 10 - 3), 2.5), 1)))\r\n self.tpp.append(f(round(self.tpp[-1] + random.gauss(improvementFunction(self.age, 8 - 3), 1.9), 1)))\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(owog.find('e'))
print(owog.count('e'))
print(owog[2:10])
<|reserved_special_token_0|>
if a > b:
print('a too ih')
elif a == b:
print('tentsuu')
else:
print('b too ih')
<|reserved_special_token_0|>
for i in range(a, b + 1):
print(i)
<|reserved_special_token_1|>
owog = 'Delger'
print(owog.find('e'))
print(owog.count('e'))
print(owog[2:10])
a = 21
b = 21
if a > b:
print('a too ih')
elif a == b:
print('tentsuu')
else:
print('b too ih')
a, b = input().split()
for i in range(a, b + 1):
print(i)
<|reserved_special_token_1|>
#str
owog="Delger"
# len()- urt
# lower()- jijigruuleh
# upper()- tomruulah
# capitalize()- ehnii useg tomruulah
# replace()- temdegt solih
print(owog.find("e"))
print(owog.count("e"))
print(owog[2:10])
a=21
b=21
if a>b:
print("a too ih")
elif a==b:
print("tentsuu")
else:
print("b too ih")
a, b = input().split()
for i in range(a, b+1):
print(i)
|
flexible
|
{
"blob_id": "c4ca4b5c77c3c912b44a4853be30298ec845c4fd",
"index": 243,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(owog.find('e'))\nprint(owog.count('e'))\nprint(owog[2:10])\n<mask token>\nif a > b:\n print('a too ih')\nelif a == b:\n print('tentsuu')\nelse:\n print('b too ih')\n<mask token>\nfor i in range(a, b + 1):\n print(i)\n",
"step-3": "owog = 'Delger'\nprint(owog.find('e'))\nprint(owog.count('e'))\nprint(owog[2:10])\na = 21\nb = 21\nif a > b:\n print('a too ih')\nelif a == b:\n print('tentsuu')\nelse:\n print('b too ih')\na, b = input().split()\nfor i in range(a, b + 1):\n print(i)\n",
"step-4": "#str\r\nowog=\"Delger\"\r\n# len()- urt\r\n# lower()- jijigruuleh\r\n# upper()- tomruulah\r\n# capitalize()- ehnii useg tomruulah\r\n# replace()- temdegt solih\r\nprint(owog.find(\"e\"))\r\nprint(owog.count(\"e\"))\r\nprint(owog[2:10])\r\n\r\na=21\r\nb=21\r\nif a>b:\r\n print(\"a too ih\")\r\nelif a==b:\r\n print(\"tentsuu\")\r\nelse:\r\n print(\"b too ih\")\r\n\r\na, b = input().split()\r\nfor i in range(a, b+1):\r\n print(i)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def sample_1(N):
numeros = np.array([-10, -5, 3, 9])
return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])
def sample_2(N):
return np.random.exponential(0.5, N)
def get_mean(sampling_fun, N, M):
medias = np.zeros(M)
for i in range(M):
medias[i] = np.mean(sampling_fun(N))
return medias
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sample_1(N):
numeros = np.array([-10, -5, 3, 9])
return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])
def sample_2(N):
return np.random.exponential(0.5, N)
def get_mean(sampling_fun, N, M):
medias = np.zeros(M)
for i in range(M):
medias[i] = np.mean(sampling_fun(N))
return medias
<|reserved_special_token_0|>
for i in range(3):
medias_1[:, i] = get_mean(sample_1, n[i], m)
medias_2[:, i] = get_mean(sample_2, n[i], m)
np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])
np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sample_1(N):
numeros = np.array([-10, -5, 3, 9])
return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])
def sample_2(N):
return np.random.exponential(0.5, N)
def get_mean(sampling_fun, N, M):
medias = np.zeros(M)
for i in range(M):
medias[i] = np.mean(sampling_fun(N))
return medias
n = np.array([10, 100, 1000])
m = 10000
medias_1 = np.zeros((m, 3))
medias_2 = np.zeros((m, 3))
texto = 'sample_'
for i in range(3):
medias_1[:, i] = get_mean(sample_1, n[i], m)
medias_2[:, i] = get_mean(sample_2, n[i], m)
np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])
np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
def sample_1(N):
numeros = np.array([-10, -5, 3, 9])
return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])
def sample_2(N):
return np.random.exponential(0.5, N)
def get_mean(sampling_fun, N, M):
medias = np.zeros(M)
for i in range(M):
medias[i] = np.mean(sampling_fun(N))
return medias
n = np.array([10, 100, 1000])
m = 10000
medias_1 = np.zeros((m, 3))
medias_2 = np.zeros((m, 3))
texto = 'sample_'
for i in range(3):
medias_1[:, i] = get_mean(sample_1, n[i], m)
medias_2[:, i] = get_mean(sample_2, n[i], m)
np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])
np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
def sample_1(N):
numeros=np.array([-10, -5, 3, 9])
return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])#devuelve distro aleatoria con las probabilidades indicadas
def sample_2(N):
return np.random.exponential(0.5,N)#devuelve numeros aleatorios con distro exp con beta = 0.5
def get_mean(sampling_fun,N,M):
medias=np.zeros(M)#arreglo de medias
for i in range(M):#recorrido para sacar las m medias
medias[i]=np.mean(sampling_fun(N))
return medias
n=np.array([10,100,1000])#arreglo con los distintos valores de n
m=10000#valor de M
medias_1=np.zeros((m,3))#arreglo que guarta las m medias para 3 enes de sample1
medias_2=np.zeros((m,3))#lo de arriba pero con sample 2
texto='sample_'#texto que me da pereza escribir dos veces
for i in range(3):#recorrido para cada n
medias_1[:,i]=get_mean(sample_1,n[i],m)
medias_2[:,i]=get_mean(sample_2,n[i],m)
np.savetxt(texto+'1_'+str(n[i])+'.txt',medias_1[:,i])#archivo con las m medias para cada n
np.savetxt(texto+'2_'+str(n[i])+'.txt',medias_2[:,i])
|
flexible
|
{
"blob_id": "d2d04686b3d7f8d01ca195750ca625baa06ed098",
"index": 2835,
"step-1": "<mask token>\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\n<mask token>\nfor i in range(3):\n medias_1[:, i] = get_mean(sample_1, n[i], m)\n medias_2[:, i] = get_mean(sample_2, n[i], m)\n np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])\n np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])\n",
"step-3": "<mask token>\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\nn = np.array([10, 100, 1000])\nm = 10000\nmedias_1 = np.zeros((m, 3))\nmedias_2 = np.zeros((m, 3))\ntexto = 'sample_'\nfor i in range(3):\n medias_1[:, i] = get_mean(sample_1, n[i], m)\n medias_2[:, i] = get_mean(sample_2, n[i], m)\n np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])\n np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef sample_1(N):\n numeros = np.array([-10, -5, 3, 9])\n return np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])\n\n\ndef sample_2(N):\n return np.random.exponential(0.5, N)\n\n\ndef get_mean(sampling_fun, N, M):\n medias = np.zeros(M)\n for i in range(M):\n medias[i] = np.mean(sampling_fun(N))\n return medias\n\n\nn = np.array([10, 100, 1000])\nm = 10000\nmedias_1 = np.zeros((m, 3))\nmedias_2 = np.zeros((m, 3))\ntexto = 'sample_'\nfor i in range(3):\n medias_1[:, i] = get_mean(sample_1, n[i], m)\n medias_2[:, i] = get_mean(sample_2, n[i], m)\n np.savetxt(texto + '1_' + str(n[i]) + '.txt', medias_1[:, i])\n np.savetxt(texto + '2_' + str(n[i]) + '.txt', medias_2[:, i])\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef sample_1(N):\n\tnumeros=np.array([-10, -5, 3, 9])\n\treturn np.random.choice(numeros, N, p=[0.1, 0.4, 0.2, 0.3])#devuelve distro aleatoria con las probabilidades indicadas\n\ndef sample_2(N):\n\treturn np.random.exponential(0.5,N)#devuelve numeros aleatorios con distro exp con beta = 0.5\n\ndef get_mean(sampling_fun,N,M):\n\tmedias=np.zeros(M)#arreglo de medias\n\tfor i in range(M):#recorrido para sacar las m medias\n\t\tmedias[i]=np.mean(sampling_fun(N))\n\treturn medias\n\nn=np.array([10,100,1000])#arreglo con los distintos valores de n\nm=10000#valor de M\nmedias_1=np.zeros((m,3))#arreglo que guarta las m medias para 3 enes de sample1\nmedias_2=np.zeros((m,3))#lo de arriba pero con sample 2\ntexto='sample_'#texto que me da pereza escribir dos veces\nfor i in range(3):#recorrido para cada n\n\tmedias_1[:,i]=get_mean(sample_1,n[i],m)\n\tmedias_2[:,i]=get_mean(sample_2,n[i],m)\n\tnp.savetxt(texto+'1_'+str(n[i])+'.txt',medias_1[:,i])#archivo con las m medias para cada n\n\tnp.savetxt(texto+'2_'+str(n[i])+'.txt',medias_2[:,i])\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('khovan', '0003_nhapkho')]
operations = [migrations.AddField(model_name='phieunhaphang', name=
'xulykho', field=models.BooleanField(default=False, verbose_name=
'Xu Ly Kho'), preserve_default=False)]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [('khovan', '0003_nhapkho')]
operations = [migrations.AddField(model_name='phieunhaphang', name=
'xulykho', field=models.BooleanField(default=False, verbose_name=
'Xu Ly Kho'), preserve_default=False)]
<|reserved_special_token_1|>
# Generated by Django 3.2.3 on 2021-07-02 08:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('khovan', '0003_nhapkho'),
]
operations = [
migrations.AddField(
model_name='phieunhaphang',
name='xulykho',
field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'),
preserve_default=False,
),
]
|
flexible
|
{
"blob_id": "016255d74ccf4ac547e4b212d33bb9a39295c830",
"index": 2715,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('khovan', '0003_nhapkho')]\n operations = [migrations.AddField(model_name='phieunhaphang', name=\n 'xulykho', field=models.BooleanField(default=False, verbose_name=\n 'Xu Ly Kho'), preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('khovan', '0003_nhapkho')]\n operations = [migrations.AddField(model_name='phieunhaphang', name=\n 'xulykho', field=models.BooleanField(default=False, verbose_name=\n 'Xu Ly Kho'), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-07-02 08:18\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('khovan', '0003_nhapkho'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='phieunhaphang',\n name='xulykho',\n field=models.BooleanField(default=False, verbose_name='Xu Ly Kho'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
def main():
A1, A2, A3 = map(int, input().split())
A=A1+A2+A3
if A >=22:
ans='bust'
else:
ans='win'
print(ans)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "753e062940e0580d7d33c88c1165977142dcd202",
"index": 8060,
"step-1": "<mask token>\n",
"step-2": "def main():\n A1, A2, A3 = map(int, input().split())\n A = A1 + A2 + A3\n if A >= 22:\n ans = 'bust'\n else:\n ans = 'win'\n print(ans)\n\n\n<mask token>\n",
"step-3": "def main():\n A1, A2, A3 = map(int, input().split())\n A = A1 + A2 + A3\n if A >= 22:\n ans = 'bust'\n else:\n ans = 'win'\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "#!/usr/bin/env python3\n\ndef main():\n A1, A2, A3 = map(int, input().split())\n A=A1+A2+A3\n if A >=22:\n ans='bust'\n else:\n ans='win'\n print(ans)\n \nif __name__ == \"__main__\":\n main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(len(ss) - ss.count(' '))
<|reserved_special_token_1|>
ss = str(input())
print(len(ss) - ss.count(' '))
|
flexible
|
{
"blob_id": "7f72f6a2ff0c7ceacb0f893d04c20402e850421a",
"index": 1840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(len(ss) - ss.count(' '))\n",
"step-3": "ss = str(input())\nprint(len(ss) - ss.count(' '))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class thrs:
def __init__(self, input_wave):
from numpy import mod, array, sqrt, dot,median,convolve
self.D0 = 20
self.last_det = 0
self.mu = 0.6
self.a_up = 0.2
self.a_down = 0.6
self.z_cumulative = 10
self.n_max = max(input_wave[:1000])
self.input_wave = input_wave
self.rr = 60
self.setNewPos(0)
self.lmbda = 10
def setNewPos(self, pos):
self.rr = pos-self.last_det
self.last_det = pos
from numpy import max
self.n_max = max(self.input_wave[max([pos-400,0]):min([pos+1000,len(self.input_wave)])])*1.1
if self.input_wave[pos]-self.z_cumulative > 0:
self.z_cumulative = float(self.z_cumulative + self.a_up * (self.input_wave[pos]-self.z_cumulative))
else:
self.z_cumulative = float(self.z_cumulative + self.a_down * (self.input_wave[pos]-self.z_cumulative))
from numpy import log,e
lmbda2 = log(self.mu)/((self.D0-self.rr)/2)
from numpy import isinf
if not isinf(lmbda2):
self.lmbda = lmbda2
self.A = self.z_cumulative/e**(-self.lmbda*self.D0)
return
def getThrs(self, pos):
if pos-self.last_det < self.D0:
return self.n_max
#elif pos-self.last_det < self.D1:
# return self.n_max - (self.n_max-self.z_cumulative)/(self.D1-self.D1)(pos-self.last_det)
else:
from numpy import e
return self.A * e**(-self.lmbda * (pos-self.last_det))
|
normal
|
{
"blob_id": "2cdee8799678e8ead21a0f81c42eb7ce209cfec7",
"index": 7289,
"step-1": "class thrs:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class thrs:\n <mask token>\n <mask token>\n\n def getThrs(self, pos):\n if pos - self.last_det < self.D0:\n return self.n_max\n else:\n from numpy import e\n return self.A * e ** (-self.lmbda * (pos - self.last_det))\n",
"step-3": "class thrs:\n\n def __init__(self, input_wave):\n from numpy import mod, array, sqrt, dot, median, convolve\n self.D0 = 20\n self.last_det = 0\n self.mu = 0.6\n self.a_up = 0.2\n self.a_down = 0.6\n self.z_cumulative = 10\n self.n_max = max(input_wave[:1000])\n self.input_wave = input_wave\n self.rr = 60\n self.setNewPos(0)\n self.lmbda = 10\n <mask token>\n\n def getThrs(self, pos):\n if pos - self.last_det < self.D0:\n return self.n_max\n else:\n from numpy import e\n return self.A * e ** (-self.lmbda * (pos - self.last_det))\n",
"step-4": "class thrs:\n\n def __init__(self, input_wave):\n from numpy import mod, array, sqrt, dot, median, convolve\n self.D0 = 20\n self.last_det = 0\n self.mu = 0.6\n self.a_up = 0.2\n self.a_down = 0.6\n self.z_cumulative = 10\n self.n_max = max(input_wave[:1000])\n self.input_wave = input_wave\n self.rr = 60\n self.setNewPos(0)\n self.lmbda = 10\n\n def setNewPos(self, pos):\n self.rr = pos - self.last_det\n self.last_det = pos\n from numpy import max\n self.n_max = max(self.input_wave[max([pos - 400, 0]):min([pos + \n 1000, len(self.input_wave)])]) * 1.1\n if self.input_wave[pos] - self.z_cumulative > 0:\n self.z_cumulative = float(self.z_cumulative + self.a_up * (self\n .input_wave[pos] - self.z_cumulative))\n else:\n self.z_cumulative = float(self.z_cumulative + self.a_down * (\n self.input_wave[pos] - self.z_cumulative))\n from numpy import log, e\n lmbda2 = log(self.mu) / ((self.D0 - self.rr) / 2)\n from numpy import isinf\n if not isinf(lmbda2):\n self.lmbda = lmbda2\n self.A = self.z_cumulative / e ** (-self.lmbda * self.D0)\n return\n\n def getThrs(self, pos):\n if pos - self.last_det < self.D0:\n return self.n_max\n else:\n from numpy import e\n return self.A * e ** (-self.lmbda * (pos - self.last_det))\n",
"step-5": "\n\nclass thrs:\n def __init__(self, input_wave):\n from numpy import mod, array, sqrt, dot,median,convolve\n self.D0 = 20\n self.last_det = 0\n self.mu = 0.6\n self.a_up = 0.2\n self.a_down = 0.6\n self.z_cumulative = 10\n self.n_max = max(input_wave[:1000])\n self.input_wave = input_wave\n self.rr = 60\n self.setNewPos(0)\n self.lmbda = 10\n \n def setNewPos(self, pos):\n self.rr = pos-self.last_det\n self.last_det = pos\n from numpy import max\n self.n_max = max(self.input_wave[max([pos-400,0]):min([pos+1000,len(self.input_wave)])])*1.1\n if self.input_wave[pos]-self.z_cumulative > 0:\n self.z_cumulative = float(self.z_cumulative + self.a_up * (self.input_wave[pos]-self.z_cumulative))\n else:\n self.z_cumulative = float(self.z_cumulative + self.a_down * (self.input_wave[pos]-self.z_cumulative))\n\n from numpy import log,e\n \n lmbda2 = log(self.mu)/((self.D0-self.rr)/2)\n from numpy import isinf\n if not isinf(lmbda2):\n self.lmbda = lmbda2\n self.A = self.z_cumulative/e**(-self.lmbda*self.D0)\n return \n \n def getThrs(self, pos):\n if pos-self.last_det < self.D0:\n return self.n_max\n #elif pos-self.last_det < self.D1:\n # return self.n_max - (self.n_max-self.z_cumulative)/(self.D1-self.D1)(pos-self.last_det)\n else:\n from numpy import e\n return self.A * e**(-self.lmbda * (pos-self.last_det))\n \n \n \n \n \n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from os import listdir
import re
import numpy as np
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import LeaveOneOut
import matplotlib.pyplot as plt
n_gram_range = (1, 1)
alpha_smoothing = 1e-10
lambdas_best = [1e190, 1]
def parse_doc_line(line):
parsed = re.search(r'\d[\d\s]+\d', line)
return "empty" if parsed is None else parsed[0]
def get_roc_point(clf, x_set, y_set, threshold):
    """Compute one ROC point via leave-one-partition-out cross-validation.

    For each held-out partition the vectorizer is refit on the concatenation
    of train+test documents, the classifier is trained on the train slice,
    and a message is labelled 'spmsg' when the probability in the first
    ``predict_proba`` column is at or below *threshold*, otherwise 'legit'.

    Returns a ``(false_positive_rate, true_positive_rate)`` tuple.
    """
    splitter = LeaveOneOut()
    vectorizer = CountVectorizer(ngram_range=n_gram_range)
    predicted_labels = np.empty(0)
    true_labels = np.empty(0)

    fold = 1
    for train_idx, test_idx in splitter.split(x_set):
        train_docs = [doc for part in x_set[train_idx] for doc in part]
        test_docs = [doc for part in x_set[test_idx] for doc in part]
        # Fit the vocabulary on train+test together, then slice back out the
        # two vectorized halves in the same order they were concatenated.
        vectors = vectorizer.fit_transform(train_docs + test_docs).toarray()
        train_vecs = vectors[:len(train_docs)]
        test_vecs = vectors[-len(test_docs):]
        clf.fit(train_vecs, y_set[train_idx].flatten())
        true_labels = np.append(true_labels, y_set[test_idx])
        fold_labels = ['spmsg' if probs[0] <= threshold else 'legit'
                       for probs in clf.predict_proba(test_vecs)]
        predicted_labels = np.append(predicted_labels, fold_labels)
        print(f'Finished iteration {fold} / 10')
        fold += 1

    # Confusion counts: 'spmsg' is treated as the positive class.
    pairs = list(zip(predicted_labels, true_labels))
    true_positives_ = sum(1 for p, a in pairs if p == 'spmsg' and a == 'spmsg')
    false_positives_ = sum(1 for p, a in pairs if p == 'spmsg' and a != 'spmsg')
    true_negatives_ = sum(1 for p, a in pairs if p != 'spmsg' and a == 'legit')
    false_negatives_ = sum(1 for p, a in pairs if p != 'spmsg' and a != 'legit')
    return (1 - (true_negatives_ / (true_negatives_ + false_positives_)),
            true_positives_ / (true_positives_ + false_negatives_))
def get_cv_score(clf, x_set, y_set):
    """Evaluate *clf* with leave-one-partition-out cross-validation.

    Returns a 5-tuple ``(macro_f1, true_negatives, true_positives,
    false_negatives, false_positives)`` where 'spmsg' is the positive class.
    """
    splitter = LeaveOneOut()
    vectorizer = CountVectorizer(ngram_range=n_gram_range)
    predicted = np.empty(0)
    actual = np.empty(0)

    fold = 1
    for train_idx, test_idx in splitter.split(x_set):
        train_docs = [doc for part in x_set[train_idx] for doc in part]
        test_docs = [doc for part in x_set[test_idx] for doc in part]
        # Vocabulary is refit per fold on train+test, then sliced back apart.
        vectors = vectorizer.fit_transform(train_docs + test_docs).toarray()
        train_vecs = vectors[:len(train_docs)]
        test_vecs = vectors[-len(test_docs):]
        clf.fit(train_vecs, y_set[train_idx].flatten())
        predicted = np.append(predicted, clf.predict(test_vecs))
        actual = np.append(actual, y_set[test_idx])
        print(f'Finished iteration {fold} / 10')
        fold += 1

    pairs = list(zip(predicted, actual))
    true_positives_ = sum(1 for p, a in pairs if p == 'spmsg' and a == 'spmsg')
    false_positives_ = sum(1 for p, a in pairs if p == 'spmsg' and a != 'spmsg')
    true_negatives_ = sum(1 for p, a in pairs if p != 'spmsg' and a == 'legit')
    false_negatives_ = sum(1 for p, a in pairs if p != 'spmsg' and a != 'legit')
    f1_result = f1_score(actual, predicted, average='macro')
    return f1_result, true_negatives_, true_positives_, false_negatives_, false_positives_
parts_X = []
parts_Y = []

# Load the ten message partitions from messages/part1 .. messages/part10.
# Each document contributes one feature string (two parsed digit runs joined
# by a space) and one label (the non-digit prefix of its filename,
# presumably 'spmsg' or 'legit' — matches the labels used by the CV code).
for part in range(1, 11):
    parts_X.append([])
    parts_Y.append([])
    for file in listdir(f'messages/part{part}'):
        # 'with' guarantees the handle is closed even if a readline or
        # parse_doc_line call raises (the original leaked the handle then).
        with open(f'messages/part{part}/{file}', "r") as f:
            first_digits = parse_doc_line(f.readline())
            f.readline()  # discard the line between the two parsed lines
            second_digits = parse_doc_line(f.readline())
        parts_Y[-1].append(re.findall(r'\D+', file)[0])
        parts_X[-1].append(first_digits + " " + second_digits)
# One ROC point per decision threshold 0.0, 0.1, ..., 1.0.
roc_points = [
    get_roc_point(MultinomialNB(alpha=alpha_smoothing),
                  np.array(parts_X), np.array(parts_Y), step / 10)
    for step in range(0, 11)
]
# Sweep the legit/spam class-prior ratio and record F1 plus the four
# confusion-matrix counts for the plots below.
f1_points = []
true_positives_list = []
false_positives_list = []
true_negatives_list = []
false_negatives_list = []
lambda_ratios = [1, 1e5, 1e10, 1e20, 1e40, 1e80, 1e160, 1e190]
for lambda_ratio in lambda_ratios:
    scores = get_cv_score(
        MultinomialNB(class_prior=(lambda_ratio, 1), alpha=alpha_smoothing),
        np.array(parts_X), np.array(parts_Y))
    f1, true_negatives, true_positives, false_negatives, false_positives = scores
    print(f'F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: '
          f'{false_negatives}\n False positives: {false_positives}')
    for series, value in ((f1_points, f1),
                          (true_positives_list, true_positives),
                          (false_positives_list, false_positives),
                          (true_negatives_list, true_negatives),
                          (false_negatives_list, false_negatives)):
        series.append(value)
fig, plts = plt.subplots(3)

# Panel 0: ROC curve built from the threshold sweep.
plts[0].margins(0.0)
plts[0].set_ylim(ymin=0)
plts[0].plot([fpr for fpr, _ in roc_points], [tpr for _, tpr in roc_points])
plts[0].set_ylabel('Roc Curve')

# Panel 1: macro F1 versus the prior ratio, log-scaled x axis.
plts[1].set_xscale('log')
plts[1].plot(lambda_ratios, f1_points, '-b')
plts[1].set_ylabel('F1 score')
plts[1].set_xlim(xmin=1)

# Panel 2: the four confusion counts versus the prior ratio, log-log.
plts[2].set_xscale('log')
plts[2].set_yscale('log')
for series, fmt, label in ((true_positives_list, '-r', 'True positives'),
                           (false_positives_list, '-g', 'False positives'),
                           (true_negatives_list, '-b', 'True negatives'),
                           (false_negatives_list, '-y', 'False negatives')):
    plts[2].plot(lambda_ratios, series, fmt, label=label)
plts[2].legend(loc="upper right")
plts[2].set_xlabel('Lambda_legit / Lambda_spam')
plts[2].set_xlim(xmin=1)
plt.show()
|
normal
|
{
"blob_id": "8bb67317ede277e03e8cbdefefeffa3d206ece65",
"index": 9434,
"step-1": "<mask token>\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, 
x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, 
x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\n<mask token>\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', 'r')\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + ' ' + two\n parts_Y[-1].append(re.findall('\\\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n f.close()\n<mask token>\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),\n np.array(parts_X), np.array(parts_Y), thresh / 10))\n<mask token>\nfor lambda_ratio in lambda_ratios:\n (f1, true_negatives, true_positives, false_negatives, false_positives) = (\n get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=\n alpha_smoothing), np.array(parts_X), np.array(parts_Y)))\n print(\n f\"\"\"F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: {false_negatives}\n False positives: {false_positives}\"\"\"\n )\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n 
false_negatives_list.append(false_negatives)\n<mask token>\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in\n roc_points])\nplts[0].set_ylabel('Roc Curve')\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'\n )\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'\n )\nplts[2].legend(loc='upper right')\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-3": "<mask token>\nn_gram_range = 1, 1\nalpha_smoothing = 1e-10\nlambdas_best = [1e+190, 1]\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n 
x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\nparts_X = []\nparts_Y = []\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', 'r')\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + ' ' + two\n parts_Y[-1].append(re.findall('\\\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n f.close()\nroc_points = []\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),\n np.array(parts_X), np.array(parts_Y), thresh / 10))\nf1_points = []\ntrue_positives_list = []\nfalse_positives_list = []\ntrue_negatives_list = []\nfalse_negatives_list = []\nlambda_ratios = [1, 100000.0, 10000000000.0, 1e+20, 1e+40, 1e+80, 1e+160, \n 1e+190]\nfor lambda_ratio in lambda_ratios:\n (f1, true_negatives, true_positives, false_negatives, false_positives) = (\n get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=\n alpha_smoothing), np.array(parts_X), np.array(parts_Y)))\n print(\n f\"\"\"F1 score: {f1}\n True negatives: {true_negatives}\n True positives: 
{true_positives}\n False negatives: {false_negatives}\n False positives: {false_positives}\"\"\"\n )\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\nfig, plts = plt.subplots(3)\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in\n roc_points])\nplts[0].set_ylabel('Roc Curve')\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'\n )\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'\n )\nplts[2].legend(loc='upper right')\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-4": "from os import listdir\nimport re\nimport numpy as np\nfrom sklearn.metrics import f1_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import LeaveOneOut\nimport matplotlib.pyplot as plt\nn_gram_range = 1, 1\nalpha_smoothing = 1e-10\nlambdas_best = [1e+190, 1]\n\n\ndef parse_doc_line(line):\n parsed = re.search('\\\\d[\\\\d\\\\s]+\\\\d', line)\n return 'empty' if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions, [('spmsg' if \n prediction[0] <= threshold else 'legit') for prediction in clf.\n predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = 1 - true_negatives_ / (true_negatives_ + false_positives_\n ), true_positives_ / (true_positives_ + false_negatives_)\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n 
predictions = np.empty(0)\n answers = np.empty(0)\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition\n ]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(\n x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n (true_negatives_, true_positives_, false_negatives_, false_positives_\n ) = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n elif answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return (f1_result, true_negatives_, true_positives_, false_negatives_,\n false_positives_)\n\n\nparts_X = []\nparts_Y = []\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', 'r')\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + ' ' + two\n parts_Y[-1].append(re.findall('\\\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n f.close()\nroc_points = []\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(MultinomialNB(alpha=alpha_smoothing),\n np.array(parts_X), np.array(parts_Y), thresh / 10))\nf1_points = []\ntrue_positives_list = []\nfalse_positives_list = []\ntrue_negatives_list = []\nfalse_negatives_list = []\nlambda_ratios = [1, 100000.0, 10000000000.0, 1e+20, 1e+40, 1e+80, 1e+160, \n 1e+190]\nfor lambda_ratio in lambda_ratios:\n (f1, true_negatives, 
true_positives, false_negatives, false_positives) = (\n get_cv_score(MultinomialNB(class_prior=(lambda_ratio, 1), alpha=\n alpha_smoothing), np.array(parts_X), np.array(parts_Y)))\n print(\n f\"\"\"F1 score: {f1}\n True negatives: {true_negatives}\n True positives: {true_positives}\n False negatives: {false_negatives}\n False positives: {false_positives}\"\"\"\n )\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\nfig, plts = plt.subplots(3)\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in\n roc_points])\nplts[0].set_ylabel('Roc Curve')\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives'\n )\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives'\n )\nplts[2].legend(loc='upper right')\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-5": "from os import listdir\nimport re\nimport numpy as np\nfrom sklearn.metrics import f1_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import LeaveOneOut\nimport matplotlib.pyplot as plt\n\nn_gram_range = (1, 1)\nalpha_smoothing = 1e-10\nlambdas_best = [1e190, 1]\n\n\ndef parse_doc_line(line):\n parsed = re.search(r'\\d[\\d\\s]+\\d', line)\n return \"empty\" if parsed is None else parsed[0]\n\n\ndef get_roc_point(clf, x_set, y_set, threshold):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n roc_predictions = np.empty(0)\n answers = np.empty(0)\n\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n answers = np.append(answers, y_test)\n roc_predictions = np.append(roc_predictions,\n ['spmsg' if prediction[0] <= threshold else 'legit' for prediction in\n clf.predict_proba(x_test)])\n print(f'Finished iteration {i} / 10')\n i += 1\n\n true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0\n for prediction, answer in zip(roc_predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n else:\n if answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n roc_point_ = (\n 1 - (true_negatives_ / (true_negatives_ + false_positives_)),\n true_positives_ / (true_positives_ + false_negatives_))\n return roc_point_\n\n\ndef get_cv_score(clf, x_set, y_set):\n loo = LeaveOneOut()\n vectorizer = CountVectorizer(ngram_range=n_gram_range)\n 
predictions = np.empty(0)\n answers = np.empty(0)\n\n i = 1\n for train_index, test_index in loo.split(x_set):\n x_train = [obj for partition in x_set[train_index] for obj in partition]\n x_test = [obj for partition in x_set[test_index] for obj in partition]\n x_vectorized = vectorizer.fit_transform(x_train + x_test).toarray()\n x_train, x_test = x_vectorized[:len(x_train)], x_vectorized[-len(x_test):]\n y_train, y_test = y_set[train_index], y_set[test_index]\n clf.fit(x_train, y_train.flatten())\n predictions = np.append(predictions, clf.predict(x_test))\n answers = np.append(answers, y_test)\n print(f'Finished iteration {i} / 10')\n i += 1\n\n true_negatives_, true_positives_, false_negatives_, false_positives_ = 0, 0, 0, 0\n for prediction, answer in zip(predictions, answers):\n if prediction == 'spmsg':\n if answer == 'spmsg':\n true_positives_ += 1\n else:\n false_positives_ += 1\n else:\n if answer == 'legit':\n true_negatives_ += 1\n else:\n false_negatives_ += 1\n f1_result = f1_score(answers, predictions, average='macro')\n return f1_result, true_negatives_, true_positives_, false_negatives_, false_positives_\n\n\nparts_X = []\nparts_Y = []\n\nfor part in range(1, 11):\n parts_X.append([])\n parts_Y.append([])\n for file in listdir(f'messages/part{part}'):\n f = open(f'messages/part{part}/{file}', \"r\")\n one = parse_doc_line(f.readline())\n f.readline()\n two = parse_doc_line(f.readline())\n curr_obj = one + \" \" + two\n parts_Y[-1].append(re.findall(r'\\D+', file)[0])\n parts_X[-1].append(curr_obj)\n f.close()\n\nroc_points = []\nfor thresh in range(0, 11):\n roc_points.append(get_roc_point(\n MultinomialNB(alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y), thresh / 10))\n\nf1_points = []\ntrue_positives_list = []\nfalse_positives_list = []\ntrue_negatives_list = []\nfalse_negatives_list = []\nlambda_ratios = [1, 1e5, 1e10, 1e20, 1e40, 1e80, 1e160, 1e190]\nfor lambda_ratio in lambda_ratios:\n f1, true_negatives, true_positives, 
false_negatives, false_positives = get_cv_score(\n MultinomialNB(class_prior=(lambda_ratio, 1), alpha=alpha_smoothing), np.array(parts_X), np.array(parts_Y))\n print(f'F1 score: {f1}\\n True negatives: {true_negatives}\\n True positives: {true_positives}\\n False negatives: '\n f'{false_negatives}\\n False positives: {false_positives}')\n f1_points.append(f1)\n true_positives_list.append(true_positives)\n false_positives_list.append(false_positives)\n true_negatives_list.append(true_negatives)\n false_negatives_list.append(false_negatives)\n\nfig, plts = plt.subplots(3)\nplts[0].margins(0.0)\nplts[0].set_ylim(ymin=0)\nplts[0].plot([point[0] for point in roc_points], [point[1] for point in roc_points])\nplts[0].set_ylabel('Roc Curve')\n\nplts[1].set_xscale('log')\nplts[1].plot(lambda_ratios, f1_points, '-b')\nplts[1].set_ylabel('F1 score')\nplts[1].set_xlim(xmin=1)\n\nplts[2].set_xscale('log')\nplts[2].set_yscale('log')\nplts[2].plot(lambda_ratios, true_positives_list, '-r', label='True positives')\nplts[2].plot(lambda_ratios, false_positives_list, '-g', label='False positives')\nplts[2].plot(lambda_ratios, true_negatives_list, '-b', label='True negatives')\nplts[2].plot(lambda_ratios, false_negatives_list, '-y', label='False negatives')\nplts[2].legend(loc=\"upper right\")\nplts[2].set_xlabel('Lambda_legit / Lambda_spam')\nplts[2].set_xlim(xmin=1)\nplt.show()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#
# struct_test.py
# Nazareno Bruschi <nazareno.bruschi@unibo.it>
#
# Copyright (C) 2019-2020 University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
PULPNNInstallPath = cwd = os.getcwd() + "/../"
PULPNNSrcDirs = {'script': PULPNNInstallPath + "scripts/"}
PULPNNInstallPath32bit = cwd = os.getcwd() + "/../32bit/"
PULPNNInstallPath64bit = cwd = os.getcwd() + "/../64bit/"
PULPNNTestFolder32bit = PULPNNInstallPath32bit + "test/"
PULPNNTestFolder64bit = PULPNNInstallPath64bit + "test/"
PULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + "include/",
'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit + "src/StandardConvolutions/",
'pulp_nn_matmul': PULPNNInstallPath32bit + "src/MatrixMultiplications/",
'pulp_nn_depthwise_convolution': PULPNNInstallPath32bit + "src/DepthwiseConvolutions/",
'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit + "src/LinearConvolutionsNoQuant/",
'pulp_nn_linear_convolution_q': PULPNNInstallPath32bit + "src/LinearConvolutionsQuant/",
'pulp_nn_support_function': PULPNNInstallPath32bit + "src/SupportFunctions/",
'include': PULPNNTestFolder32bit + "include/",
'src': PULPNNTestFolder32bit + "src/",
'pointwise_convolution': PULPNNTestFolder32bit + "src/StandardConvolutions/",
'matmul': PULPNNTestFolder32bit + "src/MatrixMultiplications/",
'depthwise_convolution': PULPNNTestFolder32bit + "src/DepthwiseConvolutions/",
'linear_convolution_nq': PULPNNTestFolder32bit + "src/LinearConvolutionsNoQuant/",
'linear_convolution_q': PULPNNTestFolder32bit + "src/LinearConvolutionsQuant/",
'support_function': PULPNNTestFolder32bit + "src/SupportFunctions/",
'data_allocation_pw': PULPNNTestFolder32bit + "include/DataAllocationStandardConvolutions/",
'data_allocation_dw': PULPNNTestFolder32bit + "include/DataAllocationDepthwiseConvolutions/",
'data_allocation_ln_nq': PULPNNTestFolder32bit + "include/DataAllocationLinearConvolutionsNoQuant/",
'data_allocation_ln_q': PULPNNTestFolder32bit + "include/DataAllocationLinearConvolutionsQuant/",
'golden_model_pw': PULPNNTestFolder32bit + "include/GoldenModelStandardConvolutions/",
'golden_model_dw': PULPNNTestFolder32bit + "include/GoldenModelDepthwiseConvolutions/",
'golden_model_ln_nq': PULPNNTestFolder32bit + "include/GoldenModelLinearConvolutionsNoQuant/",
'golden_model_ln_q': PULPNNTestFolder32bit + "include/GoldenModelLinearConvolutionsQuant/",
'test': PULPNNTestFolder32bit}
PULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + "include/",
'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit + "src/StandardConvolutions/",
'pulp_nn_matmul': PULPNNInstallPath64bit + "src/MatrixMultiplications/",
'pulp_nn_depthwise_convolution': PULPNNInstallPath64bit + "src/DepthwiseConvolutions/",
'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit + "src/LinearConvolutionsNoQuant/",
'pulp_nn_linear_convolution_q': PULPNNInstallPath64bit + "src/LinearConvolutionsQuant/",
'pulp_nn_support_function': PULPNNInstallPath64bit + "src/SupportFunctions/",
'include': PULPNNTestFolder64bit + "include/",
'src': PULPNNTestFolder64bit + "src/",
'pointwise_convolution': PULPNNTestFolder64bit + "src/StandardConvolutions/",
'matmul': PULPNNTestFolder64bit + "src/MatrixMultiplications/",
'depthwise_convolution': PULPNNTestFolder64bit + "src/DepthwiseConvolutions/",
'linear_convolution_nq': PULPNNTestFolder64bit + "src/LinearConvolutionsNoQuant/",
'linear_convolution_q': PULPNNTestFolder64bit + "src/LinearConvolutionsQuant/",
'support_function': PULPNNTestFolder64bit + "src/SupportFunctions/",
'data_allocation_pw': PULPNNTestFolder64bit + "include/DataAllocationStandardConvolutions/",
'data_allocation_dw': PULPNNTestFolder64bit + "include/DataAllocationDepthwiseConvolutions/",
'data_allocation_ln_nq': PULPNNTestFolder64bit + "include/DataAllocationLinearConvolutionsNoQuant/",
'data_allocation_ln_q': PULPNNTestFolder64bit + "include/DataAllocationLinearConvolutionsQuant/",
'golden_model_pw': PULPNNTestFolder64bit + "include/GoldenModelStandardConvolutions/",
'golden_model_dw': PULPNNTestFolder64bit + "include/GoldenModelDepthwiseConvolutions/",
'golden_model_ln_nq': PULPNNTestFolder64bit + "include/GoldenModelLinearConvolutionsNoQuant/",
'golden_model_ln_q': PULPNNTestFolder64bit + "include/GoldenModelLinearConvolutionsQuant/",
'test': PULPNNTestFolder64bit}
|
normal
|
{
"blob_id": "d8d0c181fcfc9e0692369cc7a65259c43a68e931",
"index": 5688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nPULPNNInstallPath = cwd = os.getcwd() + '/../'\nPULPNNSrcDirs = {'script': PULPNNInstallPath + 'scripts/'}\nPULPNNInstallPath32bit = cwd = os.getcwd() + '/../32bit/'\nPULPNNInstallPath64bit = cwd = os.getcwd() + '/../64bit/'\nPULPNNTestFolder32bit = PULPNNInstallPath32bit + 'test/'\nPULPNNTestFolder64bit = PULPNNInstallPath64bit + 'test/'\nPULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath32bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath32bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath32bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath32bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder32bit + 'include/',\n 'src': PULPNNTestFolder32bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder32bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder32bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder32bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder32bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder32bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder32bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder32bit + 'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder32bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder32bit + 
'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder32bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder32bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder32bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder32bit}\nPULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath64bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath64bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath64bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath64bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder64bit + 'include/',\n 'src': PULPNNTestFolder64bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder64bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder64bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder64bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder64bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder64bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder64bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder64bit + 'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder64bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder64bit + 
'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder64bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder64bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder64bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder64bit}\n",
"step-3": "import os\nPULPNNInstallPath = cwd = os.getcwd() + '/../'\nPULPNNSrcDirs = {'script': PULPNNInstallPath + 'scripts/'}\nPULPNNInstallPath32bit = cwd = os.getcwd() + '/../32bit/'\nPULPNNInstallPath64bit = cwd = os.getcwd() + '/../64bit/'\nPULPNNTestFolder32bit = PULPNNInstallPath32bit + 'test/'\nPULPNNTestFolder64bit = PULPNNInstallPath64bit + 'test/'\nPULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath32bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath32bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath32bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath32bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder32bit + 'include/',\n 'src': PULPNNTestFolder32bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder32bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder32bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder32bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder32bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder32bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder32bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder32bit + 'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder32bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder32bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder32bit + 
'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder32bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder32bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder32bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder32bit}\nPULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + 'include/',\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit +\n 'src/StandardConvolutions/', 'pulp_nn_matmul': PULPNNInstallPath64bit +\n 'src/MatrixMultiplications/', 'pulp_nn_depthwise_convolution': \n PULPNNInstallPath64bit + 'src/DepthwiseConvolutions/',\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit +\n 'src/LinearConvolutionsNoQuant/', 'pulp_nn_linear_convolution_q': \n PULPNNInstallPath64bit + 'src/LinearConvolutionsQuant/',\n 'pulp_nn_support_function': PULPNNInstallPath64bit +\n 'src/SupportFunctions/', 'include': PULPNNTestFolder64bit + 'include/',\n 'src': PULPNNTestFolder64bit + 'src/', 'pointwise_convolution': \n PULPNNTestFolder64bit + 'src/StandardConvolutions/', 'matmul': \n PULPNNTestFolder64bit + 'src/MatrixMultiplications/',\n 'depthwise_convolution': PULPNNTestFolder64bit +\n 'src/DepthwiseConvolutions/', 'linear_convolution_nq': \n PULPNNTestFolder64bit + 'src/LinearConvolutionsNoQuant/',\n 'linear_convolution_q': PULPNNTestFolder64bit +\n 'src/LinearConvolutionsQuant/', 'support_function': \n PULPNNTestFolder64bit + 'src/SupportFunctions/', 'data_allocation_pw': \n PULPNNTestFolder64bit + 'include/DataAllocationStandardConvolutions/',\n 'data_allocation_dw': PULPNNTestFolder64bit +\n 'include/DataAllocationDepthwiseConvolutions/', 'data_allocation_ln_nq':\n PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsNoQuant/',\n 'data_allocation_ln_q': PULPNNTestFolder64bit +\n 'include/DataAllocationLinearConvolutionsQuant/', 'golden_model_pw': \n PULPNNTestFolder64bit + 
'include/GoldenModelStandardConvolutions/',\n 'golden_model_dw': PULPNNTestFolder64bit +\n 'include/GoldenModelDepthwiseConvolutions/', 'golden_model_ln_nq': \n PULPNNTestFolder64bit + 'include/GoldenModelLinearConvolutionsNoQuant/',\n 'golden_model_ln_q': PULPNNTestFolder64bit +\n 'include/GoldenModelLinearConvolutionsQuant/', 'test':\n PULPNNTestFolder64bit}\n",
"step-4": "#\n# struct_test.py\n# Nazareno Bruschi <nazareno.bruschi@unibo.it>\n#\n# Copyright (C) 2019-2020 University of Bologna\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nPULPNNInstallPath = cwd = os.getcwd() + \"/../\"\nPULPNNSrcDirs = {'script': PULPNNInstallPath + \"scripts/\"}\nPULPNNInstallPath32bit = cwd = os.getcwd() + \"/../32bit/\"\nPULPNNInstallPath64bit = cwd = os.getcwd() + \"/../64bit/\"\nPULPNNTestFolder32bit = PULPNNInstallPath32bit + \"test/\"\nPULPNNTestFolder64bit = PULPNNInstallPath64bit + \"test/\"\nPULPNNSrcDirs32bit = {'pulp_nn_inc': PULPNNInstallPath32bit + \"include/\",\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath32bit + \"src/StandardConvolutions/\",\n 'pulp_nn_matmul': PULPNNInstallPath32bit + \"src/MatrixMultiplications/\",\n 'pulp_nn_depthwise_convolution': PULPNNInstallPath32bit + \"src/DepthwiseConvolutions/\",\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath32bit + \"src/LinearConvolutionsNoQuant/\",\n 'pulp_nn_linear_convolution_q': PULPNNInstallPath32bit + \"src/LinearConvolutionsQuant/\",\n 'pulp_nn_support_function': PULPNNInstallPath32bit + \"src/SupportFunctions/\",\n 'include': PULPNNTestFolder32bit + \"include/\",\n 'src': PULPNNTestFolder32bit + \"src/\",\n 'pointwise_convolution': PULPNNTestFolder32bit + \"src/StandardConvolutions/\",\n 'matmul': PULPNNTestFolder32bit + \"src/MatrixMultiplications/\",\n 'depthwise_convolution': PULPNNTestFolder32bit + \"src/DepthwiseConvolutions/\",\n 
'linear_convolution_nq': PULPNNTestFolder32bit + \"src/LinearConvolutionsNoQuant/\",\n 'linear_convolution_q': PULPNNTestFolder32bit + \"src/LinearConvolutionsQuant/\",\n 'support_function': PULPNNTestFolder32bit + \"src/SupportFunctions/\",\n 'data_allocation_pw': PULPNNTestFolder32bit + \"include/DataAllocationStandardConvolutions/\",\n 'data_allocation_dw': PULPNNTestFolder32bit + \"include/DataAllocationDepthwiseConvolutions/\",\n 'data_allocation_ln_nq': PULPNNTestFolder32bit + \"include/DataAllocationLinearConvolutionsNoQuant/\",\n 'data_allocation_ln_q': PULPNNTestFolder32bit + \"include/DataAllocationLinearConvolutionsQuant/\",\n 'golden_model_pw': PULPNNTestFolder32bit + \"include/GoldenModelStandardConvolutions/\",\n 'golden_model_dw': PULPNNTestFolder32bit + \"include/GoldenModelDepthwiseConvolutions/\",\n 'golden_model_ln_nq': PULPNNTestFolder32bit + \"include/GoldenModelLinearConvolutionsNoQuant/\",\n 'golden_model_ln_q': PULPNNTestFolder32bit + \"include/GoldenModelLinearConvolutionsQuant/\",\n 'test': PULPNNTestFolder32bit}\nPULPNNSrcDirs64bit = {'pulp_nn_inc': PULPNNInstallPath64bit + \"include/\",\n 'pulp_nn_pointwise_convolution': PULPNNInstallPath64bit + \"src/StandardConvolutions/\",\n 'pulp_nn_matmul': PULPNNInstallPath64bit + \"src/MatrixMultiplications/\",\n 'pulp_nn_depthwise_convolution': PULPNNInstallPath64bit + \"src/DepthwiseConvolutions/\",\n 'pulp_nn_linear_convolution_nq': PULPNNInstallPath64bit + \"src/LinearConvolutionsNoQuant/\",\n 'pulp_nn_linear_convolution_q': PULPNNInstallPath64bit + \"src/LinearConvolutionsQuant/\",\n 'pulp_nn_support_function': PULPNNInstallPath64bit + \"src/SupportFunctions/\",\n 'include': PULPNNTestFolder64bit + \"include/\",\n 'src': PULPNNTestFolder64bit + \"src/\",\n 'pointwise_convolution': PULPNNTestFolder64bit + \"src/StandardConvolutions/\",\n 'matmul': PULPNNTestFolder64bit + \"src/MatrixMultiplications/\",\n 'depthwise_convolution': PULPNNTestFolder64bit + \"src/DepthwiseConvolutions/\",\n 
'linear_convolution_nq': PULPNNTestFolder64bit + \"src/LinearConvolutionsNoQuant/\",\n 'linear_convolution_q': PULPNNTestFolder64bit + \"src/LinearConvolutionsQuant/\",\n 'support_function': PULPNNTestFolder64bit + \"src/SupportFunctions/\",\n 'data_allocation_pw': PULPNNTestFolder64bit + \"include/DataAllocationStandardConvolutions/\",\n 'data_allocation_dw': PULPNNTestFolder64bit + \"include/DataAllocationDepthwiseConvolutions/\",\n 'data_allocation_ln_nq': PULPNNTestFolder64bit + \"include/DataAllocationLinearConvolutionsNoQuant/\",\n 'data_allocation_ln_q': PULPNNTestFolder64bit + \"include/DataAllocationLinearConvolutionsQuant/\",\n 'golden_model_pw': PULPNNTestFolder64bit + \"include/GoldenModelStandardConvolutions/\",\n 'golden_model_dw': PULPNNTestFolder64bit + \"include/GoldenModelDepthwiseConvolutions/\",\n 'golden_model_ln_nq': PULPNNTestFolder64bit + \"include/GoldenModelLinearConvolutionsNoQuant/\",\n 'golden_model_ln_q': PULPNNTestFolder64bit + \"include/GoldenModelLinearConvolutionsQuant/\",\n 'test': PULPNNTestFolder64bit}",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for case in range(1, cases + 1):
digits = [False] * 10
n = int(rf.readline())
if n == 0:
wf.write('Case #%s: INSOMNIA\n' % case)
continue
for i in range(1, 999999):
cur = n * i
for c in str(cur):
digits[int(c)] = True
if all(digits):
wf.write('Case #%s: %s\n' % (case, cur))
break
<|reserved_special_token_1|>
rf = open('A-large.in', 'r')
wf = open('A-large.out', 'w')
cases = int(rf.readline())
for case in range(1, cases + 1):
digits = [False] * 10
n = int(rf.readline())
if n == 0:
wf.write('Case #%s: INSOMNIA\n' % case)
continue
for i in range(1, 999999):
cur = n * i
for c in str(cur):
digits[int(c)] = True
if all(digits):
wf.write('Case #%s: %s\n' % (case, cur))
break
|
flexible
|
{
"blob_id": "0074b0cd1e4317e36ef4a41f8179464c2ec6c197",
"index": 8250,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor case in range(1, cases + 1):\n digits = [False] * 10\n n = int(rf.readline())\n if n == 0:\n wf.write('Case #%s: INSOMNIA\\n' % case)\n continue\n for i in range(1, 999999):\n cur = n * i\n for c in str(cur):\n digits[int(c)] = True\n if all(digits):\n wf.write('Case #%s: %s\\n' % (case, cur))\n break\n",
"step-3": "rf = open('A-large.in', 'r')\nwf = open('A-large.out', 'w')\ncases = int(rf.readline())\nfor case in range(1, cases + 1):\n digits = [False] * 10\n n = int(rf.readline())\n if n == 0:\n wf.write('Case #%s: INSOMNIA\\n' % case)\n continue\n for i in range(1, 999999):\n cur = n * i\n for c in str(cur):\n digits[int(c)] = True\n if all(digits):\n wf.write('Case #%s: %s\\n' % (case, cur))\n break\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from sqlalchemy import literal, Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from common.db import Base
class Airplane(Base):
__tablename__ = 'airplanes'
id = Column(Integer, primary_key=True)
icao_code = Column(String(6), unique=True, nullable=False) # ICAO 24-bit identifier
airline_id = Column(Integer, ForeignKey('airlines.id'))
airline = relationship('Airline', backref='airplanes')
manufacturer = Column(String)
model = Column(String)
def __init__(self, icao_code, airline, manufacturer=None, model=None):
self.icao_code = icao_code
self.airline = airline
self.manufacturer = manufacturer
self.model = model
def __repr__(self):
return 'Airplane({icao_code}, {airline})'.format(
icao_code=self.icao_code,
airline=self.airline)
@staticmethod
def exists_airplane(session, icao_code):
q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
return session.query(literal(True)).filter(q.exists()).scalar()
@staticmethod
def airplane_from_icao_code(session, icao_code):
return session.query(Airplane).filter(Airplane.icao_code == icao_code).first()
|
normal
|
{
"blob_id": "98dac1ea372f16ecdb818fbe3287ab7e51a0d67c",
"index": 7916,
"step-1": "<mask token>\n\n\nclass Airplane(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n <mask token>\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Airplane(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n <mask token>\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code\n ).first()\n",
"step-3": "<mask token>\n\n\nclass Airplane(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n\n def __repr__(self):\n return 'Airplane({icao_code}, {airline})'.format(icao_code=self.\n icao_code, airline=self.airline)\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code\n ).first()\n",
"step-4": "from sqlalchemy import literal, Column, String, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom common.db import Base\n\n\nclass Airplane(Base):\n __tablename__ = 'airplanes'\n id = Column(Integer, primary_key=True)\n icao_code = Column(String(6), unique=True, nullable=False)\n airline_id = Column(Integer, ForeignKey('airlines.id'))\n airline = relationship('Airline', backref='airplanes')\n manufacturer = Column(String)\n model = Column(String)\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n\n def __repr__(self):\n return 'Airplane({icao_code}, {airline})'.format(icao_code=self.\n icao_code, airline=self.airline)\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code\n ).first()\n",
"step-5": "from sqlalchemy import literal, Column, String, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom common.db import Base\n\n\nclass Airplane(Base):\n __tablename__ = 'airplanes'\n\n id = Column(Integer, primary_key=True)\n icao_code = Column(String(6), unique=True, nullable=False) # ICAO 24-bit identifier\n airline_id = Column(Integer, ForeignKey('airlines.id'))\n airline = relationship('Airline', backref='airplanes')\n manufacturer = Column(String)\n model = Column(String)\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n\n def __repr__(self):\n return 'Airplane({icao_code}, {airline})'.format(\n icao_code=self.icao_code,\n airline=self.airline)\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code).first()\n \n ",
"step-ids": [
3,
4,
5,
7,
8
]
}
|
[
3,
4,
5,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def steps_in_tower_of_hanoi(no_of_disks):
res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def towers_of_hanoi(n, src, dest, temp, res):
if n == 1:
s = 'disk 1 from ', src, '->', dest
res.append(s)
return
towers_of_hanoi(n - 1, src, temp, dest, res)
s = 'disk ', n, ' from ', src, '->', dest
res.append(s)
towers_of_hanoi(n - 1, temp, dest, src, res)
return res
def steps_in_tower_of_hanoi(no_of_disks):
res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def towers_of_hanoi(n, src, dest, temp, res):
if n == 1:
s = 'disk 1 from ', src, '->', dest
res.append(s)
return
towers_of_hanoi(n - 1, src, temp, dest, res)
s = 'disk ', n, ' from ', src, '->', dest
res.append(s)
towers_of_hanoi(n - 1, temp, dest, src, res)
return res
def steps_in_tower_of_hanoi(no_of_disks):
res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])
return res
if __name__ == '__main__':
no_of_disks = int(input())
res = steps_in_tower_of_hanoi(no_of_disks)
print('\n'.join([' '.join(map(str, x)) for x in res]))
print('\n')
<|reserved_special_token_1|>
def towers_of_hanoi(n, src, dest, temp,res):
if n==1:
s = 'disk 1 from ',src,'->',dest
res.append(s)
return
towers_of_hanoi(n-1, src, temp, dest, res)
s = 'disk ',n, ' from ',src,'->',dest
res.append(s)
towers_of_hanoi(n-1, temp, dest, src, res)
return res
def steps_in_tower_of_hanoi(no_of_disks):
res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B',[])
return res
if __name__ == "__main__":
no_of_disks = int(input())
res = steps_in_tower_of_hanoi(no_of_disks)
print('\n'.join([' '.join(map(str, x)) for x in res]))
print('\n')
|
flexible
|
{
"blob_id": "f23bfef2daf8fda4249435821dbc2e0b1846e3d6",
"index": 9842,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\n<mask token>\n",
"step-3": "def towers_of_hanoi(n, src, dest, temp, res):\n if n == 1:\n s = 'disk 1 from ', src, '->', dest\n res.append(s)\n return\n towers_of_hanoi(n - 1, src, temp, dest, res)\n s = 'disk ', n, ' from ', src, '->', dest\n res.append(s)\n towers_of_hanoi(n - 1, temp, dest, src, res)\n return res\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\n<mask token>\n",
"step-4": "def towers_of_hanoi(n, src, dest, temp, res):\n if n == 1:\n s = 'disk 1 from ', src, '->', dest\n res.append(s)\n return\n towers_of_hanoi(n - 1, src, temp, dest, res)\n s = 'disk ', n, ' from ', src, '->', dest\n res.append(s)\n towers_of_hanoi(n - 1, temp, dest, src, res)\n return res\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\nif __name__ == '__main__':\n no_of_disks = int(input())\n res = steps_in_tower_of_hanoi(no_of_disks)\n print('\\n'.join([' '.join(map(str, x)) for x in res]))\n print('\\n')\n",
"step-5": "\ndef towers_of_hanoi(n, src, dest, temp,res):\n if n==1:\n s = 'disk 1 from ',src,'->',dest\n res.append(s)\n return\n towers_of_hanoi(n-1, src, temp, dest, res)\n s = 'disk ',n, ' from ',src,'->',dest\n res.append(s)\n towers_of_hanoi(n-1, temp, dest, src, res)\n return res\n \ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B',[])\n return res\n\nif __name__ == \"__main__\":\n\n no_of_disks = int(input())\n\n res = steps_in_tower_of_hanoi(no_of_disks)\n\n print('\\n'.join([' '.join(map(str, x)) for x in res]))\n print('\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCadastro(BaseTest):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCadastro(BaseTest):
def test_cadastro_com_sucesso(self):
self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
self.campoDeTreinamento.fill_name('Everton')
self.campoDeTreinamento.fill_sobrenome('Araujo')
self.campoDeTreinamento.select_sexo_masculino()
self.campoDeTreinamento.cadastra()
time.sleep(3)
<|reserved_special_token_1|>
import time
from tests.test_base import BaseTest
from pages.campo_de_treinamento_page import CampoDeTreinamentoPage
class TestCadastro(BaseTest):
def test_cadastro_com_sucesso(self):
self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
self.campoDeTreinamento.fill_name('Everton')
self.campoDeTreinamento.fill_sobrenome('Araujo')
self.campoDeTreinamento.select_sexo_masculino()
self.campoDeTreinamento.cadastra()
time.sleep(3)
<|reserved_special_token_1|>
import time
from tests.test_base import BaseTest
from pages.campo_de_treinamento_page import CampoDeTreinamentoPage
class TestCadastro(BaseTest):
def test_cadastro_com_sucesso(self):
self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)
self.campoDeTreinamento.fill_name("Everton")
self.campoDeTreinamento.fill_sobrenome("Araujo")
self.campoDeTreinamento.select_sexo_masculino()
self.campoDeTreinamento.cadastra()
time.sleep(3)
|
flexible
|
{
"blob_id": "4e50a7a757bacb04dc8f292bdaafb03c86042e6c",
"index": 1633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCadastro(BaseTest):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCadastro(BaseTest):\n\n def test_cadastro_com_sucesso(self):\n self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)\n self.campoDeTreinamento.fill_name('Everton')\n self.campoDeTreinamento.fill_sobrenome('Araujo')\n self.campoDeTreinamento.select_sexo_masculino()\n self.campoDeTreinamento.cadastra()\n time.sleep(3)\n",
"step-4": "import time\nfrom tests.test_base import BaseTest\nfrom pages.campo_de_treinamento_page import CampoDeTreinamentoPage\n\n\nclass TestCadastro(BaseTest):\n\n def test_cadastro_com_sucesso(self):\n self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)\n self.campoDeTreinamento.fill_name('Everton')\n self.campoDeTreinamento.fill_sobrenome('Araujo')\n self.campoDeTreinamento.select_sexo_masculino()\n self.campoDeTreinamento.cadastra()\n time.sleep(3)\n",
"step-5": "import time\nfrom tests.test_base import BaseTest\nfrom pages.campo_de_treinamento_page import CampoDeTreinamentoPage\n\n\nclass TestCadastro(BaseTest):\n def test_cadastro_com_sucesso(self):\n self.campoDeTreinamento = CampoDeTreinamentoPage(self.driver)\n self.campoDeTreinamento.fill_name(\"Everton\")\n self.campoDeTreinamento.fill_sobrenome(\"Araujo\")\n self.campoDeTreinamento.select_sexo_masculino()\n self.campoDeTreinamento.cadastra()\n time.sleep(3)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class SampleAdmin(admin.ModelAdmin):
inlines = [MultipleFileInline]
prepopulated_fields = {'slug': ('heading',)}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MultipleFileInline(admin.TabularInline):
model = SampleMultipleFile
class SampleAdmin(admin.ModelAdmin):
inlines = [MultipleFileInline]
prepopulated_fields = {'slug': ('heading',)}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Assignment)
admin.site.register(Review)
admin.site.register(Question)
class MultipleFileInline(admin.TabularInline):
model = SampleMultipleFile
class SampleAdmin(admin.ModelAdmin):
inlines = [MultipleFileInline]
prepopulated_fields = {'slug': ('heading',)}
admin.site.register(Sample, SampleAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from main.models import Assignment, Review, Sample, Question, SampleMultipleFile
admin.site.register(Assignment)
admin.site.register(Review)
admin.site.register(Question)
class MultipleFileInline(admin.TabularInline):
model = SampleMultipleFile
class SampleAdmin(admin.ModelAdmin):
inlines = [MultipleFileInline]
prepopulated_fields = {'slug': ('heading',)}
admin.site.register(Sample, SampleAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from main.models import Assignment, Review, Sample, Question, SampleMultipleFile
# Register your models here.
admin.site.register(Assignment)
admin.site.register(Review)
admin.site.register(Question)
class MultipleFileInline(admin.TabularInline):
model = SampleMultipleFile
class SampleAdmin(admin.ModelAdmin):
inlines = [ MultipleFileInline ]
prepopulated_fields = {'slug': ('heading',)}
admin.site.register(Sample, SampleAdmin)
|
flexible
|
{
"blob_id": "d18c45c08face08ce8f7dad915f1896c24c95cbf",
"index": 2991,
"step-1": "<mask token>\n\n\nclass SampleAdmin(admin.ModelAdmin):\n inlines = [MultipleFileInline]\n prepopulated_fields = {'slug': ('heading',)}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MultipleFileInline(admin.TabularInline):\n model = SampleMultipleFile\n\n\nclass SampleAdmin(admin.ModelAdmin):\n inlines = [MultipleFileInline]\n prepopulated_fields = {'slug': ('heading',)}\n\n\n<mask token>\n",
"step-3": "<mask token>\nadmin.site.register(Assignment)\nadmin.site.register(Review)\nadmin.site.register(Question)\n\n\nclass MultipleFileInline(admin.TabularInline):\n model = SampleMultipleFile\n\n\nclass SampleAdmin(admin.ModelAdmin):\n inlines = [MultipleFileInline]\n prepopulated_fields = {'slug': ('heading',)}\n\n\nadmin.site.register(Sample, SampleAdmin)\n",
"step-4": "from django.contrib import admin\nfrom main.models import Assignment, Review, Sample, Question, SampleMultipleFile\nadmin.site.register(Assignment)\nadmin.site.register(Review)\nadmin.site.register(Question)\n\n\nclass MultipleFileInline(admin.TabularInline):\n model = SampleMultipleFile\n\n\nclass SampleAdmin(admin.ModelAdmin):\n inlines = [MultipleFileInline]\n prepopulated_fields = {'slug': ('heading',)}\n\n\nadmin.site.register(Sample, SampleAdmin)\n",
"step-5": "from django.contrib import admin\nfrom main.models import Assignment, Review, Sample, Question, SampleMultipleFile\n\n# Register your models here.\nadmin.site.register(Assignment)\nadmin.site.register(Review)\nadmin.site.register(Question)\n\n\nclass MultipleFileInline(admin.TabularInline):\n\tmodel = SampleMultipleFile\n\n\nclass SampleAdmin(admin.ModelAdmin):\n\tinlines = [ MultipleFileInline ]\n\tprepopulated_fields = {'slug': ('heading',)}\n\nadmin.site.register(Sample, SampleAdmin)",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class director(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class director(models.Model):
_inherit = 'base.entidad'
_name = 'cinemateca.director'
name = fields.Char(string='name', required=True, help='Nombre del director'
)
apellidos = fields.Char(string='apellidos', required=True, help=
'Apellidos del director')
pelicula_ids = fields.One2many('cinemateca.pelicula', 'director_id',
string='sesion')
<|reserved_special_token_1|>
from odoo import models, fields, api
class director(models.Model):
_inherit = 'base.entidad'
_name = 'cinemateca.director'
name = fields.Char(string='name', required=True, help='Nombre del director'
)
apellidos = fields.Char(string='apellidos', required=True, help=
'Apellidos del director')
pelicula_ids = fields.One2many('cinemateca.pelicula', 'director_id',
string='sesion')
<|reserved_special_token_1|>
from odoo import models,fields, api
class director(models.Model):
#Clasica
_inherit = 'base.entidad'
_name = 'cinemateca.director'
name = fields.Char(string="name", required=True, help="Nombre del director")
apellidos = fields.Char(string="apellidos", required=True, help="Apellidos del director")
pelicula_ids = fields.One2many("cinemateca.pelicula", "director_id", string="sesion")
|
flexible
|
{
"blob_id": "006f499eed7cd5d73bb0cb9b242c90726fff35c1",
"index": 3185,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass director(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass director(models.Model):\n _inherit = 'base.entidad'\n _name = 'cinemateca.director'\n name = fields.Char(string='name', required=True, help='Nombre del director'\n )\n apellidos = fields.Char(string='apellidos', required=True, help=\n 'Apellidos del director')\n pelicula_ids = fields.One2many('cinemateca.pelicula', 'director_id',\n string='sesion')\n",
"step-4": "from odoo import models, fields, api\n\n\nclass director(models.Model):\n _inherit = 'base.entidad'\n _name = 'cinemateca.director'\n name = fields.Char(string='name', required=True, help='Nombre del director'\n )\n apellidos = fields.Char(string='apellidos', required=True, help=\n 'Apellidos del director')\n pelicula_ids = fields.One2many('cinemateca.pelicula', 'director_id',\n string='sesion')\n",
"step-5": "from odoo import models,fields, api\n\nclass director(models.Model):\n #Clasica\n _inherit = 'base.entidad'\n _name = 'cinemateca.director'\n name = fields.Char(string=\"name\", required=True, help=\"Nombre del director\")\n apellidos = fields.Char(string=\"apellidos\", required=True, help=\"Apellidos del director\")\n pelicula_ids = fields.One2many(\"cinemateca.pelicula\", \"director_id\", string=\"sesion\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import *
class Solution:
def isMonotonic(self, A: List[int]) ->bool:
flag = 0
for i in range(1, len(A)):
diff = A[i] - A[i - 1]
if diff * flag < 0:
return False
if flag == 0:
flag = diff
return True
sl = Solution()
inp = [1, 2, 2, 2, 1]
print(sl.isMonotonic(inp))
|
normal
|
{
"blob_id": "a55d1286485e66a64aa78259ad1b1922c5c4c831",
"index": 4385,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\n<mask token>\nprint(sl.isMonotonic(inp))\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\nsl = Solution()\ninp = [1, 2, 2, 2, 1]\nprint(sl.isMonotonic(inp))\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def isMonotonic(self, A: List[int]) ->bool:\n flag = 0\n for i in range(1, len(A)):\n diff = A[i] - A[i - 1]\n if diff * flag < 0:\n return False\n if flag == 0:\n flag = diff\n return True\n\n\nsl = Solution()\ninp = [1, 2, 2, 2, 1]\nprint(sl.isMonotonic(inp))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
from django.apps import AppConfig
class ShortenConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'shorten'
|
normal
|
{
"blob_id": "8c2920db7fc49d56aa8da6289cd22272ed3e3283",
"index": 4402,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ShortenConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ShortenConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'shorten'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ShortenConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'shorten'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import functools
import re
from pprint import pprint
def heading(*, marker=''):
'''
Add a new line with the same number of heading markers as the characters in the title
Need to specify marker to one of the valid rst line markups
'''
def wrapper_heading(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
title = func(*args, **kwargs)
class_obj, passed_title, = args
title = title.strip()
return f'\n{title}\n{marker*len(title)}\n' if passed_title.strip() != title else passed_title
return wrapper
return wrapper_heading
def code_pre_block(func):
'''
formats a code block according to rst format
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
block = func(*args, **kwargs)
new_block = '\n.. code-block::\n\n'
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def source_block(func):
'''
formats code from <source lang="some_language"> blocks
where the language is optional
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
lang, block = func(*args, **kwargs)
new_block = f'\n\n.. code-block:: {lang or ""}\n\n'
for line in block.split('\n'):
new_block += f' {line}\n'
return new_block
return wrapper
def list_block(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
items = func(*args, **kwargs)
new_list = '\n'
prev_indent = 0
sub_list_started = False
for line in items.split('\n'):
num_markers = get_num_markers(line) # how many # there are
indent_by = (num_markers - 1) * 2 # no indentation for first level
def get_printable_part(string):
'''
trim out up to a colon or semi-colon after a # list marker
'''
return string[num_markers+1:].strip() if string[num_markers] in [':', ';', '*'] else string[num_markers:].strip()
# if # is followed by ; or :, it is a continuation of the previous list item
# this can just be indented
if line[num_markers] == '*': # bullet list item
if not sub_list_started:
new_list += f'\n{" " * num_markers*2}* {get_printable_part(line)}\n'
sub_list_started = True
else:
new_list += f'{" " * num_markers*2}* {get_printable_part(line)}\n'
continue
sub_list_started = False
if line[num_markers] in [':', ';']:
line = f'{" " * num_markers*2}{get_printable_part(line)}'
else:
line = f'{" " * indent_by}* {get_printable_part(line)}'
if indent_by != prev_indent: # starting a new level or going back to old level
line = f'\n{line}' # new level starts a new line
prev_indent = indent_by
new_list += f'{line}\n'
return new_list
return wrapper
def get_num_markers(string):
indent_by = 0
for i in range(len(string)):
if string[i] == '#':
indent_by += 1
else:
break
return indent_by
@list_block
def list_block_converter(match_group):
return match_group.group(1)
@code_pre_block
def code_pre_block_converter(match_group):
return match_group.group(2)
@source_block
def source_block_converter(match_group):
'''
formats a code block from <source lang="some_language">
the language part is optional
'''
return (match_group.group(1), match_group.group(2))
if __name__ == '__main__':
pass
|
normal
|
{
"blob_id": "d1b2420778e788d78be2a12a27c80f5fa1b15a0f",
"index": 465,
"step-1": "<mask token>\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 
1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef heading(*, marker=''):\n \"\"\"\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n \"\"\"\n\n def wrapper_heading(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title = args\n title = title.strip()\n return f'\\n{title}\\n{marker * len(title)}\\n' if passed_title.strip(\n ) != title else passed_title\n return wrapper\n return wrapper_heading\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. 
code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef heading(*, marker=''):\n \"\"\"\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n \"\"\"\n\n def wrapper_heading(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title = args\n title = title.strip()\n return f'\\n{title}\\n{marker * len(title)}\\n' if passed_title.strip(\n ) != title else passed_title\n return wrapper\n return wrapper_heading\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. 
code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import functools\nimport re\nfrom pprint import pprint\n\n\ndef heading(*, marker=''):\n \"\"\"\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n \"\"\"\n\n def wrapper_heading(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title = args\n title = title.strip()\n return f'\\n{title}\\n{marker * len(title)}\\n' if passed_title.strip(\n ) != title else passed_title\n return wrapper\n return wrapper_heading\n\n\ndef code_pre_block(func):\n \"\"\"\n formats a code block according to rst format\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef source_block(func):\n \"\"\"\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f\"\\n\\n.. 
code-block:: {lang or ''}\\n\\n\"\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\n\ndef list_block(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line)\n indent_by = (num_markers - 1) * 2\n\n def get_printable_part(string):\n \"\"\"\n trim out up to a colon or semi-colon after a # list marker\n \"\"\"\n return string[num_markers + 1:].strip() if string[num_markers\n ] in [':', ';', '*'] else string[num_markers:].strip()\n if line[num_markers] == '*':\n if not sub_list_started:\n new_list += (\n f\"\\n{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n sub_list_started = True\n else:\n new_list += (\n f\"{' ' * num_markers * 2}* {get_printable_part(line)}\\n\"\n )\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f\"{' ' * num_markers * 2}{get_printable_part(line)}\"\n else:\n line = f\"{' ' * indent_by}* {get_printable_part(line)}\"\n if indent_by != prev_indent:\n line = f'\\n{line}'\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n\n@source_block\ndef source_block_converter(match_group):\n \"\"\"\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n \"\"\"\n return match_group.group(1), match_group.group(2)\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "import functools\nimport re\nfrom pprint import pprint\n\ndef heading(*, marker=''):\n '''\n Add a new line with the same number of heading markers as the characters in the title\n Need to specify marker to one of the valid rst line markups\n '''\n def wrapper_heading(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n title = func(*args, **kwargs)\n class_obj, passed_title, = args\n title = title.strip()\n return f'\\n{title}\\n{marker*len(title)}\\n' if passed_title.strip() != title else passed_title\n return wrapper\n return wrapper_heading\n\ndef code_pre_block(func):\n '''\n formats a code block according to rst format\n '''\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n block = func(*args, **kwargs)\n new_block = '\\n.. code-block::\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\ndef source_block(func):\n '''\n formats code from <source lang=\"some_language\"> blocks\n where the language is optional\n '''\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n lang, block = func(*args, **kwargs)\n new_block = f'\\n\\n.. 
code-block:: {lang or \"\"}\\n\\n'\n for line in block.split('\\n'):\n new_block += f' {line}\\n'\n return new_block\n return wrapper\n\ndef list_block(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n items = func(*args, **kwargs)\n new_list = '\\n'\n prev_indent = 0\n sub_list_started = False\n for line in items.split('\\n'):\n num_markers = get_num_markers(line) # how many # there are\n indent_by = (num_markers - 1) * 2 # no indentation for first level\n def get_printable_part(string):\n '''\n trim out up to a colon or semi-colon after a # list marker\n '''\n return string[num_markers+1:].strip() if string[num_markers] in [':', ';', '*'] else string[num_markers:].strip()\n # if # is followed by ; or :, it is a continuation of the previous list item\n # this can just be indented\n if line[num_markers] == '*': # bullet list item\n if not sub_list_started:\n new_list += f'\\n{\" \" * num_markers*2}* {get_printable_part(line)}\\n'\n sub_list_started = True\n else:\n new_list += f'{\" \" * num_markers*2}* {get_printable_part(line)}\\n'\n continue\n sub_list_started = False\n if line[num_markers] in [':', ';']:\n line = f'{\" \" * num_markers*2}{get_printable_part(line)}'\n else:\n line = f'{\" \" * indent_by}* {get_printable_part(line)}'\n if indent_by != prev_indent: # starting a new level or going back to old level\n line = f'\\n{line}' # new level starts a new line\n prev_indent = indent_by\n new_list += f'{line}\\n'\n return new_list\n return wrapper\n\ndef get_num_markers(string):\n indent_by = 0\n for i in range(len(string)):\n if string[i] == '#':\n indent_by += 1\n else:\n break\n return indent_by\n\n@list_block\ndef list_block_converter(match_group):\n return match_group.group(1)\n\n@code_pre_block\ndef code_pre_block_converter(match_group):\n return match_group.group(2)\n\n@source_block\ndef source_block_converter(match_group):\n '''\n formats a code block from <source lang=\"some_language\">\n the language part is optional\n '''\n return 
(match_group.group(1), match_group.group(2))\n\nif __name__ == '__main__':\n pass",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with tf.Session() as sess:
output = sess.run(z, feed_dict={x: 10, y: 2})
print(output)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x = tf.placeholder(tf.int32)
y = tf.placeholder(tf.int32)
u = tf.divide(x, y)
z = tf.subtract(u, tf.constant(1.0, dtype=tf.float64))
with tf.Session() as sess:
output = sess.run(z, feed_dict={x: 10, y: 2})
print(output)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import tensorflow as tf
x = tf.placeholder(tf.int32)
y = tf.placeholder(tf.int32)
u = tf.divide(x, y)
z = tf.subtract(u, tf.constant(1.0, dtype=tf.float64))
with tf.Session() as sess:
output = sess.run(z, feed_dict={x: 10, y: 2})
print(output)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 13 11:43:58 2020
@author: Dr. Tang
"""
import tensorflow as tf
# 需要你编程:将下面转换成tensorflow
#x = 10
#y = 2
#u=x/y
#z = u- 1
x=tf.placeholder(tf.int32)
y=tf.placeholder(tf.int32)
u=tf.divide(x,y)
z=tf.subtract(u,tf.constant(1.0,dtype=tf.float64))
# 需要你编程:从session中打印 z
with tf.Session() as sess:
output=sess.run(z,feed_dict={x:10,y:2})
print(output)
|
flexible
|
{
"blob_id": "ca91052072d7b2da5729cf55f7f4ba4b54608017",
"index": 3477,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as sess:\n output = sess.run(z, feed_dict={x: 10, y: 2})\n print(output)\n",
"step-3": "<mask token>\nx = tf.placeholder(tf.int32)\ny = tf.placeholder(tf.int32)\nu = tf.divide(x, y)\nz = tf.subtract(u, tf.constant(1.0, dtype=tf.float64))\nwith tf.Session() as sess:\n output = sess.run(z, feed_dict={x: 10, y: 2})\n print(output)\n",
"step-4": "<mask token>\nimport tensorflow as tf\nx = tf.placeholder(tf.int32)\ny = tf.placeholder(tf.int32)\nu = tf.divide(x, y)\nz = tf.subtract(u, tf.constant(1.0, dtype=tf.float64))\nwith tf.Session() as sess:\n output = sess.run(z, feed_dict={x: 10, y: 2})\n print(output)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 13 11:43:58 2020\n\n@author: Dr. Tang\n\"\"\"\n\nimport tensorflow as tf\n# 需要你编程:将下面转换成tensorflow\n#x = 10\n#y = 2\n#u=x/y\n#z = u- 1\n\nx=tf.placeholder(tf.int32)\ny=tf.placeholder(tf.int32)\nu=tf.divide(x,y)\nz=tf.subtract(u,tf.constant(1.0,dtype=tf.float64))\n# 需要你编程:从session中打印 z\nwith tf.Session() as sess:\n output=sess.run(z,feed_dict={x:10,y:2})\n print(output)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from subprocess import call
app = Flask(__name__)
app.config['SECRET_KEY'] = "SuperSecretKey"
#app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://fmnibhaashbxuy:73b8e2e2485adfd45f57da653d63950b88fdcae12202a84f80c7f4c297e9e30a@ec2-23-23-222-184.compute-1.amazonaws.com:5432/d27ig8fpt4ch7r"
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://info2180-project1:password123@localhost/profilebook"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning
app.config['UPLOAD_FOLDER'] = './app/static/profile_photo'
db = SQLAlchemy(app)
allowed_exts = ["jpg", "jpeg", "png"]
from app import views
|
normal
|
{
"blob_id": "7b45c9e31bfb868b1abde6af0d8579b52f86d9c3",
"index": 5689,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'SuperSecretKey'\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgresql://info2180-project1:password123@localhost/profilebook'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['UPLOAD_FOLDER'] = './app/static/profile_photo'\ndb = SQLAlchemy(app)\nallowed_exts = ['jpg', 'jpeg', 'png']\n<mask token>\n",
"step-3": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom subprocess import call\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'SuperSecretKey'\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgresql://info2180-project1:password123@localhost/profilebook'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['UPLOAD_FOLDER'] = './app/static/profile_photo'\ndb = SQLAlchemy(app)\nallowed_exts = ['jpg', 'jpeg', 'png']\nfrom app import views\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom subprocess import call\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"SuperSecretKey\"\n#app.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://fmnibhaashbxuy:73b8e2e2485adfd45f57da653d63950b88fdcae12202a84f80c7f4c297e9e30a@ec2-23-23-222-184.compute-1.amazonaws.com:5432/d27ig8fpt4ch7r\"\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://info2180-project1:password123@localhost/profilebook\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning\napp.config['UPLOAD_FOLDER'] = './app/static/profile_photo'\ndb = SQLAlchemy(app)\n\nallowed_exts = [\"jpg\", \"jpeg\", \"png\"]\n\nfrom app import views",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from .local_settings import *
except Exception as e:
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Database configuration parsed from the $DATABASE_URL environment
# variable (Heroku-style deployment).
DATABASES = {'default': dj_database_url.config()}
# Trust the proxy's X-Forwarded-Proto header for request.is_secure().
SECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'
# Optional machine-local overrides; absence is not an error.
try:
    from .local_settings import *
except Exception as e:
    pass
# django-allauth: e-mail-based signup, no verification mail, POST-only logout.
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_LOGOUT_ON_GET = False
# Ask social providers for the user's e-mail address.
SOCIALACCOUNT_QUERY_EMAIL = True
# Per-provider settings: Facebook via the JS SDK flow, LinkedIn with
# basic profile fields.
SOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [
    'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':
    'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',
    'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',
    'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [
    'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',
    'email-address', 'public-profile-url']}}
# Where to send the user after a successful login.
LOGIN_REDIRECT_URL = '/blog/jobs'
# django-bootstrap3 rendering defaults (CDN URLs, form classes, renderers).
BOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':
    '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,
    'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,
    'include_jquery': False, 'horizontal_label_class': 'col-md-3',
    'horizontal_field_class': 'col-md-9', 'set_required': True,
    'set_disabled': False, 'set_placeholder': True, 'required_css_class':
    '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',
    'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'
    }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},
    'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',
    'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}
# django-axes brute-force login protection.
AXES_LOGIN_FAILURE_LIMIT = 3
AXES_LOCK_OUT_AT_FAILURE = True
AXES_USE_USER_AGENT = True
AXES_COOLOFF_TIME = 50
# NOTE(review): AXES_LOCKOUT_TEMPLATE is assigned twice; the first value is
# immediately overwritten, so only None takes effect. 'axes.watch_login'
# looks like it was meant for a different setting — confirm intent.
AXES_LOCKOUT_TEMPLATE = 'axes.watch_login'
AXES_LOCKOUT_TEMPLATE = None
AXES_LOCKOUT_URL = None
AXES_VERBOSE = True
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False
# django-crispy-forms template pack.
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Recipients of site error notifications.
ADMINS = ('Petar Pilipovic', 'petar@literatillc.com'),
# DRF: anonymous users get read-only access; others follow model permissions.
REST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [
    'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}
<|reserved_special_token_1|>
from .settings import *
import dj_database_url
# Database configuration parsed from the $DATABASE_URL environment
# variable (Heroku-style deployment).
DATABASES = {'default': dj_database_url.config()}
# Trust the proxy's X-Forwarded-Proto header for request.is_secure().
SECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'
# Optional machine-local overrides; absence is not an error.
try:
    from .local_settings import *
except Exception as e:
    pass
# django-allauth: e-mail-based signup, no verification mail, POST-only logout.
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_LOGOUT_ON_GET = False
# Ask social providers for the user's e-mail address.
SOCIALACCOUNT_QUERY_EMAIL = True
# Per-provider settings: Facebook via the JS SDK flow, LinkedIn with
# basic profile fields.
SOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [
    'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':
    'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',
    'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',
    'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [
    'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',
    'email-address', 'public-profile-url']}}
# Where to send the user after a successful login.
LOGIN_REDIRECT_URL = '/blog/jobs'
# django-bootstrap3 rendering defaults (CDN URLs, form classes, renderers).
BOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':
    '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,
    'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,
    'include_jquery': False, 'horizontal_label_class': 'col-md-3',
    'horizontal_field_class': 'col-md-9', 'set_required': True,
    'set_disabled': False, 'set_placeholder': True, 'required_css_class':
    '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',
    'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'
    }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},
    'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',
    'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}
# django-axes brute-force login protection.
AXES_LOGIN_FAILURE_LIMIT = 3
AXES_LOCK_OUT_AT_FAILURE = True
AXES_USE_USER_AGENT = True
AXES_COOLOFF_TIME = 50
# NOTE(review): AXES_LOCKOUT_TEMPLATE is assigned twice; the first value is
# immediately overwritten, so only None takes effect. 'axes.watch_login'
# looks like it was meant for a different setting — confirm intent.
AXES_LOCKOUT_TEMPLATE = 'axes.watch_login'
AXES_LOCKOUT_TEMPLATE = None
AXES_LOCKOUT_URL = None
AXES_VERBOSE = True
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False
# django-crispy-forms template pack.
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Recipients of site error notifications.
ADMINS = ('Petar Pilipovic', 'petar@literatillc.com'),
# DRF: anonymous users get read-only access; others follow model permissions.
REST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [
    'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}
<|reserved_special_token_1|>
from .settings import *
# Heroku Configurations
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Load local_settings.py overrides when present; any failure is ignored.
try:
    from .local_settings import *
except Exception as e:
    pass
# ALLAUTH configuration
# Specify the login method to use
ACCOUNT_USERNAME_REQUIRED = False
# ACCOUNT_AUTHENTICATION_METHOD = "username", "email", "username_email"
# Determines the e-mail verification method during signup – choose one of “mandatory”, “optional”, or “none”.
# When set to “mandatory” the user is blocked from logging in until the email address is verified.
# Choose “optional” or “none” to allow logins with an unverified e-mail address.
# In case of “optional”, the e-mail verification mail is still sent,
# whereas in case of “none” no e-mail verification mails are sent.
ACCOUNT_EMAIL_VERIFICATION = "none"
# Determines whether or not the user is automatically logged out by a mere GET request.
# See documentation for the LogoutView for details.
ACCOUNT_LOGOUT_ON_GET = False
# Request e-mail address from 3rd-party account provider?
# E.g. using OpenID AX, or the Facebook “email” permission.
SOCIALACCOUNT_QUERY_EMAIL = True
# Dictionary containing provider-specific settings.
SOCIALACCOUNT_PROVIDERS = {
    'facebook': {
        # we use the Facebook js_sdk instead of oauth2
        'METHOD': 'js_sdk',
        'SCOPE': ['email', 'public_profile', 'user_friends'],
        # AUTH_PARAMS passes extra parameters along
        # to the FB.login JS SDK call
        'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
        # fields fetched from the Graph API
        'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],
        # the JS SDK returns a short-lived token suitable for client-side use.
        'EXCHANGE_TOKEN': True,
        # Choose the current active language of the request
        'LOCALE_FUNC': 'path.to.callable',
        'VERIFIED_EMAIL': False,
        # Facebook Graph API version
        'VERSION': 'v2.7'
    },
    'linkedin': {
        'SCOPE': ['r_emailaddress'],
        'PROFILE_FIELDS': [
            'id',
            'first-name',
            'last-name',
            'email-address',
            'public-profile-url'
        ]
    }
}
# Where to redirect the user after a successful login
LOGIN_REDIRECT_URL = "/blog/jobs"
# django-bootstrap3 default settings
BOOTSTRAP3 = {
    # The URL to the jQuery JavaScript file
    'jquery_url': '//code.jquery.com/jquery.min.js',
    # The Bootstrap base URL
    'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',
    # The complete URL to the Bootstrap CSS file (None means derive it from base_url)
    'css_url': None,
    # The complete URL to the Bootstrap theme CSS file (None means no theme)
    'theme_url': None,
    # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
    'javascript_url': None,
    # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
    'javascript_in_head': False,
    # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
    'include_jquery': False,
    # Label class to use in horizontal forms
    'horizontal_label_class': 'col-md-3',
    # Field class to use in horizontal forms
    'horizontal_field_class': 'col-md-9',
    # Set HTML required attribute on required fields
    'set_required': True,
    # Set HTML disabled attribute on disabled fields
    'set_disabled': False,
    # Set placeholder attributes to label if no placeholder is provided
    'set_placeholder': True,
    # Class to indicate required (better to set this in your Django form)
    'required_css_class': '',
    # Class to indicate error (better to set this in your Django form)
    'error_css_class': 'has-error',
    # Class to indicate success, meaning the field has valid input (better to set this in your Django form)
    'success_css_class': 'has-success',
    # Renderers (only set these if you have studied the source and understand the inner workings)
    'formset_renderers':{
        'default': 'bootstrap3.renderers.FormsetRenderer',
    },
    'form_renderers': {
        'default': 'bootstrap3.renderers.FormRenderer',
    },
    'field_renderers': {
        'default': 'bootstrap3.renderers.FieldRenderer',
        'inline': 'bootstrap3.renderers.InlineFieldRenderer',
    },
}
# django-axes Configurations
# Number of login attempts allowed before a record is created for the failed logins.
AXES_LOGIN_FAILURE_LIMIT = 3
# After the number of allowed login attempts is exceeded, should we lock this IP (and optional user agent)?
AXES_LOCK_OUT_AT_FAILURE = True
# If True, lock out / log based on an IP address AND a user agent. This means requests from different user
# agents but from the same IP are treated differently.
AXES_USE_USER_AGENT = True
# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set it to a
# python timedelta object or an integer; an integer represents a number of hours.
AXES_COOLOFF_TIME = 50
# NOTE(review): this setting is assigned twice and the first value is
# immediately overwritten below, so only None takes effect.
# 'axes.watch_login' looks like it was meant for a different axes
# setting (a logger name, not a template) — confirm the intent.
AXES_LOCKOUT_TEMPLATE = 'axes.watch_login'
# Specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as
# context variables
AXES_LOCKOUT_TEMPLATE = None
# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template
# will be used
AXES_LOCKOUT_URL = None
# If True, you'll see slightly more logging for Axes
AXES_VERBOSE = True
# The name of the form field that contains your usernames
# AXES_USERNAME_FORM_FIELD = username
# If True, prevents login from an IP under a particular user if the attempts limit is exceeded; otherwise lock out
# based on IP. Default: False
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False
# Crispy forms will use the BOOTSTRAP3 TEMPLATE PACK
CRISPY_TEMPLATE_PACK = "bootstrap3"
# Recipients of site error notifications
ADMINS = (
    ("Petar Pilipovic", "petar@literatillc.com"),
)
# RESTframework permission classes configuration: anonymous users get
# read-only access, authenticated users follow model permissions.
REST_FRAMEWORK = {
    "DEFAULT_PERMISSION_CLASSES": [
        "rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
    ]
}
|
flexible
|
{
"blob_id": "8bb86cae3387a0d4ce5987f3e3c458c8298174e0",
"index": 7342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\n<mask token>\n",
"step-3": "<mask token>\nDATABASES = {'default': dj_database_url.config()}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'none'\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [\n 'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':\n 'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',\n 'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [\n 'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',\n 'email-address', 'public-profile-url']}}\nLOGIN_REDIRECT_URL = '/blog/jobs'\nBOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':\n '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,\n 'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,\n 'include_jquery': False, 'horizontal_label_class': 'col-md-3',\n 'horizontal_field_class': 'col-md-9', 'set_required': True,\n 'set_disabled': False, 'set_placeholder': True, 'required_css_class':\n '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',\n 'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'\n }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},\n 'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}\nAXES_LOGIN_FAILURE_LIMIT = 3\nAXES_LOCK_OUT_AT_FAILURE = True\nAXES_USE_USER_AGENT = True\nAXES_COOLOFF_TIME = 50\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE = None\nAXES_LOCKOUT_URL = None\nAXES_VERBOSE = True\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nADMINS = ('Petar Pilipovic', 
'petar@literatillc.com'),\nREST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}\n",
"step-4": "from .settings import *\nimport dj_database_url\nDATABASES = {'default': dj_database_url.config()}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'none'\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [\n 'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':\n 'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',\n 'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [\n 'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',\n 'email-address', 'public-profile-url']}}\nLOGIN_REDIRECT_URL = '/blog/jobs'\nBOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':\n '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,\n 'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,\n 'include_jquery': False, 'horizontal_label_class': 'col-md-3',\n 'horizontal_field_class': 'col-md-9', 'set_required': True,\n 'set_disabled': False, 'set_placeholder': True, 'required_css_class':\n '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',\n 'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'\n }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},\n 'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}\nAXES_LOGIN_FAILURE_LIMIT = 3\nAXES_LOCK_OUT_AT_FAILURE = True\nAXES_USE_USER_AGENT = True\nAXES_COOLOFF_TIME = 50\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE = None\nAXES_LOCKOUT_URL = None\nAXES_VERBOSE = True\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nADMINS = ('Petar 
Pilipovic', 'petar@literatillc.com'),\nREST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}\n",
"step-5": "from .settings import *\n\n\n\n# Heroku Configurations\n# Parse database configuration from $DATABASE_URL\nimport dj_database_url\n\nDATABASES = {'default': dj_database_url.config()}\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# loading local_settings.py\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\n\n# ALLAUTH configuration\n\n\n# Specific the login method to use\nACCOUNT_USERNAME_REQUIRED = False\n# ACCOUNT_AUTHENTICATION_METHOD = \"username\", \"email\", \"username_email\"\n\n# Determines the e-mail verification method during signup – choose one of “mandatory”, “optional”, or “none”.\n# When set to “mandatory” the user is blocked from logging in until the email address is verified.\n# Choose “optional” or “none” to allow logins with an unverified e-mail address.\n# In case of “optional”, the e-mail verification mail is still sent,\n# whereas in case of “none” no e-mail verification mails are sent.\nACCOUNT_EMAIL_VERIFICATION = \"none\"\n\n# Determines whether or not the user is automatically logged out by a mere GET request.\n# See documentation for the LogoutView for details.\nACCOUNT_LOGOUT_ON_GET = False\n\n# Request e-mail address from 3rd import party account provider?\n# E.g. 
using OpenID AX, or the Facebook “email” permission.\nSOCIALACCOUNT_QUERY_EMAIL = True\n\n# Dictionary containing provider specific settings.\nSOCIALACCOUNT_PROVIDERS = {\n 'facebook': {\n # we use facebook js_sdk instead od oauth2\n 'METHOD': 'js_sdk',\n 'SCOPE': ['email', 'public_profile', 'user_friends'],\n # using AUTH_PARAMS to pass along other parametees\n # to the FB.login JS SDK call\n 'AUTH_PARAMS': {'auth_type': 'reauthenticate'},\n # field are fetch from the import Graph API\n 'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],\n # JS SDK return a short-lived token suitable for client-side use.\n 'EXCHANGE_TOKEN': True,\n # Chose the current active language of the request\n 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False,\n # Facebook Graph API version\n 'VERSION': 'v2.7'\n },\n 'linkedin': {\n 'SCOPE': ['r_emailaddress'],\n 'PROFILE_FIELDS': [\n 'id',\n 'first-name',\n 'last-name',\n 'email-address',\n 'public-profile-url'\n ]\n }\n}\n\n# login redirect url\nLOGIN_REDIRECT_URL = \"/blog/jobs\"\n\n# Default settings\nBOOTSTRAP3 = {\n\n # The URL to the jQuery JavaScript file\n 'jquery_url': '//code.jquery.com/jquery.min.js',\n\n # The Bootstrap base URL\n 'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',\n\n # The complete URL to the Bootstrap CSS file (None means derive it from base_url)\n 'css_url': None,\n\n # The complete URL to the Bootstrap CSS file (None means no theme)\n 'theme_url': None,\n\n # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)\n 'javascript_url': None,\n\n # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)\n 'javascript_in_head': False,\n\n # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)\n 'include_jquery': False,\n\n # Label class to use in horizontal forms\n 'horizontal_label_class': 'col-md-3',\n\n # Field class to use in horizontal forms\n 'horizontal_field_class': 
'col-md-9',\n\n # Set HTML required attribute on required fields\n 'set_required': True,\n\n # Set HTML disabled attribute on disabled fields\n 'set_disabled': False,\n\n # Set placeholder attributes to label if no placeholder is provided\n 'set_placeholder': True,\n\n # Class to indicate required (better to set this in your Django form)\n 'required_css_class': '',\n\n # Class to indicate error (better to set this in your Django form)\n 'error_css_class': 'has-error',\n\n # Class to indicate success, meaning the field has valid input (better to set this in your Django form)\n 'success_css_class': 'has-success',\n\n # Renderers (only set these if you have studied the source and understand the inner workings)\n 'formset_renderers':{\n 'default': 'bootstrap3.renderers.FormsetRenderer',\n },\n 'form_renderers': {\n 'default': 'bootstrap3.renderers.FormRenderer',\n },\n 'field_renderers': {\n 'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer',\n },\n}\n\n# Axes Configurations\n# Number of login attempts allowed before a record is created for the failed logins.\nAXES_LOGIN_FAILURE_LIMIT = 3\n\n# After the number os allowed login attempts are exceeded, should we lock this IP (and optinal user agend)?\nAXES_LOCK_OUT_AT_FAILURE = True\n\n# If True, lock out / log based on an IP address AND a user agent. This means requests from different import user\n# agents but from the import same IP are treated differently.\nAXES_USE_USER_AGENT = True\n\n# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set to a\n# python timedelta object or an integer, if you set it to be integer it will represent a number of hours\nAXES_COOLOFF_TIME = 50\n\n# Specifies a logging mechanism for axes to use\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\n\n# Specifies a template to render when a user is locked out. 
Template receives cooloff_time and failure_limit as\n# context variables\nAXES_LOCKOUT_TEMPLATE = None\n\n# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template\n# will be used\nAXES_LOCKOUT_URL = None\n\n# If Truem you'll see slightly more logging for Axes\nAXES_VERBOSE = True\n\n# The name of the for field that contains your usernames\n# AXES_USERNAME_FORM_FIELD = username\n\n# If True prevents to login from IP import under particular user if attempts limit exceed, otherwise lock out based on\n# IP. Default: False\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\n\n# Crispy forms will use BOOTSTRAP3 TEMPLATE PACK\nCRISPY_TEMPLATE_PACK = \"bootstrap3\"\n\n# Signal Admins Configurations\nADMINS = (\n (\"Petar Pilipovic\", \"petar@literatillc.com\"),\n)\n\n# RESTframework Permission classes configuration\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": [\n \"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly\"\n ]\n}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def caesar_cipher(text, shift):
    """Return *text* with every ASCII letter rotated by *shift* positions.

    Letters wrap around within their own case ('z' + 1 -> 'a',
    'A' - 1 -> 'Z'); non-letter characters are passed through unchanged.
    A negative shift rotates backwards.

    Fixes a defect in the original script: a shift of 0 produced empty
    output because characters were only collected inside the positive-
    and negative-shift branches.  A shift of 0 now returns the text
    unchanged.  Modular arithmetic also replaces the original
    O(|shift|)-steps-per-character loop.
    """
    result = []
    for ch in text:
        if 'a' <= ch <= 'z':
            # Rotate within 'a'..'z' (ASCII 97-122).
            result.append(chr((ord(ch) - ord('a') + shift) % 26 + ord('a')))
        elif 'A' <= ch <= 'Z':
            # Rotate within 'A'..'Z' (ASCII 65-90).
            result.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
        else:
            # Digits, punctuation, whitespace: unchanged, as in the original.
            result.append(ch)
    return ''.join(result)


if __name__ == '__main__':
    # Interactive entry point, preserved from the original script
    # (prompts and no-trailing-newline output kept byte-for-byte).
    message = input("enter the string or sentence to perform caesar cipher : ")
    frequency = int(input('enter the frequency to perform ceasar cipher '))
    print(caesar_cipher(message, frequency), end='')
|
normal
|
{
"blob_id": "287d4c2d490c9dcdd7be7e86fe577139a3d30f54",
"index": 6676,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(0, c):\n d = ord(a[i])\n if b > 0:\n for j in range(1, b + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d >= 65 and d < 90 or d >= 97 and d < 122:\n d += 1\n elif d == 90:\n d = 65\n elif d == 122:\n d = 97\n else:\n pass\n f = chr(d)\n e.append(f)\n if b < 0:\n g = abs(b)\n for j in range(1, g + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d > 65 and d <= 90 or d > 97 and d <= 122:\n d -= 1\n elif d == 97:\n d = 122\n elif d == 65:\n d = 90\n else:\n pass\n f = chr(d)\n e.append(f)\nfor k in range(0, c):\n print(e[k], end='')\n<mask token>\n",
"step-3": "a = list(input('enter the string or sentence to perform caesar cipher : '))\nb = int(input('enter the frequency to perform ceasar cipher '))\ne = []\nc = len(a)\nfor i in range(0, c):\n d = ord(a[i])\n if b > 0:\n for j in range(1, b + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d >= 65 and d < 90 or d >= 97 and d < 122:\n d += 1\n elif d == 90:\n d = 65\n elif d == 122:\n d = 97\n else:\n pass\n f = chr(d)\n e.append(f)\n if b < 0:\n g = abs(b)\n for j in range(1, g + 1):\n if a[i] >= 'a' and a[i] <= 'z' or a[i] >= 'A' and a[i] <= 'Z':\n if d > 65 and d <= 90 or d > 97 and d <= 122:\n d -= 1\n elif d == 97:\n d = 122\n elif d == 65:\n d = 90\n else:\n pass\n f = chr(d)\n e.append(f)\nfor k in range(0, c):\n print(e[k], end='')\n<mask token>\n",
"step-4": "a=list(input(\"enter the string or sentence to perform caesar cipher : \"))\r\nb=int(input('enter the frequency to perform ceasar cipher '))\r\ne=[]\r\n#print(a)\r\n#print (a[4])\r\nc=len(a)\r\n#print(c)\r\nfor i in range (0,c):\r\n d=ord(a[i])\r\n #print(d)\r\n if b> 0:\r\n for j in range (1,b+1):\r\n if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z':\r\n if d>= 65 and d< 90 or d>=97 and d<122:\r\n d+=1\r\n elif d==90:\r\n d=65\r\n elif d==122:\r\n d=97\r\n else :\r\n pass\r\n f=chr(d)\r\n e.append(f)\r\n if b<0:\r\n g=abs(b)\r\n for j in range (1,g+1):\r\n if a[i] >='a' and a[i] <='z' or a[i] >= 'A' and a[i] <='Z':\r\n if d> 65 and d<= 90 or d>97 and d<=122:\r\n d-=1\r\n elif d==97:\r\n d=122\r\n elif d==65:\r\n d=90\r\n else :\r\n pass\r\n f=chr(d)\r\n e.append(f)\r\n#print (e)\r\nfor k in range (0,c):\r\n print(e[k],end='')\r\n'''65-90 A-Z\r\n 97-122 a-z'''\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Member:
    """A simple site member with a class-level registered-user counter."""

    # First names that full_name() refuses to render.
    not_allowed_name = ["Shit", "Hell", "Baloot"]
    # Running count of members; incremented on __init__, decremented by
    # delete_user().
    users_num = 0

    def __init__(self, first_name, middle_name, last_name, gender):
        self.fname = first_name
        self.mname = middle_name
        self.lname = last_name
        self.gender = gender
        Member.users_num += 1

    @classmethod
    def show_users_count(cls):
        """Print how many users are currently counted."""
        print(f"We Have {cls.users_num} Users In Our System.")

    @staticmethod
    def say_hello():
        """Print a greeting; uses no class or instance state."""
        # Plain literal: the original used an f-string with no placeholders.
        print("Hello From Static Method")

    def full_name(self):
        """Return "first middle last".

        Raises:
            ValueError: if the first name is in ``not_allowed_name``.
        """
        if self.fname in Member.not_allowed_name:
            raise ValueError("Name is Not Allowed")
        return f"{self.fname} {self.mname} {self.lname}"

    def welcome(self):
        """Return a gender-aware greeting for this member."""
        if self.gender == "Male":
            return f"Hello Mr {self.fname}"
        if self.gender == "Female":
            return f"Hello Mrs {self.fname}"
        return f"Hello {self.fname}"

    def get_all_info(self):
        """Return the welcome line combined with the member's full name."""
        return f"{self.welcome()}, Your Full Name Is: {self.full_name()}"

    def delete_user(self):
        """Decrement the user counter and report the deletion."""
        Member.users_num -= 1
        return f"Users {self.fname} Is Deleted"
# Demo / manual-test driver for the Member class: exercises construction,
# counter bookkeeping, deletion, and the class/static methods via prints.
print(Member.users_num)
member_one = Member("Osama", "Mohammed", "Elzero", "Male")
member_two = Member("Mohammed", "Mohammed", "Saad", "Male")
member_three = Member("Hala", "Mohammed", "Saad", "Female")
# Deliberately uses a banned first name; full_name()/get_all_info() on this
# instance raise ValueError (see the commented-out call below).
member_four = Member("Shit", "Hell", "Metal", "DD")
# print(member_one.fname, member_one.mname, member_one.lname)
# print(member_two.mname)
# print(member_three.lname)
# print(member_one.full_name())
# print(member_three.welcome())
# print(dir(Member))
# print(member_three.get_all_info())
# print(member_four.get_all_info()) # Value Error
print(Member.users_num)
# delete_user() only decrements the counter; the instance itself survives.
print(member_four.delete_user())
print(Member.users_num)
print("#" * 100)
Member.show_users_count()
Member.say_hello()
#print("#" * 100)
#print(member_three.full_name()) # Both same
#print(Member.full_name(member_three)) # Both same (Backend)
|
normal
|
{
"blob_id": "f276e33cde2e043fc8f81403e499544aa816a639",
"index": 9316,
"step-1": "class Member:\n <mask token>\n <mask token>\n\n def __init__(self, first_name, middle_name, last_name, gender):\n self.fname = first_name\n self.mname = middle_name\n self.lname = last_name\n self.gender = gender\n Member.users_num += 1\n <mask token>\n\n @staticmethod\n def say_hello():\n print(f'Hello From Static Method')\n\n def full_name(self):\n if self.fname in Member.not_allowed_name:\n raise ValueError('Name is Not Allowed')\n else:\n return f'{self.fname} {self.mname} {self.lname}'\n\n def welcome(self):\n if self.gender == 'Male':\n return f'Hello Mr {self.fname}'\n elif self.gender == 'Female':\n return f'Hello Mrs {self.fname}'\n else:\n return f'Hello {self.fname}'\n\n def get_all_info(self):\n return f'{self.welcome()}, Your Full Name Is: {self.full_name()}'\n\n def delete_user(self):\n Member.users_num -= 1\n return f'Users {self.fname} Is Deleted'\n\n\n<mask token>\n",
"step-2": "class Member:\n <mask token>\n <mask token>\n\n def __init__(self, first_name, middle_name, last_name, gender):\n self.fname = first_name\n self.mname = middle_name\n self.lname = last_name\n self.gender = gender\n Member.users_num += 1\n\n @classmethod\n def show_users_count(cls):\n print(f'We Have {cls.users_num} Users In Our System.')\n\n @staticmethod\n def say_hello():\n print(f'Hello From Static Method')\n\n def full_name(self):\n if self.fname in Member.not_allowed_name:\n raise ValueError('Name is Not Allowed')\n else:\n return f'{self.fname} {self.mname} {self.lname}'\n\n def welcome(self):\n if self.gender == 'Male':\n return f'Hello Mr {self.fname}'\n elif self.gender == 'Female':\n return f'Hello Mrs {self.fname}'\n else:\n return f'Hello {self.fname}'\n\n def get_all_info(self):\n return f'{self.welcome()}, Your Full Name Is: {self.full_name()}'\n\n def delete_user(self):\n Member.users_num -= 1\n return f'Users {self.fname} Is Deleted'\n\n\n<mask token>\n",
"step-3": "class Member:\n not_allowed_name = ['Shit', 'Hell', 'Baloot']\n users_num = 0\n\n def __init__(self, first_name, middle_name, last_name, gender):\n self.fname = first_name\n self.mname = middle_name\n self.lname = last_name\n self.gender = gender\n Member.users_num += 1\n\n @classmethod\n def show_users_count(cls):\n print(f'We Have {cls.users_num} Users In Our System.')\n\n @staticmethod\n def say_hello():\n print(f'Hello From Static Method')\n\n def full_name(self):\n if self.fname in Member.not_allowed_name:\n raise ValueError('Name is Not Allowed')\n else:\n return f'{self.fname} {self.mname} {self.lname}'\n\n def welcome(self):\n if self.gender == 'Male':\n return f'Hello Mr {self.fname}'\n elif self.gender == 'Female':\n return f'Hello Mrs {self.fname}'\n else:\n return f'Hello {self.fname}'\n\n def get_all_info(self):\n return f'{self.welcome()}, Your Full Name Is: {self.full_name()}'\n\n def delete_user(self):\n Member.users_num -= 1\n return f'Users {self.fname} Is Deleted'\n\n\nprint(Member.users_num)\n<mask token>\nprint(Member.users_num)\nprint(member_four.delete_user())\nprint(Member.users_num)\nprint('#' * 100)\nMember.show_users_count()\nMember.say_hello()\n",
"step-4": "class Member:\n not_allowed_name = ['Shit', 'Hell', 'Baloot']\n users_num = 0\n\n def __init__(self, first_name, middle_name, last_name, gender):\n self.fname = first_name\n self.mname = middle_name\n self.lname = last_name\n self.gender = gender\n Member.users_num += 1\n\n @classmethod\n def show_users_count(cls):\n print(f'We Have {cls.users_num} Users In Our System.')\n\n @staticmethod\n def say_hello():\n print(f'Hello From Static Method')\n\n def full_name(self):\n if self.fname in Member.not_allowed_name:\n raise ValueError('Name is Not Allowed')\n else:\n return f'{self.fname} {self.mname} {self.lname}'\n\n def welcome(self):\n if self.gender == 'Male':\n return f'Hello Mr {self.fname}'\n elif self.gender == 'Female':\n return f'Hello Mrs {self.fname}'\n else:\n return f'Hello {self.fname}'\n\n def get_all_info(self):\n return f'{self.welcome()}, Your Full Name Is: {self.full_name()}'\n\n def delete_user(self):\n Member.users_num -= 1\n return f'Users {self.fname} Is Deleted'\n\n\nprint(Member.users_num)\nmember_one = Member('Osama', 'Mohammed', 'Elzero', 'Male')\nmember_two = Member('Mohammed', 'Mohammed', 'Saad', 'Male')\nmember_three = Member('Hala', 'Mohammed', 'Saad', 'Female')\nmember_four = Member('Shit', 'Hell', 'Metal', 'DD')\nprint(Member.users_num)\nprint(member_four.delete_user())\nprint(Member.users_num)\nprint('#' * 100)\nMember.show_users_count()\nMember.say_hello()\n",
"step-5": "class Member:\n not_allowed_name = [\"Shit\", \"Hell\", \"Baloot\"]\n users_num = 0\n\n def __init__(self, first_name, middle_name, last_name, gender):\n\n self.fname = first_name\n self.mname = middle_name\n self.lname = last_name\n self.gender = gender\n\n Member.users_num += 1\n\n @classmethod\n def show_users_count(cls):\n\n print(f\"We Have {cls.users_num} Users In Our System.\")\n\n @staticmethod\n def say_hello(): # static method not linked to class or instances\n\n print(f\"Hello From Static Method\")\n\n def full_name(self): # instance method\n\n if self.fname in Member.not_allowed_name:\n raise ValueError(\"Name is Not Allowed\")\n\n else:\n return f\"{self.fname} {self.mname} {self.lname}\"\n\n def welcome(self):\n\n if self.gender == \"Male\":\n return f\"Hello Mr {self.fname}\"\n elif self.gender == \"Female\":\n return f\"Hello Mrs {self.fname}\"\n else:\n return f\"Hello {self.fname}\"\n\n def get_all_info(self):\n\n return f\"{self.welcome()}, Your Full Name Is: {self.full_name()}\"\n\n def delete_user(self):\n\n Member.users_num -= 1\n\n return f\"Users {self.fname} Is Deleted\"\n\n\nprint(Member.users_num)\nmember_one = Member(\"Osama\", \"Mohammed\", \"Elzero\", \"Male\")\nmember_two = Member(\"Mohammed\", \"Mohammed\", \"Saad\", \"Male\")\nmember_three = Member(\"Hala\", \"Mohammed\", \"Saad\", \"Female\")\nmember_four = Member(\"Shit\", \"Hell\", \"Metal\", \"DD\")\n\n# print(member_one.fname, member_one.mname, member_one.lname)\n# print(member_two.mname)\n# print(member_three.lname)\n\n# print(member_one.full_name())\n# print(member_three.welcome())\n\n# print(dir(Member))\n\n# print(member_three.get_all_info())\n# print(member_four.get_all_info()) # Value Error\nprint(Member.users_num)\nprint(member_four.delete_user())\nprint(Member.users_num)\n\nprint(\"#\" * 100)\n\nMember.show_users_count()\nMember.say_hello()\n#print(\"#\" * 100)\n\n#print(member_three.full_name()) # Both same\n#print(Member.full_name(member_three)) # Both same 
(Backend)\n\n\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
"""
.. currentmodule:: jotting
.. automodule:: jotting.book
:members:
.. automodule:: jotting.to
:members:
.. automodule:: jotting.read
:members:
.. automodule:: jotting.style
:members:
"""
from .book import book
from . import style, to, read, dist
|
normal
|
{
"blob_id": "ce6dba2f682b091249f3bbf362bead4b95fee1f4",
"index": 292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom .book import book\nfrom . import style, to, read, dist\n",
"step-3": "\"\"\"\n.. currentmodule:: jotting\n\n.. automodule:: jotting.book\n :members:\n\n.. automodule:: jotting.to\n :members:\n\n.. automodule:: jotting.read\n :members:\n\n.. automodule:: jotting.style\n :members:\n\"\"\"\n\n\nfrom .book import book\nfrom . import style, to, read, dist\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#! /usr/bin/env python
#
# Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
# Root-logger setup: bare messages at INFO level.
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
# IP protocol number -> EVE "proto" string; unknown numbers fall back to
# str(number) in EveFilter.getprotobynumber().
proto_map = {
    1: "ICMP",
    6: "TCP",
    17: "UDP",
}
def get_tzoffset(sec):
    """Return the local UTC offset at POSIX timestamp *sec* as (+|-)HHMM.

    The offset is whatever the local timezone applied at that instant,
    so DST transitions are honoured.
    """
    offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
    # timedelta normalizes a negative offset to days == -1 plus a
    # positive .seconds component, so recover the magnitude first.
    if offset.days == -1:
        sign = "-"
        total = 86400 - offset.seconds
    else:
        sign = "+"
        total = offset.seconds
    # Minutes are (total % 3600) // 60.  The original emitted the raw
    # seconds remainder, producing e.g. "+051800" instead of "+0530"
    # for half-hour timezones; it also used float division under py3.
    return "%s%02d%02d" % (sign, total // 3600, (total % 3600) // 60)
def render_timestamp(sec, usec):
    """Format *sec*/*usec* as an ISO-8601 local timestamp with UTC offset."""
    lt = time.localtime(sec)
    date_part = "%04d-%02d-%02d" % (lt.tm_year, lt.tm_mon, lt.tm_mday)
    time_part = "%02d:%02d:%02d.%06d" % (lt.tm_hour, lt.tm_min, lt.tm_sec, usec)
    return date_part + "T" + time_part + get_tzoffset(sec)
def calculate_flow_id(event):
    """Derive a pseudo flow id for a unified2 *event*.

    XOR-folds the protocol number, both raw IP addresses (4 bytes for
    IPv4, 16 for IPv6) and, for TCP/UDP, both port numbers.
    """
    flow_id = event["protocol"] << 24
    if len(event["source-ip.raw"]) == 4:
        flow_id = flow_id ^ \
            struct.unpack(">L", event["source-ip.raw"])[0] ^ \
            struct.unpack(">L", event["destination-ip.raw"])[0]
    else:
        for part in struct.unpack(">LLLL", event["source-ip.raw"]):
            flow_id = flow_id ^ part
        for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
            flow_id = flow_id ^ part
    # Fold the ports in for TCP/UDP.  The original tested the *output*
    # keys "src_port"/"dest_port", which never exist on a raw unified2
    # event (it carries "sport-itype"/"dport-icode" -- see
    # EveFilter.filter), so ports were silently ignored.
    if event["protocol"] in [socket.IPPROTO_TCP, socket.IPPROTO_UDP]:
        flow_id = flow_id ^ event["sport-itype"] ^ event["dport-icode"]
    return flow_id
class EveFilter(object):
    """Converts a unified2 event dict into a Suricata EVE-style dict.

    Key insertion order in the returned OrderedDict is significant for
    the serialized JSON and is preserved exactly.
    """
    def __init__(
            self, msgmap=None, classmap=None):
        # msgmap: resolves (generator-id, signature-id) -> rule message.
        # classmap: resolves classification-id -> classification info.
        self.msgmap = msgmap
        self.classmap = classmap
    def filter(self, event):
        """Return the EVE representation of *event* as an OrderedDict."""
        output = OrderedDict()
        output["timestamp"] = render_timestamp(
            event["event-second"], event["event-microsecond"])
        output["sensor_id"] = event["sensor-id"]
        output["event_type"] = "alert"
        output["src_ip"] = event["source-ip"]
        # Ports only make sense for TCP/UDP; for ICMP the same fields
        # carry type/code and are emitted separately below.
        if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
            output["src_port"] = event["sport-itype"]
        output["dest_ip"] = event["destination-ip"]
        if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
            output["dest_port"] = event["dport-icode"]
        output["proto"] = self.getprotobynumber(event["protocol"])
        if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
            output["icmp_type"] = event["sport-itype"]
            output["icmp_code"] = event["dport-icode"]
        output["flow_id"] = calculate_flow_id(event)
        alert = OrderedDict()
        alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
        alert["gid"] = event["generator-id"]
        alert["signature_id"] = event["signature-id"]
        alert["rev"] = event["signature-revision"]
        alert["signature"] = self.resolve_msg(event)
        alert["category"] = self.resolve_classification(event)
        alert["severity"] = event["priority"]
        output["alert"] = alert
        # EVE only includes one packet.
        # NOTE(review): b64encode returns bytes on Python 3; this file
        # appears to target Python 2 (see json.dumps encoding kwarg in
        # main) -- confirm target interpreter.
        if event["packets"]:
            output["packet"] = base64.b64encode(event["packets"][0]["data"])
        return output
    def resolve_classification(self, event, default=None):
        """Return the classification description, or *default* if unknown."""
        if self.classmap:
            classinfo = self.classmap.get(event["classification-id"])
            if classinfo:
                return classinfo["description"]
        return default
    def resolve_msg(self, event, default=None):
        """Return the rule message for the event's gid/sid, or *default*."""
        if self.msgmap:
            signature = self.msgmap.get(
                event["generator-id"], event["signature-id"])
            if signature:
                return signature["msg"]
        return default
    def getprotobynumber(self, protocol):
        """Map an IP protocol number to its name, else str(number)."""
        return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
    """Line-oriented writer that can transparently reopen its file.

    Constructed with only a filename, the wrapper owns a real file and
    re-creates it if it disappears (e.g. after log rotation).  When an
    explicit file object is supplied (such as stdout), it is used as-is
    and never reopened.
    """
    def __init__(self, filename, fileobj=None):
        self.filename = filename
        self.fileobj = fileobj
        self.isfile = fileobj is None
        if self.isfile:
            self.reopen()
    def reopen(self):
        """(Re)open the named file for appending, closing any old handle."""
        if self.fileobj:
            self.fileobj.close()
        self.fileobj = open(self.filename, "ab")
    def write(self, buf):
        """Write *buf* plus a trailing newline and flush immediately."""
        if self.isfile and not os.path.exists(self.filename):
            # File vanished underneath us -- recreate it.
            self.reopen()
        self.fileobj.write(buf)
        self.fileobj.write("\n")
        self.fileobj.flush()
def load_from_snort_conf(snort_conf, classmap, msgmap):
    """Load classification and message maps that sit beside snort.conf.

    Looks for classification.config, gen-msg.map and sid-msg.map in the
    directory containing *snort_conf* and feeds each one that exists to
    the matching map loader.  Missing files are silently skipped.
    """
    snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
    # (basename, loader method name, target object), in load order.
    loaders = (
        ("classification.config", "load_from_file", classmap),
        ("gen-msg.map", "load_generator_map", msgmap),
        ("sid-msg.map", "load_signature_map", msgmap),
    )
    for basename, method, target in loaders:
        path = os.path.join(snort_etc, basename)
        if os.path.exists(path):
            LOG.debug("Loading %s.", path)
            getattr(target, method)(open(path))
# argparse epilog: appended verbatim to the --help output in main().
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
    """Entry point: parse args, load maps, convert unified2 to EVE JSON.

    Reads unified2 records either from a spool directory (--directory
    plus --prefix) or from files named on the command line, converts
    each event with EveFilter and writes one JSON object per line to
    the configured outputs.
    """
    msgmap = maps.SignatureMap()
    classmap = maps.ClassificationMap()
    parser = argparse.ArgumentParser(
        fromfile_prefix_chars='@', epilog=epilog)
    parser.add_argument(
        "-C", dest="classification_path", metavar="<classification.config>",
        help="path to classification config")
    parser.add_argument(
        "-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
        help="path to sid-msg.map")
    parser.add_argument(
        "-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
        help="path to gen-msg.map")
    parser.add_argument(
        "--snort-conf", dest="snort_conf", metavar="<snort.conf>",
        help="attempt to load classifications and map files based on the "
        "location of the snort.conf")
    parser.add_argument(
        "--directory", metavar="<spool directory>",
        help="spool directory (eg: /var/log/snort)")
    parser.add_argument(
        "--prefix", metavar="<spool file prefix>",
        help="spool filename prefix (eg: unified2.log)")
    parser.add_argument(
        "--bookmark", action="store_true", default=False,
        help="enable bookmarking")
    parser.add_argument(
        "--follow", action="store_true", default=False,
        help="follow files/continuous mode (spool mode only)")
    parser.add_argument(
        "--delete", action="store_true", default=False,
        help="delete spool files")
    # NOTE(review): the help string below is missing a closing ")" --
    # cosmetic only.
    parser.add_argument(
        "--output", metavar="<filename>",
        help="output filename (eg: /var/log/snort/alerts.json")
    parser.add_argument(
        "--stdout", action="store_true", default=False,
        help="also log to stdout if --output is a file")
    parser.add_argument(
        "filenames", nargs="*")
    args = parser.parse_args()
    # Map loading: snort.conf-derived maps first, then any explicitly
    # given map files are loaded afterwards.
    if args.snort_conf:
        load_from_snort_conf(args.snort_conf, classmap, msgmap)
    if args.classification_path:
        classmap.load_from_file(
            open(os.path.expanduser(args.classification_path)))
    if args.genmsgmap_path:
        msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
    if args.sidmsgmap_path:
        msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
    # Missing maps are not fatal; events are still emitted, just
    # without resolved signature/classification text.
    if msgmap.size() == 0:
        LOG.warn("WARNING: No alert message map entries loaded.")
    else:
        LOG.info("Loaded %s rule message map entries.", msgmap.size())
    if classmap.size() == 0:
        LOG.warn("WARNING: No classifications loaded.")
    else:
        LOG.info("Loaded %s classifications.", classmap.size())
    eve_filter = EveFilter(msgmap, classmap)
    # --stdout mirrors output only when --output names a file;
    # otherwise stdout is the sole output.
    outputs = []
    if args.output:
        outputs.append(OutputWrapper(args.output))
        if args.stdout:
            outputs.append(OutputWrapper("-", sys.stdout))
    else:
        outputs.append(OutputWrapper("-", sys.stdout))
    # Spool mode takes precedence over explicit filenames.
    if args.directory and args.prefix:
        reader = unified2.SpoolEventReader(
            directory=args.directory,
            prefix=args.prefix,
            follow=args.follow,
            delete=args.delete,
            bookmark=args.bookmark)
    elif args.filenames:
        reader = unified2.FileEventReader(*args.filenames)
    else:
        print("nothing to do.")
        return
    # One JSON object per line; encoding failures are logged and the
    # offending event is skipped.
    # NOTE(review): the encoding= kwarg to json.dumps is Python 2 only
    # and raises TypeError on Python 3 -- confirm target interpreter.
    for event in reader:
        try:
            encoded = json.dumps(eve_filter.filter(event), encoding="latin-1")
            for out in outputs:
                out.write(encoded)
        except Exception as err:
            LOG.error("Failed to encode record as JSON: %s: %s" % (
                str(err), str(event)))
# Allow running this module directly as a script.
if __name__ == "__main__":
    sys.exit(main())
|
normal
|
{
"blob_id": "41889456fbb56d263e0039716519e8959316b67e",
"index": 3473,
"step-1": "<mask token>\n\n\ndef render_timestamp(sec, usec):\n tt = time.localtime(sec)\n return '%04d-%02d-%02dT%02d:%02d:%02d.%06d%s' % (tt.tm_year, tt.tm_mon,\n tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec, usec, get_tzoffset(sec))\n\n\n<mask token>\n\n\nclass EveFilter(object):\n\n def __init__(self, msgmap=None, classmap=None):\n self.msgmap = msgmap\n self.classmap = classmap\n\n def filter(self, event):\n output = OrderedDict()\n output['timestamp'] = render_timestamp(event['event-second'], event\n ['event-microsecond'])\n output['sensor_id'] = event['sensor-id']\n output['event_type'] = 'alert'\n output['src_ip'] = event['source-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['src_port'] = event['sport-itype']\n output['dest_ip'] = event['destination-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['dest_port'] = event['dport-icode']\n output['proto'] = self.getprotobynumber(event['protocol'])\n if event['protocol'] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:\n output['icmp_type'] = event['sport-itype']\n output['icmp_code'] = event['dport-icode']\n output['flow_id'] = calculate_flow_id(event)\n alert = OrderedDict()\n alert['action'] = 'blocked' if event['blocked'] == 1 else 'allowed'\n alert['gid'] = event['generator-id']\n alert['signature_id'] = event['signature-id']\n alert['rev'] = event['signature-revision']\n alert['signature'] = self.resolve_msg(event)\n alert['category'] = self.resolve_classification(event)\n alert['severity'] = event['priority']\n output['alert'] = alert\n if event['packets']:\n output['packet'] = base64.b64encode(event['packets'][0]['data'])\n return output\n\n def resolve_classification(self, event, default=None):\n if self.classmap:\n classinfo = self.classmap.get(event['classification-id'])\n if classinfo:\n return classinfo['description']\n return default\n\n def resolve_msg(self, event, default=None):\n if self.msgmap:\n signature = 
self.msgmap.get(event['generator-id'], event[\n 'signature-id'])\n if signature:\n return signature['msg']\n return default\n\n def getprotobynumber(self, protocol):\n return proto_map.get(protocol, str(protocol))\n\n\nclass OutputWrapper(object):\n\n def __init__(self, filename, fileobj=None):\n self.filename = filename\n self.fileobj = fileobj\n if self.fileobj is None:\n self.reopen()\n self.isfile = True\n else:\n self.isfile = False\n\n def reopen(self):\n if self.fileobj:\n self.fileobj.close()\n self.fileobj = open(self.filename, 'ab')\n\n def write(self, buf):\n if self.isfile:\n if not os.path.exists(self.filename):\n self.reopen()\n self.fileobj.write(buf)\n self.fileobj.write('\\n')\n self.fileobj.flush()\n\n\n<mask token>\n\n\ndef main():\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@', epilog=epilog)\n parser.add_argument('-C', dest='classification_path', metavar=\n '<classification.config>', help='path to classification config')\n parser.add_argument('-S', dest='sidmsgmap_path', metavar=\n '<msg-msg.map>', help='path to sid-msg.map')\n parser.add_argument('-G', dest='genmsgmap_path', metavar=\n '<gen-msg.map>', help='path to gen-msg.map')\n parser.add_argument('--snort-conf', dest='snort_conf', metavar=\n '<snort.conf>', help=\n 'attempt to load classifications and map files based on the location of the snort.conf'\n )\n parser.add_argument('--directory', metavar='<spool directory>', help=\n 'spool directory (eg: /var/log/snort)')\n parser.add_argument('--prefix', metavar='<spool file prefix>', help=\n 'spool filename prefix (eg: unified2.log)')\n parser.add_argument('--bookmark', action='store_true', default=False,\n help='enable bookmarking')\n parser.add_argument('--follow', action='store_true', default=False,\n help='follow files/continuous mode (spool mode only)')\n parser.add_argument('--delete', action='store_true', default=False,\n help='delete spool files')\n 
parser.add_argument('--output', metavar='<filename>', help=\n 'output filename (eg: /var/log/snort/alerts.json')\n parser.add_argument('--stdout', action='store_true', default=False,\n help='also log to stdout if --output is a file')\n parser.add_argument('filenames', nargs='*')\n args = parser.parse_args()\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n if args.classification_path:\n classmap.load_from_file(open(os.path.expanduser(args.\n classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path))\n )\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path))\n )\n if msgmap.size() == 0:\n LOG.warn('WARNING: No alert message map entries loaded.')\n else:\n LOG.info('Loaded %s rule message map entries.', msgmap.size())\n if classmap.size() == 0:\n LOG.warn('WARNING: No classifications loaded.')\n else:\n LOG.info('Loaded %s classifications.', classmap.size())\n eve_filter = EveFilter(msgmap, classmap)\n outputs = []\n if args.output:\n outputs.append(OutputWrapper(args.output))\n if args.stdout:\n outputs.append(OutputWrapper('-', sys.stdout))\n else:\n outputs.append(OutputWrapper('-', sys.stdout))\n if args.directory and args.prefix:\n reader = unified2.SpoolEventReader(directory=args.directory, prefix\n =args.prefix, follow=args.follow, delete=args.delete, bookmark=\n args.bookmark)\n elif args.filenames:\n reader = unified2.FileEventReader(*args.filenames)\n else:\n print('nothing to do.')\n return\n for event in reader:\n try:\n encoded = json.dumps(eve_filter.filter(event), encoding='latin-1')\n for out in outputs:\n out.write(encoded)\n except Exception as err:\n LOG.error('Failed to encode record as JSON: %s: %s' % (str(err),\n str(event)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_tzoffset(sec):\n offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)\n if offset.days == -1:\n return '-%02d%02d' % ((86400 - offset.seconds) / 3600, (86400 -\n offset.seconds) % 3600)\n else:\n return '+%02d%02d' % (offset.seconds / 3600, offset.seconds % 3600)\n\n\ndef render_timestamp(sec, usec):\n tt = time.localtime(sec)\n return '%04d-%02d-%02dT%02d:%02d:%02d.%06d%s' % (tt.tm_year, tt.tm_mon,\n tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec, usec, get_tzoffset(sec))\n\n\ndef calculate_flow_id(event):\n flow_id = event['protocol'] << 24\n if len(event['source-ip.raw']) == 4:\n flow_id = flow_id ^ struct.unpack('>L', event['source-ip.raw'])[0\n ] ^ struct.unpack('>L', event['destination-ip.raw'])[0]\n else:\n for part in struct.unpack('>LLLL', event['source-ip.raw']):\n flow_id = flow_id ^ part\n for part in struct.unpack('>LLLL', event['destination-ip.raw']):\n flow_id = flow_id ^ part\n if 'src_port' in event and 'dest_port' in event:\n flow_id = flow_id ^ event['src_port'] ^ event['dest_port']\n return flow_id\n\n\nclass EveFilter(object):\n\n def __init__(self, msgmap=None, classmap=None):\n self.msgmap = msgmap\n self.classmap = classmap\n\n def filter(self, event):\n output = OrderedDict()\n output['timestamp'] = render_timestamp(event['event-second'], event\n ['event-microsecond'])\n output['sensor_id'] = event['sensor-id']\n output['event_type'] = 'alert'\n output['src_ip'] = event['source-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['src_port'] = event['sport-itype']\n output['dest_ip'] = event['destination-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['dest_port'] = event['dport-icode']\n output['proto'] = self.getprotobynumber(event['protocol'])\n if event['protocol'] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:\n output['icmp_type'] = event['sport-itype']\n output['icmp_code'] = event['dport-icode']\n output['flow_id'] = 
calculate_flow_id(event)\n alert = OrderedDict()\n alert['action'] = 'blocked' if event['blocked'] == 1 else 'allowed'\n alert['gid'] = event['generator-id']\n alert['signature_id'] = event['signature-id']\n alert['rev'] = event['signature-revision']\n alert['signature'] = self.resolve_msg(event)\n alert['category'] = self.resolve_classification(event)\n alert['severity'] = event['priority']\n output['alert'] = alert\n if event['packets']:\n output['packet'] = base64.b64encode(event['packets'][0]['data'])\n return output\n\n def resolve_classification(self, event, default=None):\n if self.classmap:\n classinfo = self.classmap.get(event['classification-id'])\n if classinfo:\n return classinfo['description']\n return default\n\n def resolve_msg(self, event, default=None):\n if self.msgmap:\n signature = self.msgmap.get(event['generator-id'], event[\n 'signature-id'])\n if signature:\n return signature['msg']\n return default\n\n def getprotobynumber(self, protocol):\n return proto_map.get(protocol, str(protocol))\n\n\nclass OutputWrapper(object):\n\n def __init__(self, filename, fileobj=None):\n self.filename = filename\n self.fileobj = fileobj\n if self.fileobj is None:\n self.reopen()\n self.isfile = True\n else:\n self.isfile = False\n\n def reopen(self):\n if self.fileobj:\n self.fileobj.close()\n self.fileobj = open(self.filename, 'ab')\n\n def write(self, buf):\n if self.isfile:\n if not os.path.exists(self.filename):\n self.reopen()\n self.fileobj.write(buf)\n self.fileobj.write('\\n')\n self.fileobj.flush()\n\n\ndef load_from_snort_conf(snort_conf, classmap, msgmap):\n snort_etc = os.path.dirname(os.path.expanduser(snort_conf))\n classification_config = os.path.join(snort_etc, 'classification.config')\n if os.path.exists(classification_config):\n LOG.debug('Loading %s.', classification_config)\n classmap.load_from_file(open(classification_config))\n genmsg_map = os.path.join(snort_etc, 'gen-msg.map')\n if os.path.exists(genmsg_map):\n LOG.debug('Loading %s.', 
genmsg_map)\n msgmap.load_generator_map(open(genmsg_map))\n sidmsg_map = os.path.join(snort_etc, 'sid-msg.map')\n if os.path.exists(sidmsg_map):\n LOG.debug('Loading %s.', sidmsg_map)\n msgmap.load_signature_map(open(sidmsg_map))\n\n\n<mask token>\n\n\ndef main():\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@', epilog=epilog)\n parser.add_argument('-C', dest='classification_path', metavar=\n '<classification.config>', help='path to classification config')\n parser.add_argument('-S', dest='sidmsgmap_path', metavar=\n '<msg-msg.map>', help='path to sid-msg.map')\n parser.add_argument('-G', dest='genmsgmap_path', metavar=\n '<gen-msg.map>', help='path to gen-msg.map')\n parser.add_argument('--snort-conf', dest='snort_conf', metavar=\n '<snort.conf>', help=\n 'attempt to load classifications and map files based on the location of the snort.conf'\n )\n parser.add_argument('--directory', metavar='<spool directory>', help=\n 'spool directory (eg: /var/log/snort)')\n parser.add_argument('--prefix', metavar='<spool file prefix>', help=\n 'spool filename prefix (eg: unified2.log)')\n parser.add_argument('--bookmark', action='store_true', default=False,\n help='enable bookmarking')\n parser.add_argument('--follow', action='store_true', default=False,\n help='follow files/continuous mode (spool mode only)')\n parser.add_argument('--delete', action='store_true', default=False,\n help='delete spool files')\n parser.add_argument('--output', metavar='<filename>', help=\n 'output filename (eg: /var/log/snort/alerts.json')\n parser.add_argument('--stdout', action='store_true', default=False,\n help='also log to stdout if --output is a file')\n parser.add_argument('filenames', nargs='*')\n args = parser.parse_args()\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n if args.classification_path:\n classmap.load_from_file(open(os.path.expanduser(args.\n 
classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path))\n )\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path))\n )\n if msgmap.size() == 0:\n LOG.warn('WARNING: No alert message map entries loaded.')\n else:\n LOG.info('Loaded %s rule message map entries.', msgmap.size())\n if classmap.size() == 0:\n LOG.warn('WARNING: No classifications loaded.')\n else:\n LOG.info('Loaded %s classifications.', classmap.size())\n eve_filter = EveFilter(msgmap, classmap)\n outputs = []\n if args.output:\n outputs.append(OutputWrapper(args.output))\n if args.stdout:\n outputs.append(OutputWrapper('-', sys.stdout))\n else:\n outputs.append(OutputWrapper('-', sys.stdout))\n if args.directory and args.prefix:\n reader = unified2.SpoolEventReader(directory=args.directory, prefix\n =args.prefix, follow=args.follow, delete=args.delete, bookmark=\n args.bookmark)\n elif args.filenames:\n reader = unified2.FileEventReader(*args.filenames)\n else:\n print('nothing to do.')\n return\n for event in reader:\n try:\n encoded = json.dumps(eve_filter.filter(event), encoding='latin-1')\n for out in outputs:\n out.write(encoded)\n except Exception as err:\n LOG.error('Failed to encode record as JSON: %s: %s' % (str(err),\n str(event)))\n\n\n<mask token>\n",
"step-3": "<mask token>\nif sys.argv[0] == __file__:\n sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..',\n '..')))\n<mask token>\ntry:\n from collections import OrderedDict\nexcept ImportError as err:\n from idstools.compat.ordereddict import OrderedDict\ntry:\n import argparse\nexcept ImportError as err:\n from idstools.compat.argparse import argparse\n<mask token>\nlogging.basicConfig(level=logging.INFO, format='%(message)s')\n<mask token>\n\n\ndef get_tzoffset(sec):\n offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)\n if offset.days == -1:\n return '-%02d%02d' % ((86400 - offset.seconds) / 3600, (86400 -\n offset.seconds) % 3600)\n else:\n return '+%02d%02d' % (offset.seconds / 3600, offset.seconds % 3600)\n\n\ndef render_timestamp(sec, usec):\n tt = time.localtime(sec)\n return '%04d-%02d-%02dT%02d:%02d:%02d.%06d%s' % (tt.tm_year, tt.tm_mon,\n tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec, usec, get_tzoffset(sec))\n\n\ndef calculate_flow_id(event):\n flow_id = event['protocol'] << 24\n if len(event['source-ip.raw']) == 4:\n flow_id = flow_id ^ struct.unpack('>L', event['source-ip.raw'])[0\n ] ^ struct.unpack('>L', event['destination-ip.raw'])[0]\n else:\n for part in struct.unpack('>LLLL', event['source-ip.raw']):\n flow_id = flow_id ^ part\n for part in struct.unpack('>LLLL', event['destination-ip.raw']):\n flow_id = flow_id ^ part\n if 'src_port' in event and 'dest_port' in event:\n flow_id = flow_id ^ event['src_port'] ^ event['dest_port']\n return flow_id\n\n\nclass EveFilter(object):\n\n def __init__(self, msgmap=None, classmap=None):\n self.msgmap = msgmap\n self.classmap = classmap\n\n def filter(self, event):\n output = OrderedDict()\n output['timestamp'] = render_timestamp(event['event-second'], event\n ['event-microsecond'])\n output['sensor_id'] = event['sensor-id']\n output['event_type'] = 'alert'\n output['src_ip'] = event['source-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n 
output['src_port'] = event['sport-itype']\n output['dest_ip'] = event['destination-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['dest_port'] = event['dport-icode']\n output['proto'] = self.getprotobynumber(event['protocol'])\n if event['protocol'] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:\n output['icmp_type'] = event['sport-itype']\n output['icmp_code'] = event['dport-icode']\n output['flow_id'] = calculate_flow_id(event)\n alert = OrderedDict()\n alert['action'] = 'blocked' if event['blocked'] == 1 else 'allowed'\n alert['gid'] = event['generator-id']\n alert['signature_id'] = event['signature-id']\n alert['rev'] = event['signature-revision']\n alert['signature'] = self.resolve_msg(event)\n alert['category'] = self.resolve_classification(event)\n alert['severity'] = event['priority']\n output['alert'] = alert\n if event['packets']:\n output['packet'] = base64.b64encode(event['packets'][0]['data'])\n return output\n\n def resolve_classification(self, event, default=None):\n if self.classmap:\n classinfo = self.classmap.get(event['classification-id'])\n if classinfo:\n return classinfo['description']\n return default\n\n def resolve_msg(self, event, default=None):\n if self.msgmap:\n signature = self.msgmap.get(event['generator-id'], event[\n 'signature-id'])\n if signature:\n return signature['msg']\n return default\n\n def getprotobynumber(self, protocol):\n return proto_map.get(protocol, str(protocol))\n\n\nclass OutputWrapper(object):\n\n def __init__(self, filename, fileobj=None):\n self.filename = filename\n self.fileobj = fileobj\n if self.fileobj is None:\n self.reopen()\n self.isfile = True\n else:\n self.isfile = False\n\n def reopen(self):\n if self.fileobj:\n self.fileobj.close()\n self.fileobj = open(self.filename, 'ab')\n\n def write(self, buf):\n if self.isfile:\n if not os.path.exists(self.filename):\n self.reopen()\n self.fileobj.write(buf)\n self.fileobj.write('\\n')\n self.fileobj.flush()\n\n\ndef 
load_from_snort_conf(snort_conf, classmap, msgmap):\n snort_etc = os.path.dirname(os.path.expanduser(snort_conf))\n classification_config = os.path.join(snort_etc, 'classification.config')\n if os.path.exists(classification_config):\n LOG.debug('Loading %s.', classification_config)\n classmap.load_from_file(open(classification_config))\n genmsg_map = os.path.join(snort_etc, 'gen-msg.map')\n if os.path.exists(genmsg_map):\n LOG.debug('Loading %s.', genmsg_map)\n msgmap.load_generator_map(open(genmsg_map))\n sidmsg_map = os.path.join(snort_etc, 'sid-msg.map')\n if os.path.exists(sidmsg_map):\n LOG.debug('Loading %s.', sidmsg_map)\n msgmap.load_signature_map(open(sidmsg_map))\n\n\n<mask token>\n\n\ndef main():\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@', epilog=epilog)\n parser.add_argument('-C', dest='classification_path', metavar=\n '<classification.config>', help='path to classification config')\n parser.add_argument('-S', dest='sidmsgmap_path', metavar=\n '<msg-msg.map>', help='path to sid-msg.map')\n parser.add_argument('-G', dest='genmsgmap_path', metavar=\n '<gen-msg.map>', help='path to gen-msg.map')\n parser.add_argument('--snort-conf', dest='snort_conf', metavar=\n '<snort.conf>', help=\n 'attempt to load classifications and map files based on the location of the snort.conf'\n )\n parser.add_argument('--directory', metavar='<spool directory>', help=\n 'spool directory (eg: /var/log/snort)')\n parser.add_argument('--prefix', metavar='<spool file prefix>', help=\n 'spool filename prefix (eg: unified2.log)')\n parser.add_argument('--bookmark', action='store_true', default=False,\n help='enable bookmarking')\n parser.add_argument('--follow', action='store_true', default=False,\n help='follow files/continuous mode (spool mode only)')\n parser.add_argument('--delete', action='store_true', default=False,\n help='delete spool files')\n parser.add_argument('--output', 
metavar='<filename>', help=\n 'output filename (eg: /var/log/snort/alerts.json')\n parser.add_argument('--stdout', action='store_true', default=False,\n help='also log to stdout if --output is a file')\n parser.add_argument('filenames', nargs='*')\n args = parser.parse_args()\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n if args.classification_path:\n classmap.load_from_file(open(os.path.expanduser(args.\n classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path))\n )\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path))\n )\n if msgmap.size() == 0:\n LOG.warn('WARNING: No alert message map entries loaded.')\n else:\n LOG.info('Loaded %s rule message map entries.', msgmap.size())\n if classmap.size() == 0:\n LOG.warn('WARNING: No classifications loaded.')\n else:\n LOG.info('Loaded %s classifications.', classmap.size())\n eve_filter = EveFilter(msgmap, classmap)\n outputs = []\n if args.output:\n outputs.append(OutputWrapper(args.output))\n if args.stdout:\n outputs.append(OutputWrapper('-', sys.stdout))\n else:\n outputs.append(OutputWrapper('-', sys.stdout))\n if args.directory and args.prefix:\n reader = unified2.SpoolEventReader(directory=args.directory, prefix\n =args.prefix, follow=args.follow, delete=args.delete, bookmark=\n args.bookmark)\n elif args.filenames:\n reader = unified2.FileEventReader(*args.filenames)\n else:\n print('nothing to do.')\n return\n for event in reader:\n try:\n encoded = json.dumps(eve_filter.filter(event), encoding='latin-1')\n for out in outputs:\n out.write(encoded)\n except Exception as err:\n LOG.error('Failed to encode record as JSON: %s: %s' % (str(err),\n str(event)))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "<mask token>\nif sys.argv[0] == __file__:\n sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..',\n '..')))\n<mask token>\ntry:\n from collections import OrderedDict\nexcept ImportError as err:\n from idstools.compat.ordereddict import OrderedDict\ntry:\n import argparse\nexcept ImportError as err:\n from idstools.compat.argparse import argparse\n<mask token>\nlogging.basicConfig(level=logging.INFO, format='%(message)s')\nLOG = logging.getLogger()\nproto_map = {(1): 'ICMP', (6): 'TCP', (17): 'UDP'}\n\n\ndef get_tzoffset(sec):\n offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)\n if offset.days == -1:\n return '-%02d%02d' % ((86400 - offset.seconds) / 3600, (86400 -\n offset.seconds) % 3600)\n else:\n return '+%02d%02d' % (offset.seconds / 3600, offset.seconds % 3600)\n\n\ndef render_timestamp(sec, usec):\n tt = time.localtime(sec)\n return '%04d-%02d-%02dT%02d:%02d:%02d.%06d%s' % (tt.tm_year, tt.tm_mon,\n tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec, usec, get_tzoffset(sec))\n\n\ndef calculate_flow_id(event):\n flow_id = event['protocol'] << 24\n if len(event['source-ip.raw']) == 4:\n flow_id = flow_id ^ struct.unpack('>L', event['source-ip.raw'])[0\n ] ^ struct.unpack('>L', event['destination-ip.raw'])[0]\n else:\n for part in struct.unpack('>LLLL', event['source-ip.raw']):\n flow_id = flow_id ^ part\n for part in struct.unpack('>LLLL', event['destination-ip.raw']):\n flow_id = flow_id ^ part\n if 'src_port' in event and 'dest_port' in event:\n flow_id = flow_id ^ event['src_port'] ^ event['dest_port']\n return flow_id\n\n\nclass EveFilter(object):\n\n def __init__(self, msgmap=None, classmap=None):\n self.msgmap = msgmap\n self.classmap = classmap\n\n def filter(self, event):\n output = OrderedDict()\n output['timestamp'] = render_timestamp(event['event-second'], event\n ['event-microsecond'])\n output['sensor_id'] = event['sensor-id']\n output['event_type'] = 'alert'\n output['src_ip'] = event['source-ip']\n if 
event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['src_port'] = event['sport-itype']\n output['dest_ip'] = event['destination-ip']\n if event['protocol'] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output['dest_port'] = event['dport-icode']\n output['proto'] = self.getprotobynumber(event['protocol'])\n if event['protocol'] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:\n output['icmp_type'] = event['sport-itype']\n output['icmp_code'] = event['dport-icode']\n output['flow_id'] = calculate_flow_id(event)\n alert = OrderedDict()\n alert['action'] = 'blocked' if event['blocked'] == 1 else 'allowed'\n alert['gid'] = event['generator-id']\n alert['signature_id'] = event['signature-id']\n alert['rev'] = event['signature-revision']\n alert['signature'] = self.resolve_msg(event)\n alert['category'] = self.resolve_classification(event)\n alert['severity'] = event['priority']\n output['alert'] = alert\n if event['packets']:\n output['packet'] = base64.b64encode(event['packets'][0]['data'])\n return output\n\n def resolve_classification(self, event, default=None):\n if self.classmap:\n classinfo = self.classmap.get(event['classification-id'])\n if classinfo:\n return classinfo['description']\n return default\n\n def resolve_msg(self, event, default=None):\n if self.msgmap:\n signature = self.msgmap.get(event['generator-id'], event[\n 'signature-id'])\n if signature:\n return signature['msg']\n return default\n\n def getprotobynumber(self, protocol):\n return proto_map.get(protocol, str(protocol))\n\n\nclass OutputWrapper(object):\n\n def __init__(self, filename, fileobj=None):\n self.filename = filename\n self.fileobj = fileobj\n if self.fileobj is None:\n self.reopen()\n self.isfile = True\n else:\n self.isfile = False\n\n def reopen(self):\n if self.fileobj:\n self.fileobj.close()\n self.fileobj = open(self.filename, 'ab')\n\n def write(self, buf):\n if self.isfile:\n if not os.path.exists(self.filename):\n self.reopen()\n 
self.fileobj.write(buf)\n self.fileobj.write('\\n')\n self.fileobj.flush()\n\n\ndef load_from_snort_conf(snort_conf, classmap, msgmap):\n snort_etc = os.path.dirname(os.path.expanduser(snort_conf))\n classification_config = os.path.join(snort_etc, 'classification.config')\n if os.path.exists(classification_config):\n LOG.debug('Loading %s.', classification_config)\n classmap.load_from_file(open(classification_config))\n genmsg_map = os.path.join(snort_etc, 'gen-msg.map')\n if os.path.exists(genmsg_map):\n LOG.debug('Loading %s.', genmsg_map)\n msgmap.load_generator_map(open(genmsg_map))\n sidmsg_map = os.path.join(snort_etc, 'sid-msg.map')\n if os.path.exists(sidmsg_map):\n LOG.debug('Loading %s.', sidmsg_map)\n msgmap.load_signature_map(open(sidmsg_map))\n\n\nepilog = \"\"\"If --directory and --prefix are provided files will be\nread from the specified 'spool' directory. Otherwise files on the\ncommand line will be processed.\n\"\"\"\n\n\ndef main():\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@', epilog=epilog)\n parser.add_argument('-C', dest='classification_path', metavar=\n '<classification.config>', help='path to classification config')\n parser.add_argument('-S', dest='sidmsgmap_path', metavar=\n '<msg-msg.map>', help='path to sid-msg.map')\n parser.add_argument('-G', dest='genmsgmap_path', metavar=\n '<gen-msg.map>', help='path to gen-msg.map')\n parser.add_argument('--snort-conf', dest='snort_conf', metavar=\n '<snort.conf>', help=\n 'attempt to load classifications and map files based on the location of the snort.conf'\n )\n parser.add_argument('--directory', metavar='<spool directory>', help=\n 'spool directory (eg: /var/log/snort)')\n parser.add_argument('--prefix', metavar='<spool file prefix>', help=\n 'spool filename prefix (eg: unified2.log)')\n parser.add_argument('--bookmark', action='store_true', default=False,\n help='enable bookmarking')\n 
parser.add_argument('--follow', action='store_true', default=False,\n help='follow files/continuous mode (spool mode only)')\n parser.add_argument('--delete', action='store_true', default=False,\n help='delete spool files')\n parser.add_argument('--output', metavar='<filename>', help=\n 'output filename (eg: /var/log/snort/alerts.json')\n parser.add_argument('--stdout', action='store_true', default=False,\n help='also log to stdout if --output is a file')\n parser.add_argument('filenames', nargs='*')\n args = parser.parse_args()\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n if args.classification_path:\n classmap.load_from_file(open(os.path.expanduser(args.\n classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path))\n )\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path))\n )\n if msgmap.size() == 0:\n LOG.warn('WARNING: No alert message map entries loaded.')\n else:\n LOG.info('Loaded %s rule message map entries.', msgmap.size())\n if classmap.size() == 0:\n LOG.warn('WARNING: No classifications loaded.')\n else:\n LOG.info('Loaded %s classifications.', classmap.size())\n eve_filter = EveFilter(msgmap, classmap)\n outputs = []\n if args.output:\n outputs.append(OutputWrapper(args.output))\n if args.stdout:\n outputs.append(OutputWrapper('-', sys.stdout))\n else:\n outputs.append(OutputWrapper('-', sys.stdout))\n if args.directory and args.prefix:\n reader = unified2.SpoolEventReader(directory=args.directory, prefix\n =args.prefix, follow=args.follow, delete=args.delete, bookmark=\n args.bookmark)\n elif args.filenames:\n reader = unified2.FileEventReader(*args.filenames)\n else:\n print('nothing to do.')\n return\n for event in reader:\n try:\n encoded = json.dumps(eve_filter.filter(event), encoding='latin-1')\n for out in outputs:\n out.write(encoded)\n except Exception as err:\n LOG.error('Failed to encode record as 
JSON: %s: %s' % (str(err),\n str(event)))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "#! /usr/bin/env python\n#\n# Copyright (c) 2015 Jason Ish\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED\n# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Read unified2 log files and output events as Suricata EVE JSON.\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport os.path\nimport base64\n\nif sys.argv[0] == __file__:\n sys.path.insert(\n 0, os.path.abspath(os.path.join(__file__, \"..\", \"..\", \"..\")))\n\nimport socket\nimport time\nimport json\nimport logging\nimport struct\nfrom datetime import datetime\ntry:\n from collections import OrderedDict\nexcept ImportError as err:\n from idstools.compat.ordereddict import OrderedDict\n\ntry:\n import argparse\nexcept ImportError as err:\n from idstools.compat.argparse import argparse\n\nfrom idstools import unified2\nfrom 
idstools import maps\n\nlogging.basicConfig(level=logging.INFO, format=\"%(message)s\")\nLOG = logging.getLogger()\n\nproto_map = {\n 1: \"ICMP\",\n 6: \"TCP\",\n 17: \"UDP\",\n}\n\ndef get_tzoffset(sec):\n offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)\n if offset.days == -1:\n return \"-%02d%02d\" % (\n (86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600)\n else:\n return \"+%02d%02d\" % (\n offset.seconds / 3600, offset.seconds % 3600)\n\ndef render_timestamp(sec, usec):\n tt = time.localtime(sec)\n return \"%04d-%02d-%02dT%02d:%02d:%02d.%06d%s\" % (\n tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,\n usec, get_tzoffset(sec))\n\ndef calculate_flow_id(event):\n\n flow_id = event[\"protocol\"] << 24\n\n if len(event[\"source-ip.raw\"]) == 4:\n flow_id = flow_id ^ \\\n struct.unpack(\">L\", event[\"source-ip.raw\"])[0] ^ \\\n struct.unpack(\">L\", event[\"destination-ip.raw\"])[0]\n else:\n for part in struct.unpack(\">LLLL\", event[\"source-ip.raw\"]):\n flow_id = flow_id ^ part\n for part in struct.unpack(\">LLLL\", event[\"destination-ip.raw\"]):\n flow_id = flow_id ^ part\n\n if \"src_port\" in event and \"dest_port\" in event:\n flow_id = flow_id ^ event[\"src_port\"] ^ event[\"dest_port\"]\n\n return flow_id\n\nclass EveFilter(object):\n\n def __init__(\n self, msgmap=None, classmap=None):\n self.msgmap = msgmap\n self.classmap = classmap\n\n def filter(self, event):\n output = OrderedDict()\n output[\"timestamp\"] = render_timestamp(\n event[\"event-second\"], event[\"event-microsecond\"])\n output[\"sensor_id\"] = event[\"sensor-id\"]\n output[\"event_type\"] = \"alert\"\n output[\"src_ip\"] = event[\"source-ip\"]\n if event[\"protocol\"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output[\"src_port\"] = event[\"sport-itype\"]\n output[\"dest_ip\"] = event[\"destination-ip\"]\n if event[\"protocol\"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:\n output[\"dest_port\"] = event[\"dport-icode\"]\n 
output[\"proto\"] = self.getprotobynumber(event[\"protocol\"])\n\n if event[\"protocol\"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:\n output[\"icmp_type\"] = event[\"sport-itype\"]\n output[\"icmp_code\"] = event[\"dport-icode\"]\n\n output[\"flow_id\"] = calculate_flow_id(event)\n\n alert = OrderedDict()\n alert[\"action\"] = \"blocked\" if event[\"blocked\"] == 1 else \"allowed\"\n alert[\"gid\"] = event[\"generator-id\"]\n alert[\"signature_id\"] = event[\"signature-id\"]\n alert[\"rev\"] = event[\"signature-revision\"]\n alert[\"signature\"] = self.resolve_msg(event)\n alert[\"category\"] = self.resolve_classification(event)\n alert[\"severity\"] = event[\"priority\"]\n output[\"alert\"] = alert\n\n # EVE only includes one packet.\n if event[\"packets\"]:\n output[\"packet\"] = base64.b64encode(event[\"packets\"][0][\"data\"])\n\n return output\n\n def resolve_classification(self, event, default=None):\n if self.classmap:\n classinfo = self.classmap.get(event[\"classification-id\"])\n if classinfo:\n return classinfo[\"description\"]\n return default\n\n def resolve_msg(self, event, default=None):\n if self.msgmap:\n signature = self.msgmap.get(\n event[\"generator-id\"], event[\"signature-id\"])\n if signature:\n return signature[\"msg\"]\n return default\n\n def getprotobynumber(self, protocol):\n return proto_map.get(protocol, str(protocol))\n\nclass OutputWrapper(object):\n\n def __init__(self, filename, fileobj=None):\n self.filename = filename\n self.fileobj = fileobj\n\n if self.fileobj is None:\n self.reopen()\n self.isfile = True\n else:\n self.isfile = False\n\n def reopen(self):\n if self.fileobj:\n self.fileobj.close()\n self.fileobj = open(self.filename, \"ab\")\n\n def write(self, buf):\n if self.isfile:\n if not os.path.exists(self.filename):\n self.reopen()\n self.fileobj.write(buf)\n self.fileobj.write(\"\\n\")\n self.fileobj.flush()\n\ndef load_from_snort_conf(snort_conf, classmap, msgmap):\n snort_etc = 
os.path.dirname(os.path.expanduser(snort_conf))\n\n classification_config = os.path.join(snort_etc, \"classification.config\")\n if os.path.exists(classification_config):\n LOG.debug(\"Loading %s.\", classification_config)\n classmap.load_from_file(open(classification_config))\n\n genmsg_map = os.path.join(snort_etc, \"gen-msg.map\")\n if os.path.exists(genmsg_map):\n LOG.debug(\"Loading %s.\", genmsg_map)\n msgmap.load_generator_map(open(genmsg_map))\n\n sidmsg_map = os.path.join(snort_etc, \"sid-msg.map\")\n if os.path.exists(sidmsg_map):\n LOG.debug(\"Loading %s.\", sidmsg_map)\n msgmap.load_signature_map(open(sidmsg_map))\n\nepilog = \"\"\"If --directory and --prefix are provided files will be\nread from the specified 'spool' directory. Otherwise files on the\ncommand line will be processed.\n\"\"\"\n\ndef main():\n\n msgmap = maps.SignatureMap()\n classmap = maps.ClassificationMap()\n\n parser = argparse.ArgumentParser(\n fromfile_prefix_chars='@', epilog=epilog)\n parser.add_argument(\n \"-C\", dest=\"classification_path\", metavar=\"<classification.config>\",\n help=\"path to classification config\")\n parser.add_argument(\n \"-S\", dest=\"sidmsgmap_path\", metavar=\"<msg-msg.map>\",\n help=\"path to sid-msg.map\")\n parser.add_argument(\n \"-G\", dest=\"genmsgmap_path\", metavar=\"<gen-msg.map>\",\n help=\"path to gen-msg.map\")\n parser.add_argument(\n \"--snort-conf\", dest=\"snort_conf\", metavar=\"<snort.conf>\",\n help=\"attempt to load classifications and map files based on the \"\n \"location of the snort.conf\")\n parser.add_argument(\n \"--directory\", metavar=\"<spool directory>\",\n help=\"spool directory (eg: /var/log/snort)\")\n parser.add_argument(\n \"--prefix\", metavar=\"<spool file prefix>\",\n help=\"spool filename prefix (eg: unified2.log)\")\n parser.add_argument(\n \"--bookmark\", action=\"store_true\", default=False,\n help=\"enable bookmarking\")\n parser.add_argument(\n \"--follow\", action=\"store_true\", default=False,\n 
help=\"follow files/continuous mode (spool mode only)\")\n parser.add_argument(\n \"--delete\", action=\"store_true\", default=False,\n help=\"delete spool files\")\n parser.add_argument(\n \"--output\", metavar=\"<filename>\",\n help=\"output filename (eg: /var/log/snort/alerts.json\")\n parser.add_argument(\n \"--stdout\", action=\"store_true\", default=False,\n help=\"also log to stdout if --output is a file\")\n parser.add_argument(\n \"filenames\", nargs=\"*\")\n args = parser.parse_args()\n\n if args.snort_conf:\n load_from_snort_conf(args.snort_conf, classmap, msgmap)\n\n if args.classification_path:\n classmap.load_from_file(\n open(os.path.expanduser(args.classification_path)))\n if args.genmsgmap_path:\n msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))\n if args.sidmsgmap_path:\n msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))\n\n if msgmap.size() == 0:\n LOG.warn(\"WARNING: No alert message map entries loaded.\")\n else:\n LOG.info(\"Loaded %s rule message map entries.\", msgmap.size())\n\n if classmap.size() == 0:\n LOG.warn(\"WARNING: No classifications loaded.\")\n else:\n LOG.info(\"Loaded %s classifications.\", classmap.size())\n\n eve_filter = EveFilter(msgmap, classmap)\n\n outputs = []\n\n if args.output:\n outputs.append(OutputWrapper(args.output))\n if args.stdout:\n outputs.append(OutputWrapper(\"-\", sys.stdout))\n else:\n outputs.append(OutputWrapper(\"-\", sys.stdout))\n\n if args.directory and args.prefix:\n reader = unified2.SpoolEventReader(\n directory=args.directory,\n prefix=args.prefix,\n follow=args.follow,\n delete=args.delete,\n bookmark=args.bookmark)\n elif args.filenames:\n reader = unified2.FileEventReader(*args.filenames)\n else:\n print(\"nothing to do.\")\n return\n\n for event in reader:\n try:\n encoded = json.dumps(eve_filter.filter(event), encoding=\"latin-1\")\n for out in outputs:\n out.write(encoded)\n except Exception as err:\n LOG.error(\"Failed to encode record as 
JSON: %s: %s\" % (\n str(err), str(event)))\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"step-ids": [
12,
15,
16,
17,
19
]
}
|
[
12,
15,
16,
17,
19
] |
# Python standard library imports
import asyncio
import ipaddress
import logging

# Third-party library import
import asyncssh
# Module logging logger (named after the package so applications can
# filter this library's messages)
log = logging.getLogger(__package__)

# Debug level
# logging.basicConfig(level=logging.WARNING)
# logging.basicConfig(level=logging.INFO)
# NOTE(review): configuring the root logger at DEBUG level (and asyncssh at
# its most verbose debug level) from a library module affects every
# importing application — confirm this is intended outside development.
logging.basicConfig(level=logging.DEBUG)
asyncssh.set_debug_level(2)

# Declaration of constant values

# Maximum number of bytes requested per read from the remote device
# (used as the buffer size by the read functions)
MAX_BUFFER_DATA = 65535
# Dictionary mapping every IPv4 dotted-quad netmask to its prefix length,
# both as strings (e.g. "255.255.255.0" -> "24").
# Generated programmatically (all 33 entries, /0 through /32) instead of a
# hand-maintained literal table, which is prone to typos.
ipv4_netmask_list = {
    str(ipaddress.IPv4Address((0xFFFFFFFF << (32 - prefix)) & 0xFFFFFFFF)): str(prefix)
    for prefix in range(33)
}
class NetworkDevice:
"""
Base class for network object
:param ip: IP address of a device
:type ip: str
:param username: Username used to connect to a device
:type username: str
:param password: Password used to connect to a device
:type password: str
:param device_type: Type of device used
:type device_type: str
:param port: TCP port used to connect a device. Default value is "22" for SSH
:type port: int, optional
    :param timeout: Timeout in seconds used when connecting to a device. Default value is 10 seconds
:type timeout: int, optional
:param _protocol: Protocol used to connect a device. "ssh" or "telnet" are possible options. Default value is "ssh"
:type _protocol: str, optional
:param enable_mode: Enable mode for devices requiring it. Default value is "False"
:type enable_mode: bool, optional
:param enable_password: Enable password used for enable mode.
:type enable_password: str, optional
:param conn: Variable used for the management of the SSH connection
:type conn: SSHClientConnection object
:param _writer: Variable used for the management of the Telnet connection and writing channel
:type _writer: StreamWriter object
:param _reader: Variable used for the management of the Telnet reading channel
:type _reader: StreamReader object
:param possible_prompts: Used by the connect method to list all possible prompts of the device
:type possible_prompts: list
:param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt
:type _connect_first_ending_prompt: list
:param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands
:type list_of_possible_ending_prompts: list
:param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed
:type _telnet_connect_login: str
:param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access is failed
:type _telnet_connect_password: list
:param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events
:type _telnet_connect_authentication_fail_prompt: list
:param cmd_enable: Enable command for entering into enable mode
:type cmd_enable: str
:param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time
:type cmd_disable_paging: str
:param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.
:type cmd_enter_config_mode: str
:param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.
:type cmd_exit_config_mode: str
:param cmd_get_version: API command used to get the software version of a device
:type cmd_get_version: str
:param cmd_get_hostname: API command used to get the hostname of a device
:type cmd_get_hostname: str
:param cmd_get_model: API command used to get the model of a device
:type cmd_get_model: str
:param cmd_get_serial_number: API command used to get the serial number of a device
:type cmd_get_serial_number: str
:param cmd_get_config: API command used to get the running configuration of a device
:type cmd_get_config: str
:param cmd_save_config: API command used to save the running configuration on the device
:type cmd_save_config: str
"""
def __init__(self, **kwargs):
# Display info message
log.info("__init__")
self.ip = ""
self.username = ""
self.password = ""
self.device_type = ""
self.port = 22
self.timeout = 10
self._protocol = "ssh"
self.enable_mode = False
self.enable_password = ""
self.conn = None
self._writer = None
self._reader = None
self.possible_prompts = []
self._connect_first_ending_prompt = ["#", ">"]
self.list_of_possible_ending_prompts = [
"(config-line)#",
"(config-if)#",
"(config)#",
">",
"#",
]
self._carriage_return_for_send_command = "\n"
self._send_command_error_in_returned_output = []
self._telnet_connect_login = "Username:"
self._telnet_connect_password = "Password:"
self._telnet_connect_authentication_fail_prompt = [":", "%"]
# General commands
self.cmd_enable = "enable"
self.cmd_disable_paging = "terminal length 0"
self.cmd_enter_config_mode = "configure terminal"
self.cmd_exit_config_mode = "exit"
self.cmd_get_version = "show version"
self.cmd_get_hostname = "show version | include uptime"
self.cmd_get_model = "show inventory"
self.cmd_get_serial_number = "show inventory | i SN"
self.cmd_get_config = "show running-config"
self.cmd_save_config = "write memory"
# Layer 1 commands
self.cmd_get_interfaces = [
"interface ethernet print terse without-paging",
"foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}",
"interface bridge port print terse without-paging",
]
self.cmd_set_interface = [
"interface ethernet enable <INTERFACE>",
"interface ethernet disable <INTERFACE>",
'interface ethernet comment <INTERFACE> "<COMMENT>"',
"interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>",
"interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]",
]
# Layer 2 commands
self.cmd_get_mac_address_table = "interface bridge host print without-paging"
self.cmd_get_arp = "ip arp print terse without-paging"
self.cmd_get_lldp_neighbors = "ip neighbor print terse without-paging"
self.cmd_get_vlans = "interface bridge vlan print terse without-paging"
self.cmd_add_vlan = 'interface bridge vlan add vlan-ids=<VLAN> comment="<VLAN_NAME>" bridge=<BRIDGE>'
self.cmd_remove_vlan = "interface bridge vlan remove [find vlan-ids=<VLAN>]"
self.cmd_add_interface_to_vlan = [
"interface bridge vlan print terse",
"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
]
self.cmd_remove_interface_from_vlan = [
"interface bridge vlan print terse",
"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>",
"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>",
"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>",
]
# Layer 3 commands
self.cmd_get_routing_table = "ip route print without-paging terse"
self.cmd_get_interfaces_ip = "ip address print terse without-paging"
self.cmd_add_static_route = "ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>"
self.cmd_remove_static_route = (
"ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]"
)
# Display info message
log.debug("__init__: kwargs: " + str(kwargs))
# Get information from dictionary
# "ip" found?
if "ip" in kwargs:
# Save "ip" parameter
self.ip = kwargs["ip"]
# Display info message
log.info("__init__: ip found: " + str(self.ip))
# "username" found?
if "username" in kwargs:
self.username = kwargs["username"]
# Display info message
log.info("__init__: username found: " + str(self.username))
# "password" found?
if "password" in kwargs:
self.password = kwargs["password"]
# Display info message
log.debug("__init__: password found: " + str(self.password))
# "device_type" found?
if "device_type" in kwargs:
self.device_type = kwargs["device_type"]
# Display info message
log.info("__init__: device_type found: " + str(self.device_type))
# "timeout" found?
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
# Display info message
log.info("__init__: timeout found: " + str(self.timeout))
# "protocol" found?
if "protocol" in kwargs:
self._protocol = kwargs["protocol"].lower()
# Display info message
log.info("__init__: protocol found: " + str(self._protocol))
# By default telnet port is 23
if self._protocol.lower() == "telnet":
self.port = 23
# "port" found?
if "port" in kwargs:
self.port = kwargs["port"]
# Display info message
log.info("__init__: port found: " + str(self.port))
# "enable_mode" found?
if "enable_mode" in kwargs:
self.enable_mode = kwargs["enable_mode"]
# Display info message
log.info("__init__: enable_mode found: " + str(self.enable_mode))
# "enable_password" found?
if "enable_password" in kwargs:
self.enable_password = kwargs["enable_password"]
# Display info message
log.info("__init__: enable_password found: " + str(self.enable_password))
async def __aenter__(self):
"""
Context manager opening connection
"""
try:
# Run an async method to connect a device
await self.connect()
except Exception:
# Disconnection (if needed) in case the connection is done but something failed
await self.disconnect()
# propagate exception if needed
raise
return self
# async def _aexit_(self, exc_type, exc_value, traceback):
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Context manager closing connection
"""
# Close the connection
await self.disconnect()
def find_prompt(self, text):
"""
Method used to find a prompt inside an output string
This method is used during the first communication with the device.
First it find the prompt then caculate the different forms the prompt
can take. This will be useful later on while finding prompt in other
output stream (read).
:param text: data with a prompt
:type text: str
:return: the prompt found
:rtype: str
"""
# Get last line of the data
prompt = text.split("\n")[-1]
# Remove possible \r in the data
# prompt = prompt.replace("\r", "")
prompt = text.split("\r")[-1]
# Display info message
log.info(f"find_prompt: prompt: '{prompt}'")
# Get the possible prompts for future recognition
self.possible_prompts = self.get_possible_prompts(prompt)
# Return the prompt
return prompt
def get_possible_prompts(self, prompt):
"""
Method used to check if a prompt has one of the expected endings then
create a list with all possible prompts for the device
:param prompt: a prompt with a possible ending prompt (eg. "switch#")
:type prompt: str
:return: the list of prompts
:rtype: list
"""
# By default no prompts are returned
list_of_prompts = []
# Get all the ppossible values of the endings of the prompt
list_of_possible_ending_prompts = self.list_of_possible_ending_prompts
# Temporary variable storing the prompt value
my_prompt = prompt
# Test each possible prompt ending (i.e '#', '>', "(config-if)#", "(config)#")
for ending in list_of_possible_ending_prompts:
# Is this current prompt ending at the end of the prompt?
if my_prompt.endswith(ending):
# Yes
# Then remove the ending
my_prompt = my_prompt[: -len(ending)]
# Break the loop
break
# Prompt should be from "switch#" to "switch"
# Display info message
log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
# Display info message
log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'")
# Now create all the possible prompts for that device
for ending in list_of_possible_ending_prompts:
# Save the prompt name with a possible ending in the list
list_of_prompts.append(my_prompt + ending)
# Display info message
log.info(f"get_possible_prompts: list of possible prompts: {list_of_prompts}")
# Return the list of prompts
return list_of_prompts
def check_if_prompt_is_found(self, text):
"""
Method used to check if a prompt is detected inside a string
:param text: a string with prompt
:type text: str
:return: the prompt found
:rtype: str
"""
# By default the prompt is not found
prompt_found = False
# Check all possible prompts
for prompt in self.possible_prompts:
# Display info message
log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
# Is this prompt present in the text?
if prompt in text:
# Yes
prompt_found = True
# Display info message
log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
# Leave the for loop
break
# Return the prompt found
return prompt_found
def remove_command_in_output(self, text, cmd):
"""
Method removing the command at the beginning of a string
After sending commands an "echo" of the command sent
is display in the output string. This method removes it.
:param text: the text with the command at the beginning
:type text: str
:param cmd: the command previously sent
:type cmd: str
:return: the output string without the command
:rtype: str
"""
# Display info message
log.info(f"remove_command_in_output: cmd = '{cmd}'")
# Display info message
log.info(f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")
# Remove the command from the beginning of the output
# output = text.lstrip(cmd + "\n")
output = text.split(cmd + "\n")[-1]
# Display info message
log.info(f"remove_command_in_output: output = '{output}'")
# Return the string without the command
return output
def remove_starting_carriage_return_in_output(self, text):
"""
Method removing the carriage return at the beginning of a string
:param text: the text with the command at the beginning
:type text: str
:return: the output string without the starting carriage return
:rtype: str
"""
# Display info message
log.info("remove_starting_carriage_return_in_output")
# Remove the carriage return at the beginning of the string
output = text.lstrip("\r\n\r")
# Display info message
log.info(f"remove_starting_carriage_return_in_output: output = '{output}'")
# Return the string without the starting carriage return
return output
def remove_ending_prompt_in_output(self, text):
"""
Method removing the prompt at the end of a string
:param text: the text with a prompt at the beginning
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
# Display info message
log.info("remove_ending_prompt_in_output")
# Check all possible prompts
for prompt in self.possible_prompts:
# Display info message
log.info(f"remove_ending_prompt_in_output: prompt: '{prompt}'")
# Prompt found in the text?
if prompt in text:
# Yes
# Then it is removed from the text
# text = text.rstrip(prompt)
text = text[: -len(prompt)]
# Remove also carriage return
text = text.rstrip("\r\n")
# Leave the loop
break
# output = text.rstrip("\r\n" + self.prompt)
# Display info message
log.info(f"remove_ending_prompt_in_output: text without prompt:\n'{text}'")
# Return the text without prompt at the end
return text
def check_error_output(self, output):
"""
Check if an error is returned by the device ("% Unrecognized command", "% Ambiguous command", etc.)
If an error is found, then an exception is raised
"""
# Display info message
log.info("check_error_output")
# Check if output has some data
if output:
# Yes
# Display info message
log.info("check_error_output: output has some data")
# Check all elements in the list of output
for element in self._send_command_error_in_returned_output:
# Display info message
log.info(f"check_error_output: element: {element}")
# Display info message
log.info(f"check_error_output: output[0]: {output[0]}")
# Check if the output starts with a string with an error message (like "% Invalid input detected at '^' marker.")
# Error message?
if output.startswith(element):
# Yes
# Raise an exception
raise Exception(output)
def remove_ansi_escape_sequence(self, text):
"""
Method removing ANSI escape sequence from a string
Just CSI sequences are removed
:param text: the text with a prompt at the beginning
:type text: str
:return: the output string without the ending prompt
:rtype: str
"""
# By default no string returned
output = ""
# By default no escape sequence found
esc_found = 0
# Read char by char a string
for i in text:
# Display char
# log.info(f"{str(i).encode('ascii')}")
# No escape previously found?
if esc_found == 0:
# No escape sequence currently found
# Escape?
if i == "\x1b":
# Yes
log.info("Esc!")
# Escape found
esc_found = 1
else:
# No
# Then the current char can be saved
output += i
# Escape previously found?
elif esc_found == 1:
# Yes
# Then check if this is a CSI sequence
if i == "[":
# Beginning of CSI sequence
log.info("CSI sequence")
# CSI sequence
esc_found = 2
else:
# Another Escape sequence
# Keep the escape sequence in the string
output += "\x1b" + i
# No escape sequence next
esc_found = 0
else:
# Char between 'a' and 'z' or 'A' and 'Z'?
if (i >= "a" and i <= "z") or (i >= "A" and i <= "Z"):
# Yes
# Then it is the end of CSI escape sequence
log.info("End of escape sequence")
# No escape sequence next
esc_found = 0
# Return a string without ANSI escape sequence
return output
async def disable_paging(self):
"""
Async method disabling paging on a device
Use the "cmd_disable_paging" attribute
"""
# Display info message
log.info("disable_paging")
# Send command to the device to disable paging
await self.send_command(self.cmd_disable_paging)
async def connect(self):
"""
Async method used for connecting a device
Currently supported: SSH and Telnet
"""
# Display info message
log.info("connect")
try:
# SSH?
if self._protocol == "ssh":
# Yes
# Then Connect using SSH
await self.connectSSH()
# Telnet?
elif self._protocol == "telnet":
# Yes
# Then Connect using Telnet
await self.connectTelnet()
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"connect: unsupported protocol: {self._protocol}")
except Exception:
# There was a problem with a connection method
# Display info message
log.info("connect: connection error")
raise
    async def connectSSH(self):
        """
        Async method used for connecting a device using SSH protocol

        Steps:
        - open the SSH connection (asyncssh) with a global timeout
        - open an interactive session (term_type "netscud")
        - read until the output ends with one of
          self._connect_first_ending_prompt
        - learn the device prompt and its variants (find_prompt)
        - optionally disable paging

        Side effects: sets self.conn, self.stdinx, self.stdoutx, self.prompt
        and (via find_prompt) self.possible_prompts.

        :raises asyncio.exceptions.TimeoutError: if the connection times out
        :raises Exception: for any other connection or session failure
        """

        # Display info message
        log.info("connectSSH")

        # Parameters of the connection
        generator = asyncssh.connect(
            self.ip,
            username=self.username,
            password=self.password,
            known_hosts=None,
            # NOTE(review): relies on asyncssh private API (_enc_algs) to
            # enable all encryption algorithms (even the old ones disabled
            # by default); may break across asyncssh versions — confirm
            encryption_algs=[
                algs.decode("utf-8") for algs in asyncssh.encryption._enc_algs
            ],
        )

        # Trying to connect to the device
        try:
            self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
        except asyncio.exceptions.TimeoutError as error:
            # Timeout

            # Display error message
            log.error(f"connectSSH: connection failed: {self.ip} timeout: '{error}'")

            # Exception propagation
            raise asyncio.exceptions.TimeoutError(
                "Connection failed: connection timed out."
            )
        except Exception as error:
            # Connection failed

            # Display error message
            log.error(f"connectSSH: connection failed: {self.ip} '{error}'")

            # Exception propagation
            raise

        # Display info message
        log.info("connectSSH: connection success")

        # Create an interactive session (stdin/stdout streams)
        self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type="netscud")

        # Display info message
        log.info("connectSSH: open_session success")

        # By default no data has been read
        data = ""

        # By default no prompt found
        prompt_not_found = True

        try:
            # Accumulate data until a first ending prompt is seen at the
            # very end of the received stream
            while prompt_not_found:

                # Display info message
                log.info("connectSSH: beginning of the loop")

                # Read the prompt
                data += await asyncio.wait_for(
                    self.stdoutx.read(MAX_BUFFER_DATA), timeout=self.timeout
                )

                # Display info message
                log.info(f"connectSSH: data: '{str(data)}'")

                # Display info message
                log.info(f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")

                # Check if an initial prompt is found
                for prompt in self._connect_first_ending_prompt:

                    # Ending prompt found?
                    if data.endswith(prompt):

                        # Yes

                        # Display info message
                        log.info(f"connectSSH: first ending prompt found: '{prompt}'")

                        # A ending prompt has been found
                        prompt_not_found = False

                        # Leave the loop
                        break

                # Display info message
                log.info("connectSSH: end of loop")

        except Exception as error:

            # Fail while reading the prompt (includes read timeouts)

            # Display error message
            log.error(
                f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
            )

            # Exception propagation
            raise

        # Display info message
        log.info(f"connectSSH: end of prompt loop")

        # Remove possible escape sequence (CSI) before analyzing the prompt
        data = self.remove_ansi_escape_sequence(data)

        # Find prompt (also populates self.possible_prompts)
        self.prompt = self.find_prompt(str(data))

        # Display info message
        log.info(f"connectSSH: prompt found: '{self.prompt}'")

        # Display info message
        log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")

        # Disable paging command available?
        if self.cmd_disable_paging:
            # Yes

            # Disable paging
            await self.disable_paging()
    async def connectTelnet(self):
        """
        Async method used for connecting a device using Telnet protocol

        Steps:
        - open a TCP stream to self.ip:self.port with a global timeout
        - read until a login or password prompt is seen
        - send username (if a login prompt appeared) and password
        - learn the device prompt (find_prompt)
        - optionally enter enable mode, optionally disable paging

        Side effects: sets self._reader, self._writer, self.prompt and
        (via find_prompt) self.possible_prompts.

        :raises asyncio.TimeoutError: if the connection or a read times out
        :raises Exception: on authentication failure or other errors
        """

        # Display info message
        log.info("connectTelnet")

        try:

            # Prepare connection with Telnet (coroutine, awaited below)
            conn = asyncio.open_connection(self.ip, self.port)

        except Exception as error:

            # Preparation to the connection failed

            # Display error message
            log.error(f"connectTelnet: preparation to the connection failed: '{error}'")

            # Exception propagation
            raise

        # Display info message
        log.info("connectTelnet: preparation to the connection success")

        try:

            # Connection with Telnet
            self._reader, self._writer = await asyncio.wait_for(
                conn, timeout=self.timeout
            )

        except asyncio.TimeoutError:

            # Time out during connection

            # Display error message
            log.error("connectTelnet: connection: timeout")

            # Exception propagation
            raise

        # Display info message
        log.info("connectTelnet: connection success")

        # Get prompt for the login
        prompt = self._telnet_connect_login

        # Get prompt for the password
        prompt_password = self._telnet_connect_password

        # By default a login is expected
        use_login = True

        # Temporary string variable
        output = ""

        # Temporary bytes variable
        byte_data = b""

        # Read the telnet information and first prompt (for login but a
        # password prompt can be found for IOS for instance)
        while True:

            # Display info message
            log.info(f"connectTelnet: read data for prompt")

            # Read returned prompt
            byte_data += await asyncio.wait_for(
                self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout
            )

            # Display info message
            log.info(f"connectTelnet: byte_data: {byte_data}")

            # Temporary convertion in string. This string has the following
            # form: "b'....'" (the repr of the bytes is deliberately used
            # for the substring checks below)
            output = str(byte_data)

            # Display info message
            log.info(f"connectTelnet: output: {output}")

            # Prompt for the username found?
            if prompt in output:

                # Yes

                # Leave the loop
                break

            # Prompt for the password found?
            elif prompt_password in output:

                # Yes

                # That means only password is required
                use_login = False

                # Leave the loop
                break

        # Display info message
        log.info(f"connectTelnet: login prompt: '{output}'")

        # Login to use?
        if use_login:

            # Yes

            # Display info message
            log.info("connectTelnet: sending login")

            try:

                # Send login and wait for the password prompt
                await self.send_command(self.username, prompt_password)

                # Display info message
                log.info("connectTelnet: login sent")

            except Exception:

                # Problem with the login

                # Propagate the exception
                raise

        # Display info message
        log.info("connectTelnet: sending password")

        try:
            # Send password; fails fast if an authentication-failure
            # pattern is detected instead of waiting for a timeout
            output = await self.telnet_send_command_with_unexpected_pattern(
                self.password,
                self._connect_first_ending_prompt,
                self._telnet_connect_authentication_fail_prompt,
            )

        except Exception:

            # Problem with the password

            # Propagate the exception
            raise

        # Display info message
        log.info("connectTelnet: password sent")

        # Find prompt (also populates self.possible_prompts)
        self.prompt = self.find_prompt(str(output))

        # Display info message
        log.info(f"connectTelnet: prompt found: '{self.prompt}'")

        # Password enable?
        if self.enable_mode:

            # Yes

            # Display info message
            log.info("connectTelnet: enable mode to be activated")

            try:

                # Send enable command and wait for the password prompt
                await self.send_command(self.cmd_enable, prompt_password)

                # Display info message
                log.info("connectTelnet: enable command sent")

                # Display info message
                log.info("connectTelnet: sending enable password")

                # Send enable password
                await self.telnet_send_command_with_unexpected_pattern(
                    self.enable_password,
                    self._connect_first_ending_prompt,
                    self._telnet_connect_authentication_fail_prompt,
                )

                # Display info message
                log.info("connectTelnet: enable password sent")

            except Exception:

                # Problem with the enable password

                # Display info message
                log.info("connectTelnet: enable password failure")

                # Propagate the exception
                raise

        # Disable paging command available?
        if self.cmd_disable_paging:

            # Yes

            # Disable paging
            await self.disable_paging()
async def disconnect(self):
"""
Async method used to disconnect a device
If this method is not used then exceptions will happen
when the program will end
"""
# Debug info message
log.info("disconnect")
# SSH?
if self._protocol == "ssh":
# Yes
# Then disconnect using SSH
await self.disconnectSSH()
# Telnet?
elif self._protocol == "telnet":
# Yes
# Then disconnect using Telnet
await self.disconnectTelnet()
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"Unsupported protocol: {self._protocol}")
async def disconnectSSH(self):
"""
Async method used to disconnect a device in SSH
If this method is not used then exceptions will happen
when the program will end
"""
# Debug info message
log.info("disconnectSSH")
# Connection previously open in SSH?
if self.conn:
# Yes
# Then close the SSH connection
self.conn.close()
# No more connection to disconnect
self.conn = None
async def disconnectTelnet(self):
"""
Async method used to disconnect a device in Telnet
If this method is not used then exceptions will happen
when the program will end
"""
# Debug info message
log.info("disconnectTelnet")
# Connection previously open in Telnet?
if self._writer:
# Yes
# Then close the SSH connection
self._writer.close()
# No more connection to disconnect
self._writer = None
async def send_command(self, cmd, pattern=None, timeout=None):
"""
Async method used to send data to a device
:param cmd: command to send
:type cmd: str
:param pattern: optional, a pattern replacing the prompt when the prompt is not expected
:type pattern: str
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the output of command
:rtype: str
"""
# Debug info message
log.info("send_command")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# SSH?
if self._protocol == "ssh":
# Yes
# Then disconnect using SSH
output = await self.send_commandSSH(cmd, pattern=pattern, timeout=timeout)
# Telnet?
elif self._protocol == "telnet":
# Yes
# Then disconnect using Telnet
output = await self.send_commandTelnet(
cmd, pattern=pattern, timeout=timeout
)
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"send_command: unsupported protocol: {self._protocol}")
# Return the result of the command
return output
    async def send_commandSSH(self, cmd, pattern=None, timeout=None):
        """
        Async method used to send data to a device over SSH

        Writes the command (plus self._carriage_return_for_send_command),
        then accumulates output until the prompt (or the given pattern) is
        seen, and finally cleans the output (command echo, leading CR/LF,
        ending prompt removed).

        :param cmd: command to send
        :type cmd: str

        :param pattern: optional, a pattern replacing the prompt when the prompt is not expected
        :type pattern: str

        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: str

        :return: the cleaned output of the command
        :rtype: str

        :raises Exception: if the device reports an error in the output
            (see check_error_output)
        """

        # Debug info message
        log.info("send_commandSSH")

        # Default value of timeout variable
        if timeout is None:
            timeout = self.timeout

        # Debug info message
        log.info(f"send_commandSSH: cmd = '{cmd}'")

        # Sending command; the carriage return is mandatory for the device
        # to execute the command
        self.stdinx.write(cmd + self._carriage_return_for_send_command)

        # Display message
        log.info("send_commandSSH: command sent")

        # Variable used to gather data
        output = ""

        # Reading data until the prompt (or pattern) shows up
        while True:

            # Read the data received
            output += await asyncio.wait_for(
                self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
            )

            # Remove ANSI escape sequence
            output = self.remove_ansi_escape_sequence(output)

            # Remove possible "\r" (normalizes line endings before the
            # prompt/pattern checks below)
            output = output.replace("\r", "")

            # Debug info message
            log.info(f"send_commandSSH: output: '{output}'")

            # Is a patten used?
            if pattern:

                # Use pattern instead of prompt
                if pattern in output:

                    # Yes

                    # Leave the loop
                    break

            else:

                # Check if prompt is found
                if self.check_if_prompt_is_found(output):

                    # Yes

                    # Leave the loop
                    break

        # Debug info message
        log.debug(
            f"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'"
        )

        # Remove the command sent from the result of the command
        output = self.remove_command_in_output(output, str(cmd))
        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)
        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Debug info message
        log.debug(
            f"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        # Return the result of the command
        return output
    async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
        """
        Async method used to send data to a device over Telnet

        Writes the command (plus "\\n"), accumulates output until the prompt
        (or the given pattern) is seen in the repr of the received bytes,
        then cleans the output (command echo, leading CR/LF, ending prompt
        removed).

        :param cmd: command to send
        :type cmd: str

        :param pattern: optional, a pattern replacing the prompt when the prompt is not expected
        :type pattern: str

        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: str

        :return: the cleaned output of the command
        :rtype: str

        :raises Exception: if the device reports an error in the output
            (see check_error_output)
        """

        # Debug info message
        log.info("send_commandTelnet")

        # Default value of timeout variable
        if timeout is None:
            timeout = self.timeout

        # Add carriage return at the end of the command (mandatory to send the command)
        cmd = cmd + "\n"

        # Sending command
        self._writer.write(cmd.encode())

        # Temporary string variable
        output = ""

        # Temporary bytes variable
        byte_data = b""

        try:

            # Read data until the prompt (or pattern) shows up
            while True:

                # Read returned prompt
                byte_data += await asyncio.wait_for(
                    self._reader.read(MAX_BUFFER_DATA), timeout=timeout
                )

                # Display info message
                log.info(f"send_commandTelnet: byte_data: '{byte_data}'")

                # Temporary convertion in string. This string has the
                # following form: "b'....'" (repr is used for the checks)
                output = str(byte_data)

                # Display info message
                log.info(f"send_commandTelnet: output: '{output}'")

                # Is a patten used?
                if pattern:

                    # Use pattern instead of prompt
                    if pattern in output:

                        # Yes

                        # Leave the loop
                        break

                else:

                    # Check if prompt is found
                    if self.check_if_prompt_is_found(output):

                        # Yes

                        # Leave the loop
                        break

        except asyncio.TimeoutError:

            # Time out during when reading prompt

            # Display error message
            log.error("send_commandTelnet: connection: timeout")

            # Exception propagation
            raise

        except Exception as error:

            # Error during when reading prompt

            # Display error message
            log.error(f"send_commandTelnet: error: {error}")

            # Exception propagation
            raise

        # Convert data (bytes) into string
        output = byte_data.decode("utf-8", "ignore")

        # Debug info message
        log.debug(
            f"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'"
        )

        # Remove the command sent from the result of the command.
        # NOTE(review): cmd already contains the appended "\n", so
        # remove_command_in_output splits on cmd + "\n\n"; echoes terminated
        # with "\r\n" may not be removed here — confirm against real devices
        output = self.remove_command_in_output(output, str(cmd))
        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)
        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Debug info message
        log.debug(
            f"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        # Return the result of the command
        return output
    async def telnet_send_command_with_unexpected_pattern(
        self, cmd, pattern, error_pattern=None, timeout=None
    ):
        """
        Async method used to send command for Telnet connection to a device with possible unexpected patterns

        send_command can wait till time out if login and password are wrong. This method
        speeds up the returned error message when authentication failure is identified.
        This method is limited to authentication when a password is required.

        :param cmd: command to send
        :type cmd: str

        :param pattern: a list of patterns located at the very end of the returned string. Can be used
            to define a custom or unexpected prompt at the end of a string
        :type pattern: list

        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: str

        :param error_pattern: optional, a list of failed prompts found when the login and password are not correct
        :type error_pattern: list

        :return: the output of command
        :rtype: str

        :raises Exception: when one of error_pattern is detected in the
            output (authentication failure); the connection is closed first
        """

        # Debug info message
        log.info("telnet_send_command_with_unexpected_pattern")

        # Default value of timeout variable
        if timeout is None:
            timeout = self.timeout

        # Add carriage return at the end of the command (mandatory to send the command)
        cmd = cmd + self._carriage_return_for_send_command

        # Sending command
        self._writer.write(cmd.encode())

        # Temporary string variable
        output = ""

        # Temporary bytes variable
        byte_data = b""

        # By default pattern is not found
        pattern_not_found = True

        try:

            # Read data until one of the patterns is seen
            while pattern_not_found:

                # Read returned prompt
                byte_data += await asyncio.wait_for(
                    self._reader.read(MAX_BUFFER_DATA), timeout=timeout
                )

                # Display info message
                log.info(
                    f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
                )

                # Display debug message
                log.debug(
                    f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
                )

                # Temporary convertion in string. This string has the
                # following form: "b'....'" (repr is used for the checks)
                output = str(byte_data)

                # Display info message
                log.info(
                    f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
                )

                # Is a pattern used?
                if pattern:

                    # Check all pattern of prompt in the output
                    for prompt in pattern:

                        # Display info message
                        log.info(
                            f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
                        )

                        # A pattern found?
                        if prompt in output:

                            # Yes

                            # A pattern is found. The main loop can be stopped
                            pattern_not_found = False

                            # Display info message
                            log.info(
                                f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
                            )

                            # Leave the loop
                            break

                # Is an unexpected pattern used?
                if error_pattern and pattern_not_found:

                    # Check all unexpected pattern of prompt in the output
                    for bad_prompt in error_pattern:

                        # Display info message
                        log.info(
                            f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
                        )

                        # An error_pattern pattern found?
                        if bad_prompt in output:

                            # Yes

                            # Display error message
                            log.error(
                                "telnet_send_command_with_unexpected_pattern: authentication failed"
                            )

                            # Raise exception (caught below, which closes
                            # the connection before re-raising)
                            raise Exception(
                                "telnet_send_command_with_unexpected_pattern: authentication failed"
                            )

        except asyncio.TimeoutError:

            # Time out during when reading prompt

            # Close the connection in order to not display RuntimeError
            await self.disconnect()

            # Display error message
            log.error(
                "telnet_send_command_with_unexpected_pattern: reading prompt: timeout"
            )

            # Exception propagation
            raise

        except Exception as error:

            # Error during when reading prompt

            # Close the connection in order to not display RuntimeError
            await self.disconnect()

            # Display error message
            log.error(
                f"telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}"
            )

            # Exception propagation
            raise

        # Convert data (bytes) into string
        output = byte_data.decode("utf-8", "ignore")

        # Debug info message
        log.debug(
            f"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"
        )

        # Remove the command sent from the result of the command.
        # NOTE(review): cmd already contains the appended carriage return,
        # so the echo-removal split pattern includes it twice — confirm
        # against real devices
        output = self.remove_command_in_output(output, str(cmd))
        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)
        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Debug info message
        log.debug(
            f"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Return the result of the command
        return output
async def send_config_set(self, cmds=None, timeout=None):
"""
Async method used to send command in config mode
The commands send can be either a string a list of strings. There are
3 steps:
- Entering configuration mode
- Sending the commands
- Leaving configuration mode
:param cmds: The commands to the device
:type cmds: str or list
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: the results of the commands sent
:rtype: list of str
"""
# Display info message
log.info("send_config_set")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Debug info message
log.info("send_command")
# SSH?
if self._protocol == "ssh":
# Yes
# Then disconnect using SSH
output = await self.send_config_setSSH(cmds, timeout)
# Telnet?
elif self._protocol == "telnet":
# Yes
# Then disconnect using Telnet
output = await self.send_config_setTelnet(cmds, timeout)
else:
# Unsupported protocol
# Raise an exception
raise Exception(f"send_config_set: unsupported protocol: {self._protocol}")
# Return the result of the commands
return output
    async def send_config_setSSH(self, cmds=None, timeout=None):
        """
        Async method used to send command in config mode over SSH

        The commands send can be either a string a list of strings. There are
        3 steps:
        - Entering configuration mode
        - Sending the commands
        - Leaving configuration mode

        Each step follows the same pattern: write the command, read until
        the prompt is found, accumulate the raw output, then clean it
        (echo, leading CR/LF, ending prompt) and check for device errors.

        :param cmds: The commands to the device
        :type cmds: str or list

        :param timeout: optional, a timeout for the command sent. Default value is self.timeout
        :type timeout: str

        :return: the raw outputs of all steps concatenated into one string
        :rtype: str

        :raises Exception: if the device reports an error in any output
            (see check_error_output)
        """

        # Display info message
        log.info("send_config_setSSH")

        # Default value of timeout variable
        if timeout is None:
            timeout = self.timeout

        # Clear returned output
        returned_output = ""

        # Check if cmds is a string
        if isinstance(cmds, str):

            # A string

            # Convert the string into a list
            cmds = [cmds]

        # A list?
        elif not isinstance(cmds, list):

            # Not a list (and not a string)

            # Display error message
            log.error(
                "send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list"
            )

            # Leave the method (empty string returned, no exception raised)
            return returned_output

        ##############################
        # Entering configuration mode
        ##############################

        # Display info message
        log.info("send_config_set: entering configuration mode")

        # Clear output
        output = ""

        # Get command for entering in config made
        cmd = self.cmd_enter_config_mode

        # Add carriage return at the end of the command (mandatory to send the command)
        cmd = cmd + self._carriage_return_for_send_command

        # Display info message
        log.info(f"send_config_setSSH: cmd = '{cmd}'")

        # Sending command
        self.stdinx.write(cmd)

        # Display message
        log.info("send_config_setSSH: configuration mode entered")

        # Read until the (config-mode) prompt is detected
        while True:

            # Read the data received
            output += await asyncio.wait_for(
                self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
            )

            # Display info message
            log.info(f"send_config_setSSH: output: '{output}'")

            # Check if prompt is found
            if self.check_if_prompt_is_found(output):

                # Yes

                # Leave the loop
                break

        # Debug info message
        log.debug(
            f"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'"
        )

        # Add the output to the returned output
        returned_output += output

        # Remove the command sent from the result of the command
        output = self.remove_command_in_output(output, str(cmd))
        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)
        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Display info message
        log.debug(
            f"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        ##############################
        # Sending commands
        ##############################

        # Display info message
        log.info("send_config_setSSH: sending commands")

        # Clear output
        output = ""

        # Each command
        for cmd in cmds:

            # Add carriage return at the end of the command (mandatory to send the command)
            cmd = cmd + self._carriage_return_for_send_command

            # Display info message
            log.info(f"send_config_setSSH: cmd = '{cmd}'")

            # Sending command
            self.stdinx.write(cmd)

            # Display info message
            log.info("send_config_setSSH: command sent")

            # Read until the prompt is detected
            # (note: "output" accumulates across all commands of this loop)
            while True:

                # Read the data received
                output += await asyncio.wait_for(
                    self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
                )

                # Display info message
                log.info(f"send_config_setSSH: output: '{output}'")

                # Check if prompt is found
                if self.check_if_prompt_is_found(output):

                    # Yes

                    # Leave the loop
                    break

        # Debug info message
        log.debug(
            f"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'"
        )

        # Add the output to the returned output
        returned_output += output

        # Remove the command sent from the result of the command
        # (only the last command's echo is removed here, since "cmd" holds
        # the last value of the loop variable)
        output = self.remove_command_in_output(output, str(cmd))
        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)
        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Display info message
        log.debug(
            f"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        ##############################
        # Leaving configuration mode
        ##############################

        # Display info message
        log.info("send_config_setSSH: leaving configuration mode")

        # Clear output
        output = ""

        # Get command to leave config made
        cmd = self.cmd_exit_config_mode

        # Add carriage return at the end of the command (mandatory to send the command)
        cmd = cmd + self._carriage_return_for_send_command

        # Display info message
        log.info(f"send_config_setSSH: cmd = '{cmd}'")

        # Sending command
        self.stdinx.write(cmd)

        # Display info message
        log.info("send_config_setSSH: command to leave configuration mode sent")

        # Read until the (regular) prompt is detected
        while True:

            # Read the data received
            output += await asyncio.wait_for(
                self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
            )

            # Display info message
            log.info(f"send_config_setSSH: output: '{output}'")

            # Check if prompt is found
            if self.check_if_prompt_is_found(output):

                # Yes

                # Leave the loop
                break

        # Debug info message
        log.debug(
            f"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'"
        )

        # Add the output to the returned output
        returned_output += output

        # Remove the command sent from the result of the command
        output = self.remove_command_in_output(output, str(cmd))
        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)
        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Display info message
        log.debug(
            f"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        # Return the result of the commands
        return returned_output
    async def send_config_setTelnet(self, cmds=None, timeout=None):
        """
        Async method used to send commands in configuration mode over Telnet

        The commands sent can be either a string or a list of strings. There
        are 3 steps:
        - Entering configuration mode
        - Sending the commands
        - Leaving configuration mode

        :param cmds: The commands to send to the device (a single command as a
            str, or several commands as a list of str)
        :type cmds: str or list

        :param timeout: optional, a timeout (in seconds) for each read while
            waiting for the prompt. Default value is self.timeout
        :type timeout: float or int

        :return: the concatenated raw output of all the commands sent
            (entering config mode, the commands themselves, leaving config mode)
        :rtype: str
        """

        # Display info message
        log.info("send_config_setTelnet")

        # Default value of timeout variable
        if timeout is None:
            timeout = self.timeout

        # Clear returned output
        returned_output = ""

        # Check if cmds is a string
        if isinstance(cmds, str):

            # A string

            # Convert the string into a list
            cmds = [cmds]

        # A list?
        elif not isinstance(cmds, list):

            # Not a list (and not a string)

            # Display error message
            log.error(
                "send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list"
            )

            # Leave the method
            return returned_output

        ##############################
        # Entering configuration mode
        ##############################

        # Display info message
        log.info("send_config_setTelnet: entering configuration mode")

        # Clear output
        output = ""

        # Get command for entering in config mode
        cmd = self.cmd_enter_config_mode

        # Add carriage return at the end of the command (mandatory to send the command)
        cmd = cmd + self._carriage_return_for_send_command

        # Display info message
        log.info(f"send_config_setTelnet: cmd = '{cmd}'")

        # Sending command
        self._writer.write(cmd.encode())

        # Display message
        log.info("send_config_setTelnet: configuration mode entered")

        # Temporary string variable
        output = ""

        # Temporary bytes variable
        byte_data = b""

        try:

            # Read data until the device prompt is seen (or timeout)
            while True:

                # Read the data received
                byte_data += await asyncio.wait_for(
                    self._reader.read(MAX_BUFFER_DATA), timeout=timeout
                )

                # Temporary conversion in string. This string has the following form: "b'....'"
                output = str(byte_data)

                # Display info message
                log.info(f"send_config_setTelnet: output: '{output}'")

                # Check if prompt is found
                if self.check_if_prompt_is_found(output):

                    # Yes

                    # Leave the loop
                    break

        except asyncio.TimeoutError:

            # Time out during when reading prompt

            # Display error message
            log.error("send_config_setTelnet: connection: timeout")

            # Exception propagation
            raise

        except Exception as error:

            # Error during when reading prompt

            # Display error message
            log.error(f"send_config_setTelnet: error: {error}")

            # Exception propagation
            raise

        # Convert data (bytes) into string
        output = byte_data.decode("utf-8", "ignore")

        # Debug info message
        log.debug(
            f"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'"
        )

        # Add the output to the returned output
        returned_output += output

        # Remove the command sent from the result of the command
        output = self.remove_command_in_output(output, str(cmd))

        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)

        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Display info message
        log.debug(
            f"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        ##############################
        # Sending commands
        ##############################

        # Display info message
        log.info("send_config_setTelnet: sending commands")

        # Clear output
        output = ""

        # Each command is sent and its output collected in turn
        for cmd in cmds:

            # Add carriage return at the end of the command (mandatory to send the command)
            cmd = cmd + self._carriage_return_for_send_command

            # Display info message
            log.info(f"send_config_setTelnet: cmd = '{cmd}'")

            # Sending command
            self._writer.write(cmd.encode())

            # Display info message
            log.info("send_config_setTelnet: command sent")

            # Temporary string variable
            output = ""

            # Temporary bytes variable
            byte_data = b""

            try:

                # Read data until the device prompt is seen (or timeout)
                while True:

                    # Read the data received
                    byte_data += await asyncio.wait_for(
                        self._reader.read(MAX_BUFFER_DATA), timeout=timeout
                    )

                    # Temporary conversion in string. This string has the following form: "b'....'"
                    output = str(byte_data)

                    # Display info message
                    log.info(f"send_config_setTelnet: output: '{output}'")

                    # Check if prompt is found
                    if self.check_if_prompt_is_found(output):

                        # Yes

                        # Leave the loop
                        break

            except asyncio.TimeoutError:

                # Time out during when reading prompt

                # Display error message
                log.error("send_config_setTelnet: connection: timeout")

                # Exception propagation
                raise

            except Exception as error:

                # Error during when reading prompt

                # Display error message
                log.error(f"send_config_setTelnet: error: {error}")

                # Exception propagation
                raise

            # Convert data (bytes) into string
            output = byte_data.decode("utf-8", "ignore")

            # Debug info message
            log.debug(
                f"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'"
            )

            # Add the output to the returned output
            returned_output += output

            # Remove the command sent from the result of the command
            output = self.remove_command_in_output(output, str(cmd))

            # Remove the carriage return of the output
            output = self.remove_starting_carriage_return_in_output(output)

            # Remove the ending prompt of the output
            output = self.remove_ending_prompt_in_output(output)

            # Display info message
            log.debug(
                f"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"
            )

            # Check if there is an error in the output string (like "% Unrecognized command")
            # and generate an exception if needed
            self.check_error_output(output)

        ##############################
        # Leaving configuration mode
        ##############################

        # Display info message
        log.info("send_config_setTelnet: leaving configuration mode")

        # Clear output
        output = ""

        # Get command to leave config mode
        cmd = self.cmd_exit_config_mode

        # Add carriage return at the end of the command (mandatory to send the command)
        cmd = cmd + self._carriage_return_for_send_command

        # Display info message
        log.info(f"send_config_setTelnet: cmd = '{cmd}'")

        # Sending command
        self._writer.write(cmd.encode())

        # Display info message
        log.info("send_config_setTelnet: command to leave configuration mode sent")

        # Temporary string variable
        output = ""

        # Temporary bytes variable
        byte_data = b""

        # Protection against infinite loop: at most 3 reads without a prompt
        loop = 3

        try:

            # Read data until the prompt is found or the retry budget runs out
            while loop:

                # Read the data received
                byte_data += await asyncio.wait_for(
                    self._reader.read(MAX_BUFFER_DATA), timeout=timeout
                )

                # Temporary conversion in string. This string has the following form: "b'....'"
                output = str(byte_data)

                # Display info message
                log.info(f"send_config_setTelnet: output: '{output}'")

                await asyncio.sleep(0.5)

                # Check if prompt is found
                if self.check_if_prompt_is_found(output):

                    # Yes

                    # Leave the loop
                    break

                # Protection for "exit" command infinite loop in Cisco when enable is not activated
                loop -= 1

        except asyncio.TimeoutError:

            # Time out during when reading prompt

            # Display error message
            log.error("send_config_setTelnet: connection: timeout")

            # Exception propagation
            raise

        except Exception as error:

            # Error during when reading prompt

            # Display error message
            log.error(f"send_config_setTelnet: error: {error}")

            # Exception propagation
            raise

        # Convert data (bytes) into string
        output = byte_data.decode("utf-8", "ignore")

        # Debug info message
        log.debug(
            f"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'"
        )

        # Add the output to the returned output
        returned_output += output

        # Remove the command sent from the result of the command
        output = self.remove_command_in_output(output, str(cmd))

        # Remove the carriage return of the output
        output = self.remove_starting_carriage_return_in_output(output)

        # Remove the ending prompt of the output
        output = self.remove_ending_prompt_in_output(output)

        # Display info message
        log.debug(
            f"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'"
        )

        # Check if there is an error in the output string (like "% Unrecognized command")
        # and generate an exception if needed
        self.check_error_output(output)

        # Return the result of the commands
        return returned_output
#########################################################
#
# List of API
#
#########################################################
async def get_version(self):
"""
Asyn method used to get the version of the software of the device
:return: Version of the software of the device
:rtype: str
"""
# Display info message
log.info("get_version")
# By default empty string
version = ""
# Run get version on the device
output = await self.send_command(self.cmd_get_version)
# Seek "Version " and "," to get the version in the returned output
version = output.split("Version ")[1].split(",")[0]
# Display info message
log.info(f"get_version: version: {version}")
# Return the version of the software of the device
return version
async def get_hostname(self):
"""
Asyn method used to get the name of the device
:return: Name of the device
:rtype: str
"""
# Display info message
log.info("get_hostname")
# Get hostname
output = await self.send_command(self.cmd_get_hostname)
# Display info message
log.info(f"get_hostname: output: '{output}'")
# Remove the useless information in the returned string
output = output.split()[0]
# Display info message
log.info(f"get_hostname: hostname found: '{output}'")
# Return the name of the device
return output
async def get_model(self):
"""
Asyn method used to get the model of the device
:return: Model of the device
:rtype: str
"""
# Display info message
log.info("get_model")
# Get model
output = await self.send_command(self.cmd_get_model)
# Display info message
log.info(f"get_model: output: '{output}'")
# Remove the useless information in the returned string
output = output.split('"')[3]
# Display info message
log.info(f"get_model: model found: '{output}'")
# Return the model of the device
return output
async def get_serial_number(self):
"""
Get serial number of the switch or the serial number of the first switch of a stack
:return: Serial number of the device
:rtype: str
"""
# Display info message
log.info("get_serial_number")
# Get serial number
output = await self.send_command(self.cmd_get_serial_number)
# Display info message
log.info(f"get_serial_number: output: '{output}'")
# Remove the useless information in the returned string
output = output.splitlines()[0].split()[-1]
# Display info message
log.info(f"get_hostname: hostname found: '{output}'")
# Return the serial number of the device
return output
async def get_config(self, timeout=None):
"""
Asyn method used to get the configuration of the device
:param timeout: optional, a timeout for the command sent. Default value is self.timeout
:type timeout: str
:return: Configuration of the device
:rtype: str
"""
# Display info message
log.info("get_config")
# Default value of timeout variable
if timeout is None:
timeout = self.timeout
# Get config
output = await self.send_command(self.cmd_get_config, timeout=timeout)
# Return de configuration of the device
return output
async def save_config(self):
    """
    Async method saving the current configuration on the device.

    :return: Output produced by the configuration-saving command
    :rtype: str
    """
    log.info("save_config")

    # Run the save command and hand back whatever the device printed
    return await self.send_command(self.cmd_save_config)
|
normal
|
{
"blob_id": "87baaf4a1b48fa248c65d26cc44e819a2ede1140",
"index": 3736,
"step-1": "<mask token>\n\n\nclass NetworkDevice:\n <mask token>\n\n def __init__(self, **kwargs):\n log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n 
self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' 
in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. 
\"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. 
This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n <mask token>\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output 
+= i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n 
MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the connection failed: '{error}'\"\n )\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n 
log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used 
to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send command for Telnet connection to a device with possible unexpected patterns\n\n send_command can wait till time out if login and password are wrong. This method\n speed up the returned error message when authentication failed is identified.\n This method is limited to authentication whem password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of the a returned string. 
Can be used\n to define a custom or unexpected prompt a the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n 
log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. 
There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while 
True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list 
of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: 
cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: command sent')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: 
'{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Asyn method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Asyn method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Asyn method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n output = output.split('\"')[3]\n 
log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Asyn method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Asyn method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
"step-2": "<mask token>\n\n\nclass NetworkDevice:\n <mask token>\n\n def __init__(self, **kwargs):\n log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n 
self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' 
in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. 
\"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. 
This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n\n def remove_ending_prompt_in_output(self, text):\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n log.info('remove_ending_prompt_in_output')\n for prompt in self.possible_prompts:\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n if prompt in text:\n text = text[:-len(prompt)]\n text = text.rstrip('\\r\\n')\n break\n log.info(\n f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n 
log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output += i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n 
f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the connection failed: '{error}'\"\n )\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: 
connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will 
happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): 
'{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send command for Telnet 
connection to a device with possible unexpected patterns\n\n send_command can wait till time out if login and password are wrong. This method\n speed up the returned error message when authentication failed is identified.\n This method is limited to authentication whem password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of the a returned string. Can be used\n to define a custom or unexpected prompt a the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: 
'{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw 
output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n 
f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if 
self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: command sent')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n 
f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Asyn method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n 
log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Asyn method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Asyn method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Asyn method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Asyn method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.DEBUG)\nasyncssh.set_debug_level(2)\n<mask token>\n\n\nclass NetworkDevice:\n \"\"\"\n Base class for network object\n\n\n :param ip: IP address of a device\n :type ip: str\n\n :param username: Username used to connect to a device\n :type username: str\n\n :param password: Password used to connect to a device\n :type password: str\n\n :param device_type: Type of device used\n :type device_type: str\n\n :param port: TCP port used to connect a device. Default value is \"22\" for SSH\n :type port: int, optional\n\n :param timeout: TCP port used to connect a device. Default value is 10 seconds\n :type timeout: int, optional\n\n :param _protocol: Protocol used to connect a device. \"ssh\" or \"telnet\" are possible options. Default value is \"ssh\"\n :type _protocol: str, optional\n\n :param enable_mode: Enable mode for devices requiring it. Default value is \"False\"\n :type enable_mode: bool, optional\n\n :param enable_password: Enable password used for enable mode.\n :type enable_password: str, optional\n\n :param conn: Variable used for the management of the SSH connection\n :type conn: SSHClientConnection object\n\n :param _writer: Variable used for the management of the Telnet connection and writing channel\n :type _writer: StreamWriter object\n\n :param _reader: Variable used for the management of the Telnet reading channel\n :type _reader: StreamReader object\n\n :param possible_prompts: Used by the connect method to list all possible prompts of the device\n :type possible_prompts: list\n\n :param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt\n :type _connect_first_ending_prompt: list\n\n :param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. 
Used for detecting the prompt returned in sent commands\n :type list_of_possible_ending_prompts: list\n\n :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_login: str\n\n :param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_password: list\n\n :param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events\n :type _telnet_connect_authentication_fail_prompt: list\n\n :param cmd_enable: Enable command for entering into enable mode\n :type cmd_enable: str\n\n :param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time\n :type cmd_disable_paging: str\n\n :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.\n :type cmd_enter_config_mode: str\n\n :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.\n :type cmd_exit_config_mode: str\n\n :param cmd_get_version: API command used to get the software version of a device\n :type cmd_get_version: str\n\n :param cmd_get_hostname: API command used to get the hostname of a device\n :type cmd_get_hostname: str\n\n :param cmd_get_model: API command used to get the model of a device\n :type cmd_get_model: str\n\n :param cmd_get_serial_number: API command used to get the serial number of a device\n :type cmd_get_serial_number: str\n\n :param cmd_get_config: API command used to get the running configuration of a device\n :type cmd_get_config: str\n\n :param cmd_save_config: API command used to save the running configuration on the device\n :type cmd_save_config: str\n \"\"\"\n\n def __init__(self, **kwargs):\n 
log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface 
bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + 
str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. 
\"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. 
This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n\n def remove_ending_prompt_in_output(self, text):\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n log.info('remove_ending_prompt_in_output')\n for prompt in self.possible_prompts:\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n if prompt in text:\n text = text[:-len(prompt)]\n text = text.rstrip('\\r\\n')\n break\n log.info(\n f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n 
log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output += i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n 
f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the connection failed: '{error}'\"\n )\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: 
connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will 
happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): 
'{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send command for Telnet 
connection to a device with possible unexpected patterns\n\n send_command can wait till time out if login and password are wrong. This method\n speed up the returned error message when authentication failed is identified.\n This method is limited to authentication whem password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of the a returned string. Can be used\n to define a custom or unexpected prompt a the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: 
'{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw 
output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n 
f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if 
self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: command sent')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n 
f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Asyn method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n 
log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Asyn method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Asyn method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Asyn method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Asyn method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
"step-4": "import asyncio, asyncssh, logging\nlog = logging.getLogger(__package__)\nlogging.basicConfig(level=logging.DEBUG)\nasyncssh.set_debug_level(2)\nMAX_BUFFER_DATA = 65535\nipv4_netmask_list = {'0.0.0.0': '0', '128.0.0.0': '1', '192.0.0.0': '2',\n '224.0.0.0': '3', '240.0.0.0': '4', '248.0.0.0': '5', '252.0.0.0': '6',\n '254.0.0.0': '7', '255.0.0.0': '8', '255.128.0.0': '9', '255.192.0.0':\n '10', '255.224.0.0': '11', '255.240.0.0': '12', '255.248.0.0': '13',\n '255.252.0.0': '14', '255.254.0.0': '15', '255.255.0.0': '16',\n '255.255.128.0': '17', '255.255.192.0': '18', '255.255.224.0': '19',\n '255.255.240.0': '20', '255.255.248.0': '21', '255.255.252.0': '22',\n '255.255.254.0': '23', '255.255.255.0': '24', '255.255.255.128': '25',\n '255.255.255.192': '26', '255.255.255.224': '27', '255.255.255.240':\n '28', '255.255.255.248': '29', '255.255.255.252': '30',\n '255.255.255.254': '31', '255.255.255.255': '32'}\n\n\nclass NetworkDevice:\n \"\"\"\n Base class for network object\n\n\n :param ip: IP address of a device\n :type ip: str\n\n :param username: Username used to connect to a device\n :type username: str\n\n :param password: Password used to connect to a device\n :type password: str\n\n :param device_type: Type of device used\n :type device_type: str\n\n :param port: TCP port used to connect a device. Default value is \"22\" for SSH\n :type port: int, optional\n\n :param timeout: TCP port used to connect a device. Default value is 10 seconds\n :type timeout: int, optional\n\n :param _protocol: Protocol used to connect a device. \"ssh\" or \"telnet\" are possible options. Default value is \"ssh\"\n :type _protocol: str, optional\n\n :param enable_mode: Enable mode for devices requiring it. 
Default value is \"False\"\n :type enable_mode: bool, optional\n\n :param enable_password: Enable password used for enable mode.\n :type enable_password: str, optional\n\n :param conn: Variable used for the management of the SSH connection\n :type conn: SSHClientConnection object\n\n :param _writer: Variable used for the management of the Telnet connection and writing channel\n :type _writer: StreamWriter object\n\n :param _reader: Variable used for the management of the Telnet reading channel\n :type _reader: StreamReader object\n\n :param possible_prompts: Used by the connect method to list all possible prompts of the device\n :type possible_prompts: list\n\n :param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt\n :type _connect_first_ending_prompt: list\n\n :param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands\n :type list_of_possible_ending_prompts: list\n\n :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_login: str\n\n :param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_password: list\n\n :param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. Used to get an answer faster than timeout events\n :type _telnet_connect_authentication_fail_prompt: list\n\n :param cmd_enable: Enable command for entering into enable mode\n :type cmd_enable: str\n\n :param cmd_disable_paging: Command used to disable paging on a device. 
That command is run at connection time\n :type cmd_disable_paging: str\n\n :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.\n :type cmd_enter_config_mode: str\n\n :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.\n :type cmd_exit_config_mode: str\n\n :param cmd_get_version: API command used to get the software version of a device\n :type cmd_get_version: str\n\n :param cmd_get_hostname: API command used to get the hostname of a device\n :type cmd_get_hostname: str\n\n :param cmd_get_model: API command used to get the model of a device\n :type cmd_get_model: str\n\n :param cmd_get_serial_number: API command used to get the serial number of a device\n :type cmd_get_serial_number: str\n\n :param cmd_get_config: API command used to get the running configuration of a device\n :type cmd_get_config: str\n\n :param cmd_save_config: API command used to save the running configuration on the device\n :type cmd_save_config: str\n \"\"\"\n\n def __init__(self, **kwargs):\n log.info('__init__')\n self.ip = ''\n self.username = ''\n self.password = ''\n self.device_type = ''\n self.port = 22\n self.timeout = 10\n self._protocol = 'ssh'\n self.enable_mode = False\n self.enable_password = ''\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = ['#', '>']\n self.list_of_possible_ending_prompts = ['(config-line)#',\n '(config-if)#', '(config)#', '>', '#']\n self._carriage_return_for_send_command = '\\n'\n self._send_command_error_in_returned_output = []\n self._telnet_connect_login = 'Username:'\n self._telnet_connect_password = 'Password:'\n self._telnet_connect_authentication_fail_prompt = [':', '%']\n self.cmd_enable = 'enable'\n self.cmd_disable_paging = 'terminal length 0'\n self.cmd_enter_config_mode = 'configure terminal'\n self.cmd_exit_config_mode = 
'exit'\n self.cmd_get_version = 'show version'\n self.cmd_get_hostname = 'show version | include uptime'\n self.cmd_get_model = 'show inventory'\n self.cmd_get_serial_number = 'show inventory | i SN'\n self.cmd_get_config = 'show running-config'\n self.cmd_save_config = 'write memory'\n self.cmd_get_interfaces = [\n 'interface ethernet print terse without-paging',\n 'foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}'\n , 'interface bridge port print terse without-paging']\n self.cmd_set_interface = ['interface ethernet enable <INTERFACE>',\n 'interface ethernet disable <INTERFACE>',\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n 'interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>',\n 'interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]'\n ]\n self.cmd_get_mac_address_table = (\n 'interface bridge host print without-paging')\n self.cmd_get_arp = 'ip arp print terse without-paging'\n self.cmd_get_lldp_neighbors = 'ip neighbor print terse without-paging'\n self.cmd_get_vlans = 'interface bridge vlan print terse without-paging'\n self.cmd_add_vlan = (\n 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n )\n self.cmd_remove_vlan = (\n 'interface bridge vlan remove [find vlan-ids=<VLAN>]')\n self.cmd_add_interface_to_vlan = ['interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n self.cmd_remove_interface_from_vlan = [\n 'interface bridge vlan print terse',\n 'interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>'\n ,\n 'interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>'\n ,\n 'interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>'\n ]\n 
self.cmd_get_routing_table = 'ip route print without-paging terse'\n self.cmd_get_interfaces_ip = 'ip address print terse without-paging'\n self.cmd_add_static_route = (\n 'ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>'\n )\n self.cmd_remove_static_route = (\n 'ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]')\n log.debug('__init__: kwargs: ' + str(kwargs))\n if 'ip' in kwargs:\n self.ip = kwargs['ip']\n log.info('__init__: ip found: ' + str(self.ip))\n if 'username' in kwargs:\n self.username = kwargs['username']\n log.info('__init__: username found: ' + str(self.username))\n if 'password' in kwargs:\n self.password = kwargs['password']\n log.debug('__init__: password found: ' + str(self.password))\n if 'device_type' in kwargs:\n self.device_type = kwargs['device_type']\n log.info('__init__: device_type found: ' + str(self.device_type))\n if 'timeout' in kwargs:\n self.timeout = kwargs['timeout']\n log.info('__init__: timeout found: ' + str(self.timeout))\n if 'protocol' in kwargs:\n self._protocol = kwargs['protocol'].lower()\n log.info('__init__: protocol found: ' + str(self._protocol))\n if self._protocol.lower() == 'telnet':\n self.port = 23\n if 'port' in kwargs:\n self.port = kwargs['port']\n log.info('__init__: port found: ' + str(self.port))\n if 'enable_mode' in kwargs:\n self.enable_mode = kwargs['enable_mode']\n log.info('__init__: enable_mode found: ' + str(self.enable_mode))\n if 'enable_password' in kwargs:\n self.enable_password = kwargs['enable_password']\n log.info('__init__: enable_password found: ' + str(self.\n enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n try:\n await self.connect()\n except Exception:\n await self.disconnect()\n raise\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Context manager closing connection\n \"\"\"\n await self.disconnect()\n\n def find_prompt(self, text):\n 
\"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. This will be useful later on while finding prompt in other\n output stream (read).\n\n :param text: data with a prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt = text.split('\\n')[-1]\n prompt = text.split('\\r')[-1]\n log.info(f\"find_prompt: prompt: '{prompt}'\")\n self.possible_prompts = self.get_possible_prompts(prompt)\n return prompt\n\n def get_possible_prompts(self, prompt):\n \"\"\"\n Method used to check if a prompt has one of the expected endings then\n create a list with all possible prompts for the device\n\n :param prompt: a prompt with a possible ending prompt (eg. \"switch#\")\n :type prompt: str\n\n :return: the list of prompts\n :rtype: list\n \"\"\"\n list_of_prompts = []\n list_of_possible_ending_prompts = self.list_of_possible_ending_prompts\n my_prompt = prompt\n for ending in list_of_possible_ending_prompts:\n if my_prompt.endswith(ending):\n my_prompt = my_prompt[:-len(ending)]\n break\n log.info(f\"get_possible_prompts: prompt found: '{my_prompt}'\")\n log.info(f\"get_possible_prompts: prompt found size: '{len(my_prompt)}'\"\n )\n for ending in list_of_possible_ending_prompts:\n list_of_prompts.append(my_prompt + ending)\n log.info(\n f'get_possible_prompts: list of possible prompts: {list_of_prompts}'\n )\n return list_of_prompts\n\n def check_if_prompt_is_found(self, text):\n \"\"\"\n Method used to check if a prompt is detected inside a string\n\n :param text: a string with prompt\n :type text: str\n\n :return: the prompt found\n :rtype: str\n \"\"\"\n prompt_found = False\n for prompt in self.possible_prompts:\n log.info(f\"check_if_prompt_is_found: prompt: '{prompt}'\")\n if prompt in text:\n prompt_found = True\n log.info(f\"check_if_prompt_is_found: prompt found: '{prompt}'\")\n 
break\n return prompt_found\n\n def remove_command_in_output(self, text, cmd):\n \"\"\"\n Method removing the command at the beginning of a string\n\n After sending commands an \"echo\" of the command sent\n is display in the output string. This method removes it.\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :param cmd: the command previously sent\n :type cmd: str\n\n :return: the output string without the command\n :rtype: str\n \"\"\"\n log.info(f\"remove_command_in_output: cmd = '{cmd}'\")\n log.info(\n f\"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'\")\n output = text.split(cmd + '\\n')[-1]\n log.info(f\"remove_command_in_output: output = '{output}'\")\n return output\n\n def remove_starting_carriage_return_in_output(self, text):\n \"\"\"\n Method removing the carriage return at the beginning of a string\n\n :param text: the text with the command at the beginning\n :type text: str\n\n :return: the output string without the starting carriage return\n :rtype: str\n \"\"\"\n log.info('remove_starting_carriage_return_in_output')\n output = text.lstrip('\\r\\n\\r')\n log.info(\n f\"remove_starting_carriage_return_in_output: output = '{output}'\")\n return output\n\n def remove_ending_prompt_in_output(self, text):\n \"\"\"\n Method removing the prompt at the end of a string\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n log.info('remove_ending_prompt_in_output')\n for prompt in self.possible_prompts:\n log.info(f\"remove_ending_prompt_in_output: prompt: '{prompt}'\")\n if prompt in text:\n text = text[:-len(prompt)]\n text = text.rstrip('\\r\\n')\n break\n log.info(\n f\"remove_ending_prompt_in_output: text without prompt:\\n'{text}'\")\n return text\n\n def check_error_output(self, output):\n \"\"\"\n Check if an error is returned by the device (\"% Unrecognized command\", \"% Ambiguous command\", 
etc.)\n\n If an error is found, then an exception is raised\n \"\"\"\n log.info('check_error_output')\n if output:\n log.info('check_error_output: output has some data')\n for element in self._send_command_error_in_returned_output:\n log.info(f'check_error_output: element: {element}')\n log.info(f'check_error_output: output[0]: {output[0]}')\n if output.startswith(element):\n raise Exception(output)\n\n def remove_ansi_escape_sequence(self, text):\n \"\"\"\n Method removing ANSI escape sequence from a string\n Just CSI sequences are removed\n\n :param text: the text with a prompt at the beginning\n :type text: str\n\n :return: the output string without the ending prompt\n :rtype: str\n \"\"\"\n output = ''\n esc_found = 0\n for i in text:\n if esc_found == 0:\n if i == '\\x1b':\n log.info('Esc!')\n esc_found = 1\n else:\n output += i\n elif esc_found == 1:\n if i == '[':\n log.info('CSI sequence')\n esc_found = 2\n else:\n output += '\\x1b' + i\n esc_found = 0\n elif i >= 'a' and i <= 'z' or i >= 'A' and i <= 'Z':\n log.info('End of escape sequence')\n esc_found = 0\n return output\n\n async def disable_paging(self):\n \"\"\"\n Async method disabling paging on a device\n\n Use the \"cmd_disable_paging\" attribute\n \"\"\"\n log.info('disable_paging')\n await self.send_command(self.cmd_disable_paging)\n\n async def connect(self):\n \"\"\"\n Async method used for connecting a device\n\n Currently supported: SSH and Telnet\n \"\"\"\n log.info('connect')\n try:\n if self._protocol == 'ssh':\n await self.connectSSH()\n elif self._protocol == 'telnet':\n await self.connectTelnet()\n else:\n raise Exception(\n f'connect: unsupported protocol: {self._protocol}')\n except Exception:\n log.info('connect: connection error')\n raise\n\n async def connectSSH(self):\n \"\"\"\n Async method used for connecting a device using SSH protocol\n \"\"\"\n log.info('connectSSH')\n generator = asyncssh.connect(self.ip, username=self.username,\n password=self.password, known_hosts=None, 
encryption_algs=[algs\n .decode('utf-8') for algs in asyncssh.encryption._enc_algs])\n try:\n self.conn = await asyncio.wait_for(generator, timeout=self.timeout)\n except asyncio.exceptions.TimeoutError as error:\n log.error(\n f\"connectSSH: connection failed: {self.ip} timeout: '{error}'\")\n raise asyncio.exceptions.TimeoutError(\n 'Connection failed: connection timed out.')\n except Exception as error:\n log.error(f\"connectSSH: connection failed: {self.ip} '{error}'\")\n raise\n log.info('connectSSH: connection success')\n self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type\n ='netscud')\n log.info('connectSSH: open_session success')\n data = ''\n prompt_not_found = True\n try:\n while prompt_not_found:\n log.info('connectSSH: beginning of the loop')\n data += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f\"connectSSH: data: '{str(data)}'\")\n log.info(\n f\"connectSSH: data: hex:'{data.encode('utf-8').hex()}'\")\n for prompt in self._connect_first_ending_prompt:\n if data.endswith(prompt):\n log.info(\n f\"connectSSH: first ending prompt found: '{prompt}'\"\n )\n prompt_not_found = False\n break\n log.info('connectSSH: end of loop')\n except Exception as error:\n log.error(\n f\"connectSSH: timeout while reading the prompt: {self.ip} '{error}'\"\n )\n raise\n log.info(f'connectSSH: end of prompt loop')\n data = self.remove_ansi_escape_sequence(data)\n self.prompt = self.find_prompt(str(data))\n log.info(f\"connectSSH: prompt found: '{self.prompt}'\")\n log.info(f\"connectSSH: prompt found size: '{len(self.prompt)}'\")\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def connectTelnet(self):\n \"\"\"\n Async method used for connecting a device using Telnet protocol\n \"\"\"\n log.info('connectTelnet')\n try:\n conn = asyncio.open_connection(self.ip, self.port)\n except Exception as error:\n log.error(\n f\"connectTelnet: preparation to the connection failed: '{error}'\"\n 
)\n raise\n log.info('connectTelnet: preparation to the connection success')\n try:\n self._reader, self._writer = await asyncio.wait_for(conn,\n timeout=self.timeout)\n except asyncio.TimeoutError:\n log.error('connectTelnet: connection: timeout')\n raise\n log.info('connectTelnet: connection success')\n prompt = self._telnet_connect_login\n prompt_password = self._telnet_connect_password\n use_login = True\n output = ''\n byte_data = b''\n while True:\n log.info(f'connectTelnet: read data for prompt')\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=self.timeout)\n log.info(f'connectTelnet: byte_data: {byte_data}')\n output = str(byte_data)\n log.info(f'connectTelnet: output: {output}')\n if prompt in output:\n break\n elif prompt_password in output:\n use_login = False\n break\n log.info(f\"connectTelnet: login prompt: '{output}'\")\n if use_login:\n log.info('connectTelnet: sending login')\n try:\n await self.send_command(self.username, prompt_password)\n log.info('connectTelnet: login sent')\n except Exception:\n raise\n log.info('connectTelnet: sending password')\n try:\n output = await self.telnet_send_command_with_unexpected_pattern(\n self.password, self._connect_first_ending_prompt, self.\n _telnet_connect_authentication_fail_prompt)\n except Exception:\n raise\n log.info('connectTelnet: password sent')\n self.prompt = self.find_prompt(str(output))\n log.info(f\"connectTelnet: prompt found: '{self.prompt}'\")\n if self.enable_mode:\n log.info('connectTelnet: enable mode to be activated')\n try:\n await self.send_command(self.cmd_enable, prompt_password)\n log.info('connectTelnet: enable command sent')\n log.info('connectTelnet: sending enable password')\n await self.telnet_send_command_with_unexpected_pattern(self\n .enable_password, self._connect_first_ending_prompt,\n self._telnet_connect_authentication_fail_prompt)\n log.info('connectTelnet: enable password sent')\n except Exception:\n log.info('connectTelnet: enable 
password failure')\n raise\n if self.cmd_disable_paging:\n await self.disable_paging()\n\n async def disconnect(self):\n \"\"\"\n Async method used to disconnect a device\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnect')\n if self._protocol == 'ssh':\n await self.disconnectSSH()\n elif self._protocol == 'telnet':\n await self.disconnectTelnet()\n else:\n raise Exception(f'Unsupported protocol: {self._protocol}')\n\n async def disconnectSSH(self):\n \"\"\"\n Async method used to disconnect a device in SSH\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectSSH')\n if self.conn:\n self.conn.close()\n self.conn = None\n\n async def disconnectTelnet(self):\n \"\"\"\n Async method used to disconnect a device in Telnet\n\n If this method is not used then exceptions will happen\n when the program will end\n \"\"\"\n log.info('disconnectTelnet')\n if self._writer:\n self._writer.close()\n self._writer = None\n\n async def send_command(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_command')\n if timeout is None:\n timeout = self.timeout\n if self._protocol == 'ssh':\n output = await self.send_commandSSH(cmd, pattern=pattern,\n timeout=timeout)\n elif self._protocol == 'telnet':\n output = await self.send_commandTelnet(cmd, pattern=pattern,\n timeout=timeout)\n else:\n raise Exception(\n f'send_command: unsupported protocol: {self._protocol}')\n return output\n\n async def send_commandSSH(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandSSH')\n if timeout is None:\n timeout = self.timeout\n log.info(f\"send_commandSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd + self._carriage_return_for_send_command)\n log.info('send_commandSSH: command sent')\n output = ''\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = self.remove_ansi_escape_sequence(output)\n output = output.replace('\\r', '')\n log.info(f\"send_commandSSH: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): 
'{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def send_commandTelnet(self, cmd, pattern=None, timeout=None):\n \"\"\"\n Async method used to send data to a device\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a pattern replacing the prompt when the prompt is not expected\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('send_commandTelnet')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + '\\n'\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_commandTelnet: byte_data: '{byte_data}'\")\n output = str(byte_data)\n log.info(f\"send_commandTelnet: output: '{output}'\")\n if pattern:\n if pattern in output:\n break\n elif self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_commandTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_commandTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return output\n\n async def telnet_send_command_with_unexpected_pattern(self, cmd,\n pattern, error_pattern=None, timeout=None):\n \"\"\"\n Async method used to send command for Telnet 
connection to a device with possible unexpected patterns\n\n send_command can wait till time out if login and password are wrong. This method\n speed up the returned error message when authentication failed is identified.\n This method is limited to authentication whem password is required\n\n :param cmd: command to send\n :type cmd: str\n\n :param pattern: optional, a list of patterns located at the very end of the a returned string. Can be used\n to define a custom or unexpected prompt a the end of a string\n :type pattern: str\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :param error_pattern: optional, a list of failed prompts found when the login and password are not correct\n :type error_pattern: str\n\n :return: the output of command\n :rtype: str\n \"\"\"\n log.info('telnet_send_command_with_unexpected_pattern')\n if timeout is None:\n timeout = self.timeout\n cmd = cmd + self._carriage_return_for_send_command\n self._writer.write(cmd.encode())\n output = ''\n byte_data = b''\n pattern_not_found = True\n try:\n while pattern_not_found:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'\"\n )\n log.debug(\n f\"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'\"\n )\n output = str(byte_data)\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: output: '{output}'\"\n )\n if pattern:\n for prompt in pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'\"\n )\n if prompt in output:\n pattern_not_found = False\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'\"\n )\n break\n if error_pattern and pattern_not_found:\n for bad_prompt in error_pattern:\n log.info(\n f\"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: 
'{bad_prompt}'\"\n )\n if bad_prompt in output:\n log.error(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n raise Exception(\n 'telnet_send_command_with_unexpected_pattern: authentication failed'\n )\n except asyncio.TimeoutError:\n await self.disconnect()\n log.error(\n 'telnet_send_command_with_unexpected_pattern: reading prompt: timeout'\n )\n raise\n except Exception as error:\n await self.disconnect()\n log.error(\n f'telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}'\n )\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n return output\n\n async def send_config_set(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_set')\n if timeout is None:\n timeout = self.timeout\n log.info('send_command')\n if self._protocol == 'ssh':\n output = await self.send_config_setSSH(cmds, timeout)\n elif self._protocol == 'telnet':\n output = await self.send_config_setTelnet(cmds, timeout)\n else:\n raise Exception(\n f'send_config_set: unsupported protocol: {self._protocol}')\n return output\n\n async def send_config_setSSH(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setSSH')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list'\n )\n return returned_output\n log.info('send_config_set: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: configuration mode entered')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw 
output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command sent')\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setSSH: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n self.stdinx.write(cmd)\n log.info('send_config_setSSH: command to leave configuration mode sent'\n )\n while True:\n output += await asyncio.wait_for(self.stdoutx.read(\n MAX_BUFFER_DATA), timeout=timeout)\n log.info(f\"send_config_setSSH: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n log.debug(\n 
f\"\"\"send_config_setSSH: raw output: '{output}'\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setSSH: cleaned output: '{output}'\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n log.info('send_config_setTelnet')\n if timeout is None:\n timeout = self.timeout\n returned_output = ''\n if isinstance(cmds, str):\n cmds = [cmds]\n elif not isinstance(cmds, list):\n log.error(\n 'send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list'\n )\n return returned_output\n log.info('send_config_setTelnet: entering configuration mode')\n output = ''\n cmd = self.cmd_enter_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: configuration mode entered')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if 
self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: sending commands')\n output = ''\n for cmd in cmds:\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info('send_config_setTelnet: command sent')\n output = ''\n byte_data = b''\n try:\n while True:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n if self.check_if_prompt_is_found(output):\n break\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n 
f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n log.info('send_config_setTelnet: leaving configuration mode')\n output = ''\n cmd = self.cmd_exit_config_mode\n cmd = cmd + self._carriage_return_for_send_command\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n self._writer.write(cmd.encode())\n log.info(\n 'send_config_setTelnet: command to leave configuration mode sent')\n output = ''\n byte_data = b''\n loop = 3\n try:\n while loop:\n byte_data += await asyncio.wait_for(self._reader.read(\n MAX_BUFFER_DATA), timeout=timeout)\n output = str(byte_data)\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n await asyncio.sleep(0.5)\n if self.check_if_prompt_is_found(output):\n break\n loop -= 1\n except asyncio.TimeoutError:\n log.error('send_config_setTelnet: connection: timeout')\n raise\n except Exception as error:\n log.error(f'send_config_setTelnet: error: {error}')\n raise\n output = byte_data.decode('utf-8', 'ignore')\n log.debug(\n f\"\"\"send_config_setTelnet: raw output: '{output}'\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\"\"\n )\n returned_output += output\n output = self.remove_command_in_output(output, str(cmd))\n output = self.remove_starting_carriage_return_in_output(output)\n output = self.remove_ending_prompt_in_output(output)\n log.debug(\n f\"\"\"send_config_setTelnet: cleaned output: '{output}'\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\"\"\n )\n self.check_error_output(output)\n return returned_output\n\n async def get_version(self):\n \"\"\"\n Asyn method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n log.info('get_version')\n version = ''\n output = await self.send_command(self.cmd_get_version)\n version = output.split('Version ')[1].split(',')[0]\n 
log.info(f'get_version: version: {version}')\n return version\n\n async def get_hostname(self):\n \"\"\"\n Asyn method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n log.info('get_hostname')\n output = await self.send_command(self.cmd_get_hostname)\n log.info(f\"get_hostname: output: '{output}'\")\n output = output.split()[0]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_model(self):\n \"\"\"\n Asyn method used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n log.info('get_model')\n output = await self.send_command(self.cmd_get_model)\n log.info(f\"get_model: output: '{output}'\")\n output = output.split('\"')[3]\n log.info(f\"get_model: model found: '{output}'\")\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n log.info('get_serial_number')\n output = await self.send_command(self.cmd_get_serial_number)\n log.info(f\"get_serial_number: output: '{output}'\")\n output = output.splitlines()[0].split()[-1]\n log.info(f\"get_hostname: hostname found: '{output}'\")\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Asyn method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n log.info('get_config')\n if timeout is None:\n timeout = self.timeout\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n return output\n\n async def save_config(self):\n \"\"\"\n Asyn method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n log.info('save_config')\n output = await self.send_command(self.cmd_save_config)\n return output\n",
"step-5": "# Python library import\nimport asyncio, asyncssh, logging\n\n# Module logging logger\nlog = logging.getLogger(__package__)\n\n# Debug level\n# logging.basicConfig(level=logging.WARNING)\n# logging.basicConfig(level=logging.INFO)\nlogging.basicConfig(level=logging.DEBUG)\nasyncssh.set_debug_level(2)\n\n\n# Declaration of constant values\n\n# Max data to read in read function\nMAX_BUFFER_DATA = 65535\n\n\n# Dictonary with all netmasks of IPv4\nipv4_netmask_list = {\n \"0.0.0.0\": \"0\",\n \"128.0.0.0\": \"1\",\n \"192.0.0.0\": \"2\",\n \"224.0.0.0\": \"3\",\n \"240.0.0.0\": \"4\",\n \"248.0.0.0\": \"5\",\n \"252.0.0.0\": \"6\",\n \"254.0.0.0\": \"7\",\n \"255.0.0.0\": \"8\",\n \"255.128.0.0\": \"9\",\n \"255.192.0.0\": \"10\",\n \"255.224.0.0\": \"11\",\n \"255.240.0.0\": \"12\",\n \"255.248.0.0\": \"13\",\n \"255.252.0.0\": \"14\",\n \"255.254.0.0\": \"15\",\n \"255.255.0.0\": \"16\",\n \"255.255.128.0\": \"17\",\n \"255.255.192.0\": \"18\",\n \"255.255.224.0\": \"19\",\n \"255.255.240.0\": \"20\",\n \"255.255.248.0\": \"21\",\n \"255.255.252.0\": \"22\",\n \"255.255.254.0\": \"23\",\n \"255.255.255.0\": \"24\",\n \"255.255.255.128\": \"25\",\n \"255.255.255.192\": \"26\",\n \"255.255.255.224\": \"27\",\n \"255.255.255.240\": \"28\",\n \"255.255.255.248\": \"29\",\n \"255.255.255.252\": \"30\",\n \"255.255.255.254\": \"31\",\n \"255.255.255.255\": \"32\",\n}\n\n\nclass NetworkDevice:\n \"\"\"\n Base class for network object\n\n\n :param ip: IP address of a device\n :type ip: str\n\n :param username: Username used to connect to a device\n :type username: str\n\n :param password: Password used to connect to a device\n :type password: str\n\n :param device_type: Type of device used\n :type device_type: str\n\n :param port: TCP port used to connect a device. Default value is \"22\" for SSH\n :type port: int, optional\n\n :param timeout: TCP port used to connect a device. 
Default value is 10 seconds\n :type timeout: int, optional\n\n :param _protocol: Protocol used to connect a device. \"ssh\" or \"telnet\" are possible options. Default value is \"ssh\"\n :type _protocol: str, optional\n\n :param enable_mode: Enable mode for devices requiring it. Default value is \"False\"\n :type enable_mode: bool, optional\n\n :param enable_password: Enable password used for enable mode.\n :type enable_password: str, optional\n\n :param conn: Variable used for the management of the SSH connection\n :type conn: SSHClientConnection object\n\n :param _writer: Variable used for the management of the Telnet connection and writing channel\n :type _writer: StreamWriter object\n\n :param _reader: Variable used for the management of the Telnet reading channel\n :type _reader: StreamReader object\n\n :param possible_prompts: Used by the connect method to list all possible prompts of the device\n :type possible_prompts: list\n\n :param _connect_first_ending_prompt: Default possible ending prompts. Used only the time after login and password to discover the prompt\n :type _connect_first_ending_prompt: list\n\n :param list_of_possible_ending_prompts: Different strings at the end of a prompt the device can get. Used for detecting the prompt returned in sent commands\n :type list_of_possible_ending_prompts: list\n\n :param _telnet_connect_login: Login prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_login: str\n\n :param _telnet_connect_password: Password prompt for Telnet. Used to detect when a login is expected or when login and password access is failed\n :type _telnet_connect_password: list\n\n :param _telnet_connect_authentication_fail_prompt: Known failing messages or prompts when an authentication has failed. 
Used to get an answer faster than timeout events\n :type _telnet_connect_authentication_fail_prompt: list\n\n :param cmd_enable: Enable command for entering into enable mode\n :type cmd_enable: str\n\n :param cmd_disable_paging: Command used to disable paging on a device. That command is run at connection time\n :type cmd_disable_paging: str\n\n :param cmd_enter_config_mode: Command used to enter into a configuration mode on a device when this device support that feature.\n :type cmd_enter_config_mode: str\n\n :param cmd_exit_config_mode: Command used to leave a configuration mode on a device when this device support that feature.\n :type cmd_exit_config_mode: str\n\n :param cmd_get_version: API command used to get the software version of a device\n :type cmd_get_version: str\n\n :param cmd_get_hostname: API command used to get the hostname of a device\n :type cmd_get_hostname: str\n\n :param cmd_get_model: API command used to get the model of a device\n :type cmd_get_model: str\n\n :param cmd_get_serial_number: API command used to get the serial number of a device\n :type cmd_get_serial_number: str\n\n :param cmd_get_config: API command used to get the running configuration of a device\n :type cmd_get_config: str\n\n :param cmd_save_config: API command used to save the running configuration on the device\n :type cmd_save_config: str\n \"\"\"\n\n def __init__(self, **kwargs):\n\n # Display info message\n log.info(\"__init__\")\n\n self.ip = \"\"\n self.username = \"\"\n self.password = \"\"\n self.device_type = \"\"\n self.port = 22\n self.timeout = 10\n self._protocol = \"ssh\"\n self.enable_mode = False\n self.enable_password = \"\"\n self.conn = None\n self._writer = None\n self._reader = None\n self.possible_prompts = []\n self._connect_first_ending_prompt = [\"#\", \">\"]\n self.list_of_possible_ending_prompts = [\n \"(config-line)#\",\n \"(config-if)#\",\n \"(config)#\",\n \">\",\n \"#\",\n ]\n self._carriage_return_for_send_command = \"\\n\"\n 
self._send_command_error_in_returned_output = []\n self._telnet_connect_login = \"Username:\"\n self._telnet_connect_password = \"Password:\"\n self._telnet_connect_authentication_fail_prompt = [\":\", \"%\"]\n\n # General commands\n self.cmd_enable = \"enable\"\n self.cmd_disable_paging = \"terminal length 0\"\n self.cmd_enter_config_mode = \"configure terminal\"\n self.cmd_exit_config_mode = \"exit\"\n self.cmd_get_version = \"show version\"\n self.cmd_get_hostname = \"show version | include uptime\"\n self.cmd_get_model = \"show inventory\"\n self.cmd_get_serial_number = \"show inventory | i SN\"\n self.cmd_get_config = \"show running-config\"\n self.cmd_save_config = \"write memory\"\n\n # Layer 1 commands\n self.cmd_get_interfaces = [\n \"interface ethernet print terse without-paging\",\n \"foreach i in=([/interface ethernet find]) do={/interface ethernet monitor $i once without-paging}\",\n \"interface bridge port print terse without-paging\",\n ]\n self.cmd_set_interface = [\n \"interface ethernet enable <INTERFACE>\",\n \"interface ethernet disable <INTERFACE>\",\n 'interface ethernet comment <INTERFACE> \"<COMMENT>\"',\n \"interface ethernet set l2mtu=<MAXIMUMFRAMESIZE> <INTERFACE>\",\n \"interface bridge port set frame-types=<MODE> ingress-filtering=<FILTERINGVLAN> [find interface=<INTERFACE>]\",\n ]\n\n # Layer 2 commands\n self.cmd_get_mac_address_table = \"interface bridge host print without-paging\"\n self.cmd_get_arp = \"ip arp print terse without-paging\"\n self.cmd_get_lldp_neighbors = \"ip neighbor print terse without-paging\"\n self.cmd_get_vlans = \"interface bridge vlan print terse without-paging\"\n self.cmd_add_vlan = 'interface bridge vlan add vlan-ids=<VLAN> comment=\"<VLAN_NAME>\" bridge=<BRIDGE>'\n self.cmd_remove_vlan = \"interface bridge vlan remove [find vlan-ids=<VLAN>]\"\n self.cmd_add_interface_to_vlan = [\n \"interface bridge vlan print terse\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>\",\n 
\"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>\",\n \"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>\",\n ]\n self.cmd_remove_interface_from_vlan = [\n \"interface bridge vlan print terse\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] untagged=<INTERFACE>\",\n \"interface bridge vlan set [find vlan-ids=<VLAN>] tagged=<INTERFACE>\",\n \"interface bridge port set [find interface=<INTERFACE>] pvid=<VLAN>\",\n ]\n\n # Layer 3 commands\n self.cmd_get_routing_table = \"ip route print without-paging terse\"\n self.cmd_get_interfaces_ip = \"ip address print terse without-paging\"\n self.cmd_add_static_route = \"ip route add dst-address=<NETWORK>/<PREFIXLENGTH> gateway=<DESTINATION> distance=<METRIC>\"\n self.cmd_remove_static_route = (\n \"ip route remove [find dst-address=<NETWORK>/<PREFIXLENGTH>]\"\n )\n\n # Display info message\n log.debug(\"__init__: kwargs: \" + str(kwargs))\n\n # Get information from dictionary\n\n # \"ip\" found?\n if \"ip\" in kwargs:\n\n # Save \"ip\" parameter\n self.ip = kwargs[\"ip\"]\n\n # Display info message\n log.info(\"__init__: ip found: \" + str(self.ip))\n\n # \"username\" found?\n if \"username\" in kwargs:\n self.username = kwargs[\"username\"]\n\n # Display info message\n log.info(\"__init__: username found: \" + str(self.username))\n\n # \"password\" found?\n if \"password\" in kwargs:\n self.password = kwargs[\"password\"]\n\n # Display info message\n log.debug(\"__init__: password found: \" + str(self.password))\n\n # \"device_type\" found?\n if \"device_type\" in kwargs:\n self.device_type = kwargs[\"device_type\"]\n\n # Display info message\n log.info(\"__init__: device_type found: \" + str(self.device_type))\n\n # \"timeout\" found?\n if \"timeout\" in kwargs:\n self.timeout = kwargs[\"timeout\"]\n\n # Display info message\n log.info(\"__init__: timeout found: \" + str(self.timeout))\n\n # \"protocol\" found?\n if \"protocol\" in kwargs:\n self._protocol = 
kwargs[\"protocol\"].lower()\n\n # Display info message\n log.info(\"__init__: protocol found: \" + str(self._protocol))\n\n # By default telnet port is 23\n if self._protocol.lower() == \"telnet\":\n self.port = 23\n\n # \"port\" found?\n if \"port\" in kwargs:\n self.port = kwargs[\"port\"]\n\n # Display info message\n log.info(\"__init__: port found: \" + str(self.port))\n\n # \"enable_mode\" found?\n if \"enable_mode\" in kwargs:\n self.enable_mode = kwargs[\"enable_mode\"]\n\n # Display info message\n log.info(\"__init__: enable_mode found: \" + str(self.enable_mode))\n\n # \"enable_password\" found?\n if \"enable_password\" in kwargs:\n self.enable_password = kwargs[\"enable_password\"]\n\n # Display info message\n log.info(\"__init__: enable_password found: \" + str(self.enable_password))\n\n async def __aenter__(self):\n \"\"\"\n Context manager opening connection\n \"\"\"\n\n try:\n # Run an async method to connect a device\n await self.connect()\n\n except Exception:\n\n # Disconnection (if needed) in case the connection is done but something failed\n await self.disconnect()\n\n # propagate exception if needed\n raise\n\n return self\n\n # async def _aexit_(self, exc_type, exc_value, traceback):\n async def __aexit__(self, exc_type, exc_value, traceback):\n\n \"\"\"\n Context manager closing connection\n \"\"\"\n\n # Close the connection\n await self.disconnect()\n\n def find_prompt(self, text):\n \"\"\"\n Method used to find a prompt inside an output string\n\n This method is used during the first communication with the device.\n First it find the prompt then caculate the different forms the prompt\n can take. 
def find_prompt(self, text):
    """
    Extract the device prompt from *text* and precompute its variants.

    Used during the first communication with the device. The prompt is
    assumed to be on the last line of the output. The possible forms of
    the prompt (e.g. "switch>", "switch#", "switch(config)#") are
    computed once and stored in ``self.possible_prompts`` for later
    prompt recognition (see :meth:`check_if_prompt_is_found`).

    :param text: output data ending with a prompt
    :type text: str

    :return: the prompt found
    :rtype: str
    """

    # Take the last line of the output, handling both "\n" and "\r"
    # line endings. (The previous implementation split on "\r" only,
    # which discarded the "\n"-split result and could leave a leading
    # "\n" in the prompt when the device sends "\r\n" line endings.)
    prompt = text.split("\n")[-1].split("\r")[-1]

    # Display info message
    log.info(f"find_prompt: prompt: '{prompt}'")

    # Precompute every prompt variant for future recognition
    self.possible_prompts = self.get_possible_prompts(prompt)

    # Return the prompt
    return prompt

def get_possible_prompts(self, prompt):
    """
    Build the list of all prompt variants for the device.

    The first matching known ending (taken from
    ``self.list_of_possible_ending_prompts``, e.g. "#", ">",
    "(config)#") is stripped from *prompt*, then every possible ending
    is appended to the base name.

    :param prompt: a prompt with a possible ending (e.g. "switch#")
    :type prompt: str

    :return: the list of possible prompts
    :rtype: list
    """

    # All the possible endings of a prompt for this device type
    list_of_possible_ending_prompts = self.list_of_possible_ending_prompts

    # Strip the first matching ending to get the base prompt name
    # (e.g. "switch#" -> "switch")
    my_prompt = prompt
    for ending in list_of_possible_ending_prompts:
        if my_prompt.endswith(ending):
            my_prompt = my_prompt[: -len(ending)]
            break

    # Display info message
    log.info(f"get_possible_prompts: prompt found: '{my_prompt}'")
    log.info(f"get_possible_prompts: prompt found size: '{len(my_prompt)}'")

    # One candidate prompt per possible ending
    list_of_prompts = [
        my_prompt + ending for ending in list_of_possible_ending_prompts
    ]

    # Display info message
    log.info(f"get_possible_prompts: list of possible prompts: {list_of_prompts}")

    # Return the list of prompts
    return list_of_prompts

def check_if_prompt_is_found(self, text):
    """
    Check whether one of the known prompts appears in *text*.

    Relies on ``self.possible_prompts`` having been filled by
    :meth:`find_prompt`.

    :param text: a string possibly containing a prompt
    :type text: str

    :return: True if a known prompt is found in the text
    :rtype: bool
    """

    # Check all possible prompts; the first match wins
    for prompt in self.possible_prompts:
        log.info(f"check_if_prompt_is_found: prompt: '{prompt}'")
        if prompt in text:
            log.info(f"check_if_prompt_is_found: prompt found: '{prompt}'")
            return True

    # No prompt found
    return False
def remove_command_in_output(self, text, cmd):
    """
    Strip the echoed command from the beginning of a string.

    After sending a command, an "echo" of that command appears in the
    output; everything up to and including the last occurrence of the
    command followed by a newline is discarded.

    :param text: the text with the command at the beginning
    :type text: str

    :param cmd: the command previously sent
    :type cmd: str

    :return: the output string without the command
    :rtype: str
    """

    log.info(f"remove_command_in_output: cmd = '{cmd}'")
    log.info(f"remove_command_in_output: cmd (hex) = '{cmd.encode().hex()}'")

    # Keep only what follows the last echoed command; when the command
    # is not present, rpartition leaves the text unchanged.
    output = text.rpartition(cmd + "\n")[2]

    log.info(f"remove_command_in_output: output = '{output}'")

    return output

def remove_starting_carriage_return_in_output(self, text):
    """
    Strip leading carriage-return / newline characters from a string.

    :param text: the text possibly starting with "\\r" / "\\n" chars
    :type text: str

    :return: the output string without the starting carriage returns
    :rtype: str
    """

    log.info("remove_starting_carriage_return_in_output")

    # lstrip treats the argument as a set of characters
    output = text.lstrip("\r\n")

    log.info(f"remove_starting_carriage_return_in_output: output = '{output}'")

    return output
def remove_ending_prompt_in_output(self, text):
    """
    Strip the trailing prompt (and the "\\r\\n" just before it) from a
    string.

    The first prompt from ``self.possible_prompts`` found in the text
    is used; the text is cut at the prompt's length from the end.

    :param text: the text possibly ending with a known prompt
    :type text: str

    :return: the output string without the ending prompt
    :rtype: str
    """

    log.info("remove_ending_prompt_in_output")

    for prompt in self.possible_prompts:
        log.info(f"remove_ending_prompt_in_output: prompt: '{prompt}'")
        if prompt not in text:
            continue
        # Drop the prompt, then any carriage return right before it
        text = text[: -len(prompt)].rstrip("\r\n")
        break

    log.info(f"remove_ending_prompt_in_output: text without prompt:\n'{text}'")

    return text

def check_error_output(self, output):
    """
    Raise an exception when the device reports a command error.

    Checks whether *output* starts with one of the known error strings
    (``self._send_command_error_in_returned_output``, e.g.
    "% Unrecognized command", "% Ambiguous command").

    :param output: the output of a command
    :type output: str

    :raises Exception: if the output starts with an error pattern
    """

    log.info("check_error_output")

    # Nothing to check on empty output
    if not output:
        return

    for element in self._send_command_error_in_returned_output:
        log.info(f"check_error_output: element: {element}")
        if output.startswith(element):
            # Error message detected: propagate it to the caller
            raise Exception(output)
def remove_ansi_escape_sequence(self, text):
    """
    Remove ANSI escape sequences from a string.

    Only CSI sequences ("ESC" + "[" ... terminated by a letter) are
    removed; any other escape sequence is kept as-is.

    :param text: the text possibly containing escape sequences
    :type text: str

    :return: the text without CSI escape sequences
    :rtype: str
    """

    # Small state machine over the characters
    STATE_TEXT, STATE_ESC, STATE_CSI = 0, 1, 2
    state = STATE_TEXT
    kept = []

    for char in text:
        if state == STATE_TEXT:
            if char == "\x1b":
                # Escape found: decide on the next char what to do
                state = STATE_ESC
            else:
                kept.append(char)
        elif state == STATE_ESC:
            if char == "[":
                # Start of a CSI sequence: discard until its final letter
                state = STATE_CSI
            else:
                # Not a CSI sequence: keep the escape and the char
                kept.append("\x1b" + char)
                state = STATE_TEXT
        else:
            # Inside a CSI sequence: a letter terminates it
            if ("a" <= char <= "z") or ("A" <= char <= "Z"):
                state = STATE_TEXT

    return "".join(kept)

async def disable_paging(self):
    """
    Disable paging on the device.

    Sends the command stored in ``self.cmd_disable_paging``.
    """

    log.info("disable_paging")

    await self.send_command(self.cmd_disable_paging)

async def connect(self):
    """
    Connect to the device using the configured protocol.

    Currently supported: SSH and Telnet.

    :raises Exception: if the protocol is not supported
    """

    log.info("connect")

    try:
        if self._protocol == "ssh":
            await self.connectSSH()
        elif self._protocol == "telnet":
            await self.connectTelnet()
        else:
            raise Exception(f"connect: unsupported protocol: {self._protocol}")
    except Exception:
        # Keep a trace of the failure, then let the caller handle it
        log.info("connect: connection error")
        raise
async def connectSSH(self):
    """
    Async method used for connecting a device using SSH protocol.

    Steps:
    - open the asyncssh connection (with every encryption algorithm
      enabled, including legacy ones disabled by default — network
      devices often only offer old ciphers),
    - open an interactive session,
    - read until the data ends with one of the expected first prompts
      (``self._connect_first_ending_prompt``),
    - learn the device prompt with :meth:`find_prompt`,
    - disable paging when ``self.cmd_disable_paging`` is defined.

    :raises asyncio.exceptions.TimeoutError: if the connection or the
        first prompt read exceeds ``self.timeout``
    """

    # Display info message
    log.info("connectSSH")

    # Parameters of the connection. known_hosts=None disables host key
    # checking; encryption_algs lists all algorithms known to asyncssh
    # (even the old ones disabled by default).
    generator = asyncssh.connect(
        self.ip,
        username=self.username,
        password=self.password,
        known_hosts=None,
        encryption_algs=[
            algs.decode("utf-8") for algs in asyncssh.encryption._enc_algs
        ],
    )

    # Trying to connect to the device (bounded by self.timeout)
    try:
        self.conn = await asyncio.wait_for(generator, timeout=self.timeout)
    except asyncio.exceptions.TimeoutError as error:
        # Timeout while connecting
        log.error(f"connectSSH: connection failed: {self.ip} timeout: '{error}'")
        raise asyncio.exceptions.TimeoutError(
            "Connection failed: connection timed out."
        )
    except Exception as error:
        # Any other connection failure
        log.error(f"connectSSH: connection failed: {self.ip} '{error}'")
        raise

    # Display info message
    log.info("connectSSH: connection success")

    # Create an interactive session on the connection
    self.stdinx, self.stdoutx, _ = await self.conn.open_session(term_type="netscud")

    # Display info message
    log.info("connectSSH: open_session success")

    # Accumulated data read from the device
    data = ""

    # By default no prompt found
    prompt_not_found = True

    try:
        # Read until one of the expected first prompts ends the data
        while prompt_not_found:

            # Display info message
            log.info("connectSSH: beginning of the loop")

            # Read a chunk of data (bounded by self.timeout)
            data += await asyncio.wait_for(
                self.stdoutx.read(MAX_BUFFER_DATA), timeout=self.timeout
            )

            # Display info message
            log.info(f"connectSSH: data: '{str(data)}'")
            log.info(f"connectSSH: data: hex:'{data.encode('utf-8').hex()}'")

            # Check if an initial prompt ends the data read so far
            for prompt in self._connect_first_ending_prompt:
                if data.endswith(prompt):
                    # Display info message
                    log.info(f"connectSSH: first ending prompt found: '{prompt}'")
                    # A ending prompt has been found
                    prompt_not_found = False
                    # Leave the loop
                    break

            # Display info message
            log.info("connectSSH: end of loop")

    except Exception as error:
        # Failure (most likely a timeout) while waiting for the prompt
        log.error(
            f"connectSSH: timeout while reading the prompt: {self.ip} '{error}'"
        )
        raise

    # Display info message
    log.info(f"connectSSH: end of prompt loop")

    # Remove possible ANSI escape sequences before analysing the prompt
    data = self.remove_ansi_escape_sequence(data)

    # Learn the device prompt (also fills self.possible_prompts)
    self.prompt = self.find_prompt(str(data))

    # Display info message
    log.info(f"connectSSH: prompt found: '{self.prompt}'")
    log.info(f"connectSSH: prompt found size: '{len(self.prompt)}'")

    # Disable paging command available?
    if self.cmd_disable_paging:
        # Yes, disable paging
        await self.disable_paging()
async def connectTelnet(self):
    """
    Async method used for connecting a device using Telnet protocol.

    Steps:
    - open the TCP connection to ``self.ip``:``self.port``,
    - read until a login prompt (``self._telnet_connect_login``) or a
      password prompt (``self._telnet_connect_password``) is seen — a
      password prompt first means no username is required,
    - send username (if needed) and password, failing fast on the
      authentication-failure patterns,
    - learn the device prompt with :meth:`find_prompt`,
    - optionally enter enable mode, then disable paging when
      ``self.cmd_disable_paging`` is defined.

    :raises asyncio.TimeoutError: if the connection or a read times out
    :raises Exception: if authentication fails
    """

    # Display info message
    log.info("connectTelnet")

    try:
        # Prepare connection with Telnet
        conn = asyncio.open_connection(self.ip, self.port)
    except Exception as error:
        # Preparation to the connection failed
        log.error(f"connectTelnet: preparation to the connection failed: '{error}'")
        raise

    # Display info message
    log.info("connectTelnet: preparation to the connection success")

    try:
        # Connection with Telnet (bounded by self.timeout)
        self._reader, self._writer = await asyncio.wait_for(
            conn, timeout=self.timeout
        )
    except asyncio.TimeoutError:
        # Time out during connection
        log.error("connectTelnet: connection: timeout")
        raise

    # Display info message
    log.info("connectTelnet: connection success")

    # Get prompt for the login
    prompt = self._telnet_connect_login

    # Get prompt for the password
    prompt_password = self._telnet_connect_password

    # By default a login is expected
    use_login = True

    # Temporary string variable
    output = ""

    # Temporary bytes variable
    byte_data = b""

    # Read the telnet banner and the first prompt (login prompt, but a
    # password prompt can come first on some devices)
    while True:

        # Display info message
        log.info(f"connectTelnet: read data for prompt")

        # Read returned prompt
        byte_data += await asyncio.wait_for(
            self._reader.read(MAX_BUFFER_DATA), timeout=self.timeout
        )

        # Display info message
        log.info(f"connectTelnet: byte_data: {byte_data}")

        # Temporary conversion into a string of the form "b'....'"
        output = str(byte_data)

        # Display info message
        log.info(f"connectTelnet: output: {output}")

        # Prompt for the username found?
        if prompt in output:
            break
        # Prompt for the password found?
        elif prompt_password in output:
            # Only a password is required
            use_login = False
            break

    # Display info message
    log.info(f"connectTelnet: login prompt: '{output}'")

    # Login to use?
    if use_login:

        # Display info message
        log.info("connectTelnet: sending login")

        try:
            # Send login; wait for the password prompt next
            await self.send_command(self.username, prompt_password)
            log.info("connectTelnet: login sent")
        except Exception:
            # Problem with the login
            raise

    # Display info message
    log.info("connectTelnet: sending password")

    try:
        # Send password; fail fast on authentication-failure patterns
        output = await self.telnet_send_command_with_unexpected_pattern(
            self.password,
            self._connect_first_ending_prompt,
            self._telnet_connect_authentication_fail_prompt,
        )
    except Exception:
        # Problem with the password
        raise

    # Display info message
    log.info("connectTelnet: password sent")

    # Learn the device prompt (also fills self.possible_prompts)
    self.prompt = self.find_prompt(str(output))

    # Display info message
    log.info(f"connectTelnet: prompt found: '{self.prompt}'")

    # Password enable?
    if self.enable_mode:

        # Display info message
        log.info("connectTelnet: enable mode to be activated")

        try:
            # Send enable command; wait for the password prompt next
            await self.send_command(self.cmd_enable, prompt_password)
            log.info("connectTelnet: enable command sent")

            # Display info message
            log.info("connectTelnet: sending enable password")

            # Send enable password, failing fast on failure patterns
            await self.telnet_send_command_with_unexpected_pattern(
                self.enable_password,
                self._connect_first_ending_prompt,
                self._telnet_connect_authentication_fail_prompt,
            )

            # Display info message
            log.info("connectTelnet: enable password sent")

        except Exception:
            # Problem with the enable password
            log.info("connectTelnet: enable password failure")
            raise

    # Disable paging command available?
    if self.cmd_disable_paging:
        # Yes, disable paging
        await self.disable_paging()
async def disconnect(self):
    """
    Close the connection to the device.

    Must be called before the program ends, otherwise exceptions can
    be raised when the event loop shuts down.

    :raises Exception: if the protocol is not supported
    """

    log.info("disconnect")

    if self._protocol == "ssh":
        await self.disconnectSSH()
    elif self._protocol == "telnet":
        await self.disconnectTelnet()
    else:
        raise Exception(f"Unsupported protocol: {self._protocol}")

async def disconnectSSH(self):
    """
    Close the SSH connection, if one is open.
    """

    log.info("disconnectSSH")

    if self.conn:
        # Close and forget the SSH connection
        self.conn.close()
        self.conn = None

async def disconnectTelnet(self):
    """
    Close the Telnet connection, if one is open.
    """

    log.info("disconnectTelnet")

    if self._writer:
        # Close and forget the Telnet transport
        self._writer.close()
        self._writer = None
async def send_command(self, cmd, pattern=None, timeout=None):
    """
    Send a command to the device and return its output.

    Dispatches to the SSH or Telnet implementation depending on the
    configured protocol.

    :param cmd: command to send
    :type cmd: str

    :param pattern: optional, a pattern replacing the prompt when the
        prompt is not expected
    :type pattern: str

    :param timeout: optional, a timeout for the command sent
        (defaults to ``self.timeout``)
    :type timeout: str

    :return: the output of the command
    :rtype: str

    :raises Exception: if the protocol is not supported
    """

    log.info("send_command")

    # Fall back to the device-level timeout
    if timeout is None:
        timeout = self.timeout

    if self._protocol == "ssh":
        return await self.send_commandSSH(cmd, pattern=pattern, timeout=timeout)

    if self._protocol == "telnet":
        return await self.send_commandTelnet(cmd, pattern=pattern, timeout=timeout)

    raise Exception(f"send_command: unsupported protocol: {self._protocol}")
async def send_commandSSH(self, cmd, pattern=None, timeout=None):
    """
    Async method used to send data to a device over SSH.

    Writes the command, then reads until either *pattern* (when given)
    or one of the known prompts is found. The returned output is
    cleaned: ANSI escape sequences and "\\r" removed, echoed command
    stripped, leading carriage returns and ending prompt removed, and
    checked for device error messages (which raise an exception).

    :param cmd: command to send
    :type cmd: str

    :param pattern: optional, a pattern replacing the prompt when the
        prompt is not expected
    :type pattern: str

    :param timeout: optional, a timeout for the command sent
        (defaults to ``self.timeout``)
    :type timeout: str

    :return: the output of command
    :rtype: str
    """

    # Debug info message
    log.info("send_commandSSH")

    # Default value of timeout variable
    if timeout is None:
        timeout = self.timeout

    # Debug info message
    log.info(f"send_commandSSH: cmd = '{cmd}'")

    # Sending command (the carriage return is mandatory for the device
    # to execute it)
    self.stdinx.write(cmd + self._carriage_return_for_send_command)

    # Display message
    log.info("send_commandSSH: command sent")

    # Variable used to gather data
    output = ""

    # Reading data until the prompt (or the pattern) is found
    while True:

        # Read the data received (bounded by the timeout)
        output += await asyncio.wait_for(
            self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout
        )

        # Remove ANSI escape sequence
        output = self.remove_ansi_escape_sequence(output)

        # Remove possible "\r"
        output = output.replace("\r", "")

        # Debug info message
        log.info(f"send_commandSSH: output: '{output}'")

        # Is a pattern used?
        if pattern:
            # Use pattern instead of prompt
            if pattern in output:
                break
        else:
            # Check if prompt is found
            if self.check_if_prompt_is_found(output):
                break

    # Debug info message
    log.debug(
        f"send_commandSSH: raw output: '{output}'\nsend_commandSSH: raw output (hex): '{output.encode().hex()}'"
    )

    # Remove the command sent from the result of the command
    output = self.remove_command_in_output(output, str(cmd))
    # Remove the carriage return of the output
    output = self.remove_starting_carriage_return_in_output(output)
    # Remove the ending prompt of the output
    output = self.remove_ending_prompt_in_output(output)

    # Debug info message
    log.debug(
        f"send_commandSSH: cleaned output: '{output}'\nsend_commandSSH: cleaned output (hex): '{output.encode().hex()}'"
    )

    # Check if there is an error in the output string (like
    # "% Unrecognized command") and generate an exception if needed
    self.check_error_output(output)

    # Return the result of the command
    return output
async def send_commandTelnet(self, cmd, pattern=None, timeout=None):
    """
    Async method used to send data to a device over Telnet.

    Writes the command, then reads until either *pattern* (when given)
    or one of the known prompts is found. The returned output is
    decoded from bytes, then cleaned: echoed command stripped, leading
    carriage returns and ending prompt removed, and checked for device
    error messages (which raise an exception).

    :param cmd: command to send
    :type cmd: str

    :param pattern: optional, a pattern replacing the prompt when the
        prompt is not expected
    :type pattern: str

    :param timeout: optional, a timeout for the command sent
        (defaults to ``self.timeout``)
    :type timeout: str

    :return: the output of command
    :rtype: str

    :raises asyncio.TimeoutError: if the read exceeds the timeout
    """

    # Debug info message
    log.info("send_commandTelnet")

    # Default value of timeout variable
    if timeout is None:
        timeout = self.timeout

    # Add carriage return at the end of the command (mandatory to send
    # the command)
    cmd = cmd + "\n"

    # Sending command
    self._writer.write(cmd.encode())

    # Temporary string variable
    output = ""

    # Temporary bytes variable
    byte_data = b""

    try:

        # Read data until the prompt (or the pattern) is found
        while True:

            # Read returned prompt (bounded by the timeout)
            byte_data += await asyncio.wait_for(
                self._reader.read(MAX_BUFFER_DATA), timeout=timeout
            )

            # Display info message
            log.info(f"send_commandTelnet: byte_data: '{byte_data}'")

            # Temporary conversion into a string of the form "b'....'"
            output = str(byte_data)

            # Display info message
            log.info(f"send_commandTelnet: output: '{output}'")

            # Is a pattern used?
            if pattern:
                # Use pattern instead of prompt
                if pattern in output:
                    break
            else:
                # Check if prompt is found
                if self.check_if_prompt_is_found(output):
                    break

    except asyncio.TimeoutError:
        # Time out while reading the prompt
        log.error("send_commandTelnet: connection: timeout")
        raise

    except Exception as error:
        # Error while reading the prompt
        log.error(f"send_commandTelnet: error: {error}")
        raise

    # Convert data (bytes) into string
    output = byte_data.decode("utf-8", "ignore")

    # Debug info message
    log.debug(
        f"send_commandTelnet: raw output: '{output}'\nsend_commandTelnet: raw output (hex): '{output.encode().hex()}'"
    )

    # Remove the command sent from the result of the command
    output = self.remove_command_in_output(output, str(cmd))
    # Remove the carriage return of the output
    output = self.remove_starting_carriage_return_in_output(output)
    # Remove the ending prompt of the output
    output = self.remove_ending_prompt_in_output(output)

    # Debug info message
    log.debug(
        f"send_commandTelnet: cleaned output: '{output}'\nsend_commandTelnet: cleaned output (hex): '{output.encode().hex()}'"
    )

    # Check if there is an error in the output string (like
    # "% Unrecognized command") and generate an exception if needed
    self.check_error_output(output)

    # Return the result of the command
    return output
async def telnet_send_command_with_unexpected_pattern(
    self, cmd, pattern, error_pattern=None, timeout=None
):
    """
    Async method used to send a command over Telnet while watching for
    possible unexpected (failure) patterns.

    ``send_command`` can wait till time out if login and password are
    wrong. This method speeds up the returned error message when an
    authentication failure is identified. It is limited to
    authentication when a password is required.

    :param cmd: command to send
    :type cmd: str

    :param pattern: a list of patterns located at the very end of the
        returned string; can be used to define a custom or unexpected
        prompt at the end of a string
    :type pattern: list

    :param error_pattern: optional, a list of failure prompts seen when
        the login and password are not correct
    :type error_pattern: list

    :param timeout: optional, a timeout for the command sent
        (defaults to ``self.timeout``)
    :type timeout: str

    :return: the output of command
    :rtype: str

    :raises Exception: if an authentication failure pattern is found
        (the connection is closed first)
    """

    # Debug info message
    log.info("telnet_send_command_with_unexpected_pattern")

    # Default value of timeout variable
    if timeout is None:
        timeout = self.timeout

    # Add carriage return at the end of the command (mandatory to send
    # the command)
    cmd = cmd + self._carriage_return_for_send_command

    # Sending command
    self._writer.write(cmd.encode())

    # Temporary string variable
    output = ""

    # Temporary bytes variable
    byte_data = b""

    # By default pattern is not found
    pattern_not_found = True

    try:

        # Read data until an expected pattern is found (or a failure
        # pattern raises)
        while pattern_not_found:

            # Read returned prompt (bounded by the timeout)
            byte_data += await asyncio.wait_for(
                self._reader.read(MAX_BUFFER_DATA), timeout=timeout
            )

            # Display info message
            log.info(
                f"telnet_send_command_with_unexpected_pattern: byte_data: '{byte_data}'"
            )

            # Display debug message
            log.debug(
                f"telnet_send_command_with_unexpected_pattern: byte_data: hex: '{byte_data.hex()}'"
            )

            # Temporary conversion into a string of the form "b'....'"
            output = str(byte_data)

            # Display info message
            log.info(
                f"telnet_send_command_with_unexpected_pattern: output: '{output}'"
            )

            # Is a pattern used?
            if pattern:

                # Check all expected patterns in the output
                for prompt in pattern:

                    # Display info message
                    log.info(
                        f"telnet_send_command_with_unexpected_pattern: checking prompt: '{prompt}'"
                    )

                    # A pattern found?
                    if prompt in output:

                        # A pattern is found: the main loop can stop
                        pattern_not_found = False

                        # Display info message
                        log.info(
                            f"telnet_send_command_with_unexpected_pattern: prompt found: '{prompt}'"
                        )

                        # Leave the loop
                        break

            # Is an unexpected pattern used (and nothing found yet)?
            if error_pattern and pattern_not_found:

                # Check all failure patterns in the output
                for bad_prompt in error_pattern:

                    # Display info message
                    log.info(
                        f"telnet_send_command_with_unexpected_pattern: checking unexpected prompt: '{bad_prompt}'"
                    )

                    # A failure pattern found?
                    if bad_prompt in output:

                        # Display error message
                        log.error(
                            "telnet_send_command_with_unexpected_pattern: authentication failed"
                        )

                        # Raise exception (caught below: the connection
                        # is closed before propagating)
                        raise Exception(
                            "telnet_send_command_with_unexpected_pattern: authentication failed"
                        )

    except asyncio.TimeoutError:

        # Time out while reading the prompt

        # Close the connection in order to not display RuntimeError
        await self.disconnect()

        # Display error message
        log.error(
            "telnet_send_command_with_unexpected_pattern: reading prompt: timeout"
        )

        # Exception propagation
        raise

    except Exception as error:

        # Error while reading the prompt (including the authentication
        # failure raised above)

        # Close the connection in order to not display RuntimeError
        await self.disconnect()

        # Display error message
        log.error(
            f"telnet_send_command_with_unexpected_pattern: reading prompt: error: {error}"
        )

        # Exception propagation
        raise

    # Convert data (bytes) into string
    output = byte_data.decode("utf-8", "ignore")

    # Debug info message
    log.debug(
        f"telnet_send_command_with_unexpected_pattern: raw output: '{output}'\ntelnet_send_command_with_unexpected_pattern: raw output (hex): '{output.encode().hex()}'"
    )

    # Remove the command sent from the result of the command
    output = self.remove_command_in_output(output, str(cmd))
    # Remove the carriage return of the output
    output = self.remove_starting_carriage_return_in_output(output)
    # Remove the ending prompt of the output
    output = self.remove_ending_prompt_in_output(output)

    # Debug info message
    log.debug(
        f"telnet_send_command_with_unexpected_pattern: cleaned output: '{output}'\ntelnet_send_command_with_unexpected_pattern: cleaned output (hex): '{output.encode().hex()}'"
    )

    # Return the result of the command
    return output
async def send_config_set(self, cmds=None, timeout=None):
    """
    Send one or several commands in configuration mode.

    Dispatches to the SSH or Telnet implementation depending on the
    configured protocol. The underlying implementation performs three
    steps: entering configuration mode, sending the commands, and
    leaving configuration mode.

    :param cmds: the command(s) to send to the device
    :type cmds: str or list

    :param timeout: optional, a timeout for the commands sent
        (defaults to ``self.timeout``)
    :type timeout: str

    :return: the aggregated output of the commands sent
    :rtype: str

    :raises Exception: if the protocol is not supported
    """

    log.info("send_config_set")

    # Fall back to the device-level timeout
    if timeout is None:
        timeout = self.timeout

    if self._protocol == "ssh":
        return await self.send_config_setSSH(cmds, timeout)

    if self._protocol == "telnet":
        return await self.send_config_setTelnet(cmds, timeout)

    raise Exception(f"send_config_set: unsupported protocol: {self._protocol}")
Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n\n # Display info message\n log.info(\"send_config_setSSH\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Clear returned output\n returned_output = \"\"\n\n # Check if cmds is a string\n if isinstance(cmds, str):\n\n # A string\n\n # Convert the string into a list\n cmds = [cmds]\n\n # A list?\n elif not isinstance(cmds, list):\n\n # Not a list (and not a string)\n\n # Display error message\n log.error(\n \"send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list\"\n )\n\n # Leave the method\n return returned_output\n\n ##############################\n # Entering configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_set: entering configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command for entering in config made\n cmd = self.cmd_enter_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display message\n log.info(\"send_config_setSSH: configuration mode entered\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of 
the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Sending commands\n ##############################\n\n # Display info message\n log.info(\"send_config_setSSH: sending commands\")\n\n # Clear output\n output = \"\"\n\n # Each command\n for cmd in cmds:\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display info message\n log.info(\"send_config_setSSH: command sent\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove 
the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Leaving configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setSSH: leaving configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command to leave config made\n cmd = self.cmd_exit_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display info message\n log.info(\"send_config_setSSH: command to leave configuration mode sent\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info 
message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the commands\n return returned_output\n\n async def send_config_setTelnet(self, cmds=None, timeout=None):\n \"\"\"\n Async method used to send command in config mode\n\n The commands send can be either a string a list of strings. There are\n 3 steps:\n - Entering configuration mode\n - Sending the commands\n - Leaving configuration mode\n\n :param cmds: The commands to the device\n :type cmds: str or list\n\n :param timeout: optional, a timeout for the command sent. Default value is self.timeout\n :type timeout: str\n\n :return: the results of the commands sent\n :rtype: list of str\n \"\"\"\n\n # Display info message\n log.info(\"send_config_setTelnet\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Clear returned output\n returned_output = \"\"\n\n # Check if cmds is a string\n if isinstance(cmds, str):\n\n # A string\n\n # Convert the string into a list\n cmds = [cmds]\n\n # A list?\n elif not isinstance(cmds, list):\n\n # Not a list (and not a string)\n\n # Display error message\n log.error(\n \"send_config_setTelnet: parameter cmds used in send_config_set is neither a string or a list\"\n )\n\n # Leave the method\n return returned_output\n\n ##############################\n # Entering configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setTelnet: entering configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command for entering in config made\n cmd = self.cmd_enter_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # 
Display info message\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Display message\n log.info(\"send_config_setTelnet: configuration mode entered\")\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read the data received\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_config_setTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_config_setTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_config_setTelnet: raw output: '{output}'\\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setTelnet: cleaned output: '{output}'\\nsend_config_setTelnet: cleaned output (hex): 
'{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Sending commands\n ##############################\n\n # Display info message\n log.info(\"send_config_setTelnet: sending commands\")\n\n # Clear output\n output = \"\"\n\n # Each command\n for cmd in cmds:\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Display info message\n log.info(\"send_config_setTelnet: command sent\")\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n try:\n\n # Read data\n while True:\n\n # Read the data received\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Temporary convertion in string. 
This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_config_setTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_config_setTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_config_setTelnet: raw output: '{output}'\\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setTelnet: cleaned output: '{output}'\\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Leaving configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setTelnet: leaving configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command to leave config made\n cmd = self.cmd_exit_config_mode\n\n # Add carriage return at the end of the 
command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setTelnet: cmd = '{cmd}'\")\n\n # Sending command\n self._writer.write(cmd.encode())\n\n # Display info message\n log.info(\"send_config_setTelnet: command to leave configuration mode sent\")\n\n # Temporary string variable\n output = \"\"\n\n # Temporary bytes variable\n byte_data = b\"\"\n\n # Protection against infinite loop\n loop = 3\n\n try:\n\n # Read data\n while loop:\n\n # Read the data received\n byte_data += await asyncio.wait_for(\n self._reader.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Temporary convertion in string. This string has the following form: \"b'....'\"\n output = str(byte_data)\n\n # Display info message\n log.info(f\"send_config_setTelnet: output: '{output}'\")\n\n await asyncio.sleep(0.5)\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Protection for \"exit\" command infinite loop in Cisco when enable is not activated\n loop -= 1\n\n except asyncio.TimeoutError:\n\n # Time out during when reading prompt\n\n # Display error message\n log.error(\"send_config_setTelnet: connection: timeout\")\n\n # Exception propagation\n raise\n\n except Exception as error:\n\n # Error during when reading prompt\n\n # Display error message\n log.error(f\"send_config_setTelnet: error: {error}\")\n\n # Exception propagation\n raise\n\n # Convert data (bytes) into string\n output = byte_data.decode(\"utf-8\", \"ignore\")\n\n # Debug info message\n log.debug(\n f\"send_config_setTelnet: raw output: '{output}'\\nsend_config_setTelnet: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = 
self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setTelnet: cleaned output: '{output}'\\nsend_config_setTelnet: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the commands\n return returned_output\n\n #########################################################\n #\n # List of API\n #\n #########################################################\n\n async def get_version(self):\n \"\"\"\n Asyn method used to get the version of the software of the device\n\n :return: Version of the software of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_version\")\n\n # By default empty string\n version = \"\"\n\n # Run get version on the device\n output = await self.send_command(self.cmd_get_version)\n\n # Seek \"Version \" and \",\" to get the version in the returned output\n version = output.split(\"Version \")[1].split(\",\")[0]\n\n # Display info message\n log.info(f\"get_version: version: {version}\")\n\n # Return the version of the software of the device\n return version\n\n async def get_hostname(self):\n \"\"\"\n Asyn method used to get the name of the device\n\n :return: Name of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_hostname\")\n\n # Get hostname\n output = await self.send_command(self.cmd_get_hostname)\n\n # Display info message\n log.info(f\"get_hostname: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split()[0]\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the name of the device\n return output\n\n async def get_model(self):\n \"\"\"\n Asyn method 
used to get the model of the device\n\n :return: Model of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_model\")\n\n # Get model\n output = await self.send_command(self.cmd_get_model)\n\n # Display info message\n log.info(f\"get_model: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split('\"')[3]\n\n # Display info message\n log.info(f\"get_model: model found: '{output}'\")\n\n # Return the model of the device\n return output\n\n async def get_serial_number(self):\n \"\"\"\n Get serial number of the switch or the serial number of the first switch of a stack\n\n :return: Serial number of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_serial_number\")\n\n # Get serial number\n output = await self.send_command(self.cmd_get_serial_number)\n\n # Display info message\n log.info(f\"get_serial_number: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.splitlines()[0].split()[-1]\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the serial number of the device\n return output\n\n async def get_config(self, timeout=None):\n \"\"\"\n Asyn method used to get the configuration of the device\n\n :param timeout: optional, a timeout for the command sent. 
Default value is self.timeout\n :type timeout: str\n\n :return: Configuration of the device\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"get_config\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Get config\n output = await self.send_command(self.cmd_get_config, timeout=timeout)\n\n # Return de configuration of the device\n return output\n\n async def save_config(self):\n \"\"\"\n Asyn method used to save the current configuration on the device\n\n :return: Commands of the configuration saving process\n :rtype: str\n \"\"\"\n\n # Display info message\n log.info(\"save_config\")\n\n # Send command\n output = await self.send_command(self.cmd_save_config)\n\n # Return the commands of the configuration saving process\n return output\n",
"step-ids": [
9,
10,
12,
14,
15
]
}
|
[
9,
10,
12,
14,
15
] |
<|reserved_special_token_0|>
def _unique_predict(solve_list):
valid_solve_list = filter(lambda x: x[0] is not None, solve_list)
valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])
unique_solve_list = list()
current_no = -1
for e in valid_solve_list:
if current_no != e[0]:
current_no = e[0]
unique_solve_list.append(e)
return unique_solve_list
<|reserved_special_token_0|>
@safe_one_retval_wrapper
def _run_analysis(data_root, work_root, answer_root):
with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'
) as fh:
config = json.load(fh)
predict_file = os.path.join(work_root, 'output.answer.json')
with open(predict_file, 'r', encoding='utf-8') as fh:
predict = json.load(fh)
analysis_result = {}
for kind, result in predict.items():
analysis_result[kind] = analysis_data(answer_root, kind, result)
path = os.path.join(work_root, 'result.json')
with open(path, 'w', encoding='utf-8') as fh:
json.dump(analysis_result, fh, ensure_ascii=False)
return True
def run_analysis(data_root, work_root, answer_root):
msg, code = _run_analysis(data_root, work_root, answer_root)
result_file = os.path.join(work_root, 'result.json')
if msg is None:
print('Succ:output to %s' % result_file)
else:
with open(result_file, 'w', encoding='utf-8') as fh:
fh.write(msg)
print('Fail:output to %s' % result_file)
return msg, code
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _unique_predict(solve_list):
valid_solve_list = filter(lambda x: x[0] is not None, solve_list)
valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])
unique_solve_list = list()
current_no = -1
for e in valid_solve_list:
if current_no != e[0]:
current_no = e[0]
unique_solve_list.append(e)
return unique_solve_list
@safe_one_retval_wrapper
def _analysis_data(answer_root, kind, result):
if result['pass'] != 1:
result['score'] = -1
raise Exception(result['message'])
predict_suites = result['predict_suites']
total = 0
correct = 0
for suite in predict_suites:
with open(os.path.join(answer_root, suite + '.answer.json'), 'r',
encoding='utf-8') as fh:
answer_dict = json.load(fh)
solve_list = _unique_predict(predict_suites[suite])
total = total + len(answer_dict)
for q in solve_list:
if q[1] == answer_dict[str(q[0])]['answer']:
correct = correct + 1
total = total if total else 1
return correct / total
<|reserved_special_token_0|>
@safe_one_retval_wrapper
def _run_analysis(data_root, work_root, answer_root):
with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'
) as fh:
config = json.load(fh)
predict_file = os.path.join(work_root, 'output.answer.json')
with open(predict_file, 'r', encoding='utf-8') as fh:
predict = json.load(fh)
analysis_result = {}
for kind, result in predict.items():
analysis_result[kind] = analysis_data(answer_root, kind, result)
path = os.path.join(work_root, 'result.json')
with open(path, 'w', encoding='utf-8') as fh:
json.dump(analysis_result, fh, ensure_ascii=False)
return True
def run_analysis(data_root, work_root, answer_root):
msg, code = _run_analysis(data_root, work_root, answer_root)
result_file = os.path.join(work_root, 'result.json')
if msg is None:
print('Succ:output to %s' % result_file)
else:
with open(result_file, 'w', encoding='utf-8') as fh:
fh.write(msg)
print('Fail:output to %s' % result_file)
return msg, code
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _unique_predict(solve_list):
valid_solve_list = filter(lambda x: x[0] is not None, solve_list)
valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])
unique_solve_list = list()
current_no = -1
for e in valid_solve_list:
if current_no != e[0]:
current_no = e[0]
unique_solve_list.append(e)
return unique_solve_list
@safe_one_retval_wrapper
def _analysis_data(answer_root, kind, result):
if result['pass'] != 1:
result['score'] = -1
raise Exception(result['message'])
predict_suites = result['predict_suites']
total = 0
correct = 0
for suite in predict_suites:
with open(os.path.join(answer_root, suite + '.answer.json'), 'r',
encoding='utf-8') as fh:
answer_dict = json.load(fh)
solve_list = _unique_predict(predict_suites[suite])
total = total + len(answer_dict)
for q in solve_list:
if q[1] == answer_dict[str(q[0])]['answer']:
correct = correct + 1
total = total if total else 1
return correct / total
def analysis_data(answer_root, kind, result):
if result.get('pass') == -1:
return {'pass': -1, 'score': -1, 'message': None}
message, score = _analysis_data(answer_root, kind, result)
if message is None:
return {'pass': 1, 'score': score, 'message': message}
return {'pass': 0, 'score': -1, 'message': message}
@safe_one_retval_wrapper
def _run_analysis(data_root, work_root, answer_root):
with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'
) as fh:
config = json.load(fh)
predict_file = os.path.join(work_root, 'output.answer.json')
with open(predict_file, 'r', encoding='utf-8') as fh:
predict = json.load(fh)
analysis_result = {}
for kind, result in predict.items():
analysis_result[kind] = analysis_data(answer_root, kind, result)
path = os.path.join(work_root, 'result.json')
with open(path, 'w', encoding='utf-8') as fh:
json.dump(analysis_result, fh, ensure_ascii=False)
return True
def run_analysis(data_root, work_root, answer_root):
msg, code = _run_analysis(data_root, work_root, answer_root)
result_file = os.path.join(work_root, 'result.json')
if msg is None:
print('Succ:output to %s' % result_file)
else:
with open(result_file, 'w', encoding='utf-8') as fh:
fh.write(msg)
print('Fail:output to %s' % result_file)
return msg, code
<|reserved_special_token_1|>
import os
import json
from .utils import *
def _unique_predict(solve_list):
    """Drop entries whose id is None, sort by id, and keep the first entry per id.

    Each element of *solve_list* is a (question_id, answer) pair; the result
    is a list sorted by question_id with duplicate ids removed (the first
    occurrence after the stable sort wins).
    """
    valid_solve_list = filter(lambda x: x[0] is not None, solve_list)
    valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])
    unique_solve_list = list()
    current_no = -1  # id of the last entry kept; -1 is a sentinel "none yet"
    for e in valid_solve_list:
        if current_no != e[0]:
            current_no = e[0]
            unique_solve_list.append(e)
    return unique_solve_list
@safe_one_retval_wrapper
def _analysis_data(answer_root, kind, result):
    """Compute accuracy of one prediction result against the answer files.

    Raises when the result is not marked as passed; otherwise returns
    correct / total over every suite in result['predict_suites'].
    The decorator presumably converts exceptions into a (message, value)
    pair -- TODO confirm against .utils.
    """
    if result['pass'] != 1:
        result['score'] = -1
        raise Exception(result['message'])
    predict_suites = result['predict_suites']
    total = 0
    correct = 0
    for suite in predict_suites:
        # Load the ground-truth answers for this suite.
        with open(os.path.join(answer_root, suite + '.answer.json'), 'r',
            encoding='utf-8') as fh:
            answer_dict = json.load(fh)
        # Deduplicate predictions by question id (first prediction wins).
        solve_list = _unique_predict(predict_suites[suite])
        total = total + len(answer_dict)
        for q in solve_list:
            if q[1] == answer_dict[str(q[0])]['answer']:
                correct = correct + 1
    total = total if total else 1  # avoid division by zero
    return correct / total
def analysis_data(answer_root, kind, result):
    """Score one prediction result, returning a pass/score/message dict."""
    # A result pre-marked as -1 is passed through without scoring.
    if result.get('pass') == -1:
        return {'pass': -1, 'score': -1, 'message': None}
    message, score = _analysis_data(answer_root, kind, result)
    if message is None:
        return {'pass': 1, 'score': score, 'message': message}
    return {'pass': 0, 'score': -1, 'message': message}
@safe_one_retval_wrapper
def _run_analysis(data_root, work_root, answer_root):
    """Score every kind in output.answer.json and write result.json.

    Returns True on success; the decorator presumably converts exceptions
    into a (message, value) pair -- TODO confirm against .utils.
    """
    with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'
        ) as fh:
        config = json.load(fh)  # NOTE(review): loaded but never used here
    predict_file = os.path.join(work_root, 'output.answer.json')
    with open(predict_file, 'r', encoding='utf-8') as fh:
        predict = json.load(fh)
    analysis_result = {}
    for kind, result in predict.items():
        analysis_result[kind] = analysis_data(answer_root, kind, result)
    path = os.path.join(work_root, 'result.json')
    with open(path, 'w', encoding='utf-8') as fh:
        json.dump(analysis_result, fh, ensure_ascii=False)
    return True
def run_analysis(data_root, work_root, answer_root):
    """Run the analysis and report where result.json was written.

    On failure, msg (the error text) is written into result.json itself.
    Returns the (msg, code) pair produced by _run_analysis.
    """
    msg, code = _run_analysis(data_root, work_root, answer_root)
    result_file = os.path.join(work_root, 'result.json')
    if msg is None:
        print('Succ:output to %s' % result_file)
    else:
        with open(result_file, 'w', encoding='utf-8') as fh:
            fh.write(msg)
        print('Fail:output to %s' % result_file)
    return msg, code
<|reserved_special_token_1|>
import os
import json
from .utils import *
def _unique_predict(solve_list):
valid_solve_list = filter(lambda x: x[0] is not None, solve_list)
valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])
unique_solve_list = list()
current_no = -1
for e in valid_solve_list:
if current_no != e[0]:
current_no = e[0]
unique_solve_list.append(e)
return unique_solve_list
@safe_one_retval_wrapper
def _analysis_data(answer_root, kind, result):
    """Compute accuracy of one prediction result against the answer files.

    Raises when the result is not marked as passed; otherwise returns
    correct / total over every suite in result['predict_suites'].
    """
    if result["pass"] != 1:
        result["score"] = -1
        raise Exception(result['message'])
    total = 0
    correct = 0
    for suite, predictions in result["predict_suites"].items():
        # Load the ground-truth answers for this suite.
        answer_path = os.path.join(answer_root, suite + ".answer.json")
        with open(answer_path, "r", encoding="utf-8") as fh:
            answers = json.load(fh)
        total += len(answers)
        # Deduplicate predictions by question id, then count matches.
        correct += sum(
            1
            for qid, pred in _unique_predict(predictions)
            if pred == answers[str(qid)]['answer']
        )
    return correct / (total or 1)
def analysis_data(answer_root, kind, result):
    """Score one prediction result, returning a pass/score/message dict."""
    # A result pre-marked as -1 is passed through without scoring.
    if result.get('pass') == -1:
        return {"pass": -1, "score": -1, "message": None}
    message, score = _analysis_data(answer_root, kind, result)
    if message is not None:
        # Scoring failed; propagate the error message.
        return {"pass": 0, "score": -1, "message": message}
    return {"pass": 1, "score": score, "message": message}
@safe_one_retval_wrapper
def _run_analysis(data_root, work_root, answer_root):
    """Score every kind in output.answer.json and write result.json.

    Returns True on success.
    """
    config_path = os.path.join(data_root, "config.json")
    with open(config_path, "r", encoding="utf-8") as fh:
        # NOTE(review): config is loaded but never used here -- kept so a
        # missing/invalid config.json still fails the run.
        config = json.load(fh)
    with open(os.path.join(work_root, "output.answer.json"), "r", encoding="utf-8") as fh:
        predict = json.load(fh)
    analysis_result = {
        kind: analysis_data(answer_root, kind, result)
        for kind, result in predict.items()
    }
    with open(os.path.join(work_root, "result.json"), "w", encoding="utf-8") as fh:
        json.dump(analysis_result, fh, ensure_ascii=False)
    return True
def run_analysis(data_root, work_root, answer_root):
    """Run the analysis, persisting any failure message to result.json."""
    msg, code = _run_analysis(data_root, work_root, answer_root)
    result_file = os.path.join(work_root, 'result.json')
    if msg is None:
        print('Succ:output to %s' % result_file)
        return msg, code
    # On failure, overwrite result.json with the raw error message.
    with open(result_file, 'w', encoding='utf-8') as fh:
        fh.write(msg)
    print('Fail:output to %s' % result_file)
    return msg, code
|
flexible
|
{
"blob_id": "00a1b5f20f15994a659eda56201ba7c45d49a4db",
"index": 4186,
"step-1": "<mask token>\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n<mask token>\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n",
"step-2": "<mask token>\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result['pass'] != 1:\n result['score'] = -1\n raise Exception(result['message'])\n predict_suites = result['predict_suites']\n total = 0\n correct = 0\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + '.answer.json'), 'r',\n encoding='utf-8') as fh:\n answer_dict = json.load(fh)\n solve_list = _unique_predict(predict_suites[suite])\n total = total + len(answer_dict)\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\n<mask token>\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % 
result_file)\n return msg, code\n",
"step-3": "<mask token>\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result['pass'] != 1:\n result['score'] = -1\n raise Exception(result['message'])\n predict_suites = result['predict_suites']\n total = 0\n correct = 0\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + '.answer.json'), 'r',\n encoding='utf-8') as fh:\n answer_dict = json.load(fh)\n solve_list = _unique_predict(predict_suites[suite])\n total = total + len(answer_dict)\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\ndef analysis_data(answer_root, kind, result):\n if result.get('pass') == -1:\n return {'pass': -1, 'score': -1, 'message': None}\n message, score = _analysis_data(answer_root, kind, result)\n if message is None:\n return {'pass': 1, 'score': score, 'message': message}\n return {'pass': 0, 'score': -1, 'message': message}\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, 
work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n",
"step-4": "import os\nimport json\nfrom .utils import *\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result['pass'] != 1:\n result['score'] = -1\n raise Exception(result['message'])\n predict_suites = result['predict_suites']\n total = 0\n correct = 0\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + '.answer.json'), 'r',\n encoding='utf-8') as fh:\n answer_dict = json.load(fh)\n solve_list = _unique_predict(predict_suites[suite])\n total = total + len(answer_dict)\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\ndef analysis_data(answer_root, kind, result):\n if result.get('pass') == -1:\n return {'pass': -1, 'score': -1, 'message': None}\n message, score = _analysis_data(answer_root, kind, result)\n if message is None:\n return {'pass': 1, 'score': score, 'message': message}\n return {'pass': 0, 'score': -1, 'message': message}\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, 'config.json'), 'r', encoding='utf-8'\n ) as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, 'output.answer.json')\n with open(predict_file, 'r', encoding='utf-8') as fh:\n predict = json.load(fh)\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 'result.json')\n with open(path, 'w', encoding='utf-8') as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return 
True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, 'result.json')\n if msg is None:\n print('Succ:output to %s' % result_file)\n else:\n with open(result_file, 'w', encoding='utf-8') as fh:\n fh.write(msg)\n print('Fail:output to %s' % result_file)\n return msg, code\n",
"step-5": "import os\nimport json\nfrom .utils import *\n\n\ndef _unique_predict(solve_list):\n valid_solve_list = filter(lambda x: x[0] is not None, solve_list)\n valid_solve_list = sorted(valid_solve_list, key=lambda x: x[0])\n unique_solve_list = list()\n current_no = -1\n for e in valid_solve_list:\n if current_no != e[0]:\n current_no = e[0]\n unique_solve_list.append(e)\n return unique_solve_list\n\n\n@safe_one_retval_wrapper\ndef _analysis_data(answer_root, kind, result):\n if result[\"pass\"] != 1:\n result[\"score\"] = -1\n raise Exception(result['message'])\n\n predict_suites = result[\"predict_suites\"]\n total = 0\n correct = 0\n\n # unique predict suites\n\n for suite in predict_suites:\n with open(os.path.join(answer_root, suite + \".answer.json\"), \"r\", encoding=\"utf-8\") as fh:\n answer_dict = json.load(fh)\n # get unique solve list by id (the first element)\n solve_list = _unique_predict(predict_suites[suite])\n\n total = total + len(answer_dict)\n\n for q in solve_list:\n if q[1] == answer_dict[str(q[0])]['answer']:\n correct = correct + 1\n total = total if total else 1\n return correct / total\n\n\ndef analysis_data(answer_root, kind, result):\n if result.get('pass') == -1:\n return {\"pass\": -1, \"score\": -1, \"message\": None}\n\n message, score = _analysis_data(answer_root, kind, result)\n if message is None:\n return {\"pass\": 1, \"score\": score, \"message\": message}\n\n return {\"pass\": 0, \"score\": -1, \"message\": message}\n\n\n@safe_one_retval_wrapper\ndef _run_analysis(data_root, work_root, answer_root):\n with open(os.path.join(data_root, \"config.json\"), \"r\", encoding=\"utf-8\") as fh:\n config = json.load(fh)\n predict_file = os.path.join(work_root, \"output.answer.json\")\n with open(predict_file, \"r\", encoding=\"utf-8\") as fh:\n predict = json.load(fh)\n\n analysis_result = {}\n for kind, result in predict.items():\n analysis_result[kind] = analysis_data(answer_root, kind, result)\n path = os.path.join(work_root, 
\"result.json\")\n with open(path, \"w\", encoding=\"utf-8\") as fh:\n json.dump(analysis_result, fh, ensure_ascii=False)\n return True\n\n\ndef run_analysis(data_root, work_root, answer_root):\n msg, code = _run_analysis(data_root, work_root, answer_root)\n result_file = os.path.join(work_root, \"result.json\")\n if msg is None:\n print(\"Succ:output to %s\" % result_file)\n else:\n with open(result_file, \"w\", encoding=\"utf-8\") as fh:\n fh.write(msg)\n\n print(\"Fail:output to %s\" % result_file)\n return msg, code\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_mapped_sku(sku):
try:
cursor = connect(aws_access_key_id=config2['aws_access_key_id'],
aws_secret_access_key=config2['aws_secret_access_key'],
s3_staging_dir=config2['s3_staging_dir'], region_name=config2[
'region_name']).cursor()
cursor.execute(
'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '
, {'sku': str(sku)})
result = cursor.fetchall()
for row in result:
return {'Cross-Reference No': row[0], 'brand': row[1]}
except Exception as e:
print(e)
return {}
return {}
def get_sku(seller_sku, sc_sku, seller):
    """Return the seller SKU mapped to *sc_sku* in the master product list.

    Returns the first matching seller_sku, True when no row matches, or
    False when the query fails.  ``seller_sku`` and ``seller`` are unused
    here but kept so existing callers continue to work.
    """
    try:
        cursor = connect(aws_access_key_id=config2['aws_access_key_id'],
            aws_secret_access_key=config2['aws_secret_access_key'],
            s3_staging_dir=config2['s3_staging_dir'], region_name=config2[
            'region_name']).cursor()
        # Fix: the parameter dict previously bound an undefined name ``sku``,
        # so every call raised NameError (caught below) and returned False.
        cursor.execute(
            'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '
            , {'sku': str(sc_sku)})
        for row in cursor:
            return row[0]
    except Exception as e:
        print(e)
        return False
    return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_mapped_sku(sku):
try:
cursor = connect(aws_access_key_id=config2['aws_access_key_id'],
aws_secret_access_key=config2['aws_secret_access_key'],
s3_staging_dir=config2['s3_staging_dir'], region_name=config2[
'region_name']).cursor()
cursor.execute(
'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '
, {'sku': str(sku)})
result = cursor.fetchall()
for row in result:
return {'Cross-Reference No': row[0], 'brand': row[1]}
except Exception as e:
print(e)
return {}
return {}
def get_sku(seller_sku, sc_sku, seller):
    """Return the seller SKU mapped to *sc_sku* in the master product list.

    Returns the first matching seller_sku, True when no row matches, or
    False when the query fails.  ``seller_sku`` and ``seller`` are unused
    here but kept so existing callers continue to work.
    """
    try:
        cursor = connect(aws_access_key_id=config2['aws_access_key_id'],
            aws_secret_access_key=config2['aws_secret_access_key'],
            s3_staging_dir=config2['s3_staging_dir'], region_name=config2[
            'region_name']).cursor()
        # Fix: the parameter dict previously bound an undefined name ``sku``,
        # so every call raised NameError (caught below) and returned False.
        cursor.execute(
            'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '
            , {'sku': str(sc_sku)})
        for row in cursor:
            return row[0]
    except Exception as e:
        print(e)
        return False
    return True
def add_sku(sc_sku, seller_sku, seller):
try:
cursor = connect(aws_access_key_id=config2['aws_access_key_id'],
aws_secret_access_key=config2['aws_secret_access_key'],
s3_staging_dir=config2['s3_staging_dir'], region_name=config2[
'region_name']).cursor()
cursor.execute(
'INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )'
, {'scsku': str(sc_sku), 'sellersku': str(seller_sku), 'seller':
str(seller)})
return cursor.description
except Exception as e:
print(e)
return False
return True
<|reserved_special_token_1|>
from pyathena import connect
from Config import config2
from Config import merchants
def get_mapped_sku(sku):
    """Map an SC SKU to its seller SKU and seller name via Athena.

    Returns {'Cross-Reference No': seller_sku, 'brand': seller} for the
    first matching row, or {} when there is no match or the query fails.
    """
    try:
        connection = connect(aws_access_key_id=config2['aws_access_key_id'],
                             aws_secret_access_key=config2['aws_secret_access_key'],
                             s3_staging_dir=config2['s3_staging_dir'],
                             region_name=config2['region_name'])
        cursor = connection.cursor()
        cursor.execute(
            'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '
            , {'sku': str(sku)})
        for row in cursor.fetchall():
            # First row wins; the mapping is expected to be unique.
            return {'Cross-Reference No': row[0], 'brand': row[1]}
    except Exception as e:
        # Best-effort lookup: log and fall through to the empty mapping.
        print(e)
        return {}
    return {}
def get_sku(seller_sku, sc_sku, seller):
    """Return the seller SKU mapped to *sc_sku* in the master product list.

    Returns the first matching seller_sku, True when no row matches, or
    False when the query fails.  ``seller_sku`` and ``seller`` are unused
    here but kept so existing callers continue to work.
    """
    try:
        cursor = connect(aws_access_key_id=config2['aws_access_key_id'],
            aws_secret_access_key=config2['aws_secret_access_key'],
            s3_staging_dir=config2['s3_staging_dir'], region_name=config2[
            'region_name']).cursor()
        # Fix: the parameter dict previously bound an undefined name ``sku``,
        # so every call raised NameError (caught below) and returned False.
        cursor.execute(
            'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '
            , {'sku': str(sc_sku)})
        for row in cursor:
            return row[0]
    except Exception as e:
        print(e)
        return False
    return True
def add_sku(sc_sku, seller_sku, seller):
    """Insert one sc_sku -> (seller_sku, seller) row into the mapping table.

    Returns the cursor description on success, or False when the query fails.
    """
    try:
        connection = connect(aws_access_key_id=config2['aws_access_key_id'],
                             aws_secret_access_key=config2['aws_secret_access_key'],
                             s3_staging_dir=config2['s3_staging_dir'],
                             region_name=config2['region_name'])
        cursor = connection.cursor()
        cursor.execute(
            'INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )'
            , {'scsku': str(sc_sku), 'sellersku': str(seller_sku),
               'seller': str(seller)})
        return cursor.description
    except Exception as e:
        print(e)
        return False
    return True
<|reserved_special_token_1|>
from pyathena import connect
from Config import config2
from Config import merchants
def get_mapped_sku(sku):
    """Map an SC SKU to its seller SKU and seller name via Athena.

    Returns {'Cross-Reference No': seller_sku, 'brand': seller} for the
    first matching row, or {} when there is no match or the query fails.
    """
    try:
        connection = connect(aws_access_key_id=config2["aws_access_key_id"],
                             aws_secret_access_key=config2["aws_secret_access_key"],
                             s3_staging_dir=config2["s3_staging_dir"],
                             region_name=config2["region_name"])
        cursor = connection.cursor()
        cursor.execute("SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s ",
                       {"sku": str(sku)})
        for row in cursor.fetchall():
            # First row wins; the mapping is expected to be unique.
            return {'Cross-Reference No': row[0], 'brand': row[1]}
    except Exception as e:
        # Best-effort lookup: log and fall through to the empty mapping.
        print(e)
        return {}
    return {}
def get_sku(seller_sku, sc_sku, seller):
    """Return the seller SKU mapped to *sc_sku* in the master product list.

    Returns the first matching seller_sku, True when no row matches, or
    False when the query fails.  ``seller_sku`` and ``seller`` are unused
    here but kept so existing callers continue to work.
    """
    try:
        cursor = connect(aws_access_key_id=config2["aws_access_key_id"],
                         aws_secret_access_key=config2["aws_secret_access_key"],
                         s3_staging_dir=config2["s3_staging_dir"],
                         region_name=config2["region_name"]).cursor()
        # Fix: the parameter dict previously bound an undefined name ``sku``,
        # so every call raised NameError (caught below) and returned False.
        cursor.execute("SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s ",
                       {"sku": str(sc_sku)})
        for row in cursor:
            return (row[0])
    except Exception as e:
        print(e)
        return False
    return True
def add_sku(sc_sku, seller_sku, seller):
    """Insert one sc_sku -> (seller_sku, seller) row into the mapping table.

    Returns the cursor description on success, or False when the query fails.
    """
    try:
        connection = connect(aws_access_key_id=config2["aws_access_key_id"],
                             aws_secret_access_key=config2["aws_secret_access_key"],
                             s3_staging_dir=config2["s3_staging_dir"],
                             region_name=config2["region_name"])
        cursor = connection.cursor()
        cursor.execute("INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )",
                       {"scsku": str(sc_sku), "sellersku": str(seller_sku), "seller": str(seller)})
        return cursor.description
    except Exception as e:
        print(e)
        return False
    return True
# print(add_sku('test', 'test', 'Adean'))
# result = (get_mapped_sku('HDS-3571'))
# print(result['Cross-Reference No'])
|
flexible
|
{
"blob_id": "6add599035573842475c7f9155c5dbbea6c96a8a",
"index": 3618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n for row in cursor:\n return row[0]\n except Exception as e:\n print(e)\n return False\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n for row in cursor:\n return row[0]\n except Exception as e:\n print(e)\n return False\n return True\n\n\ndef add_sku(sc_sku, seller_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )'\n , {'scsku': str(sc_sku), 'sellersku': str(seller_sku), 'seller':\n str(seller)})\n return cursor.description\n except Exception as e:\n print(e)\n return False\n return True\n",
"step-4": "from pyathena import connect\nfrom Config import config2\nfrom Config import merchants\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s '\n , {'sku': str(sku)})\n for row in cursor:\n return row[0]\n except Exception as e:\n print(e)\n return False\n return True\n\n\ndef add_sku(sc_sku, seller_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2['aws_access_key_id'],\n aws_secret_access_key=config2['aws_secret_access_key'],\n s3_staging_dir=config2['s3_staging_dir'], region_name=config2[\n 'region_name']).cursor()\n cursor.execute(\n 'INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )'\n , {'scsku': str(sc_sku), 'sellersku': str(seller_sku), 'seller':\n str(seller)})\n return cursor.description\n except Exception as e:\n print(e)\n return False\n return True\n",
"step-5": "from pyathena import connect\nfrom Config import config2\nfrom Config import merchants\n\n\ndef get_mapped_sku(sku):\n try:\n cursor = connect(aws_access_key_id=config2[\"aws_access_key_id\"],\n aws_secret_access_key=config2[\"aws_secret_access_key\"],\n s3_staging_dir=config2[\"s3_staging_dir\"],\n region_name=config2[\"region_name\"]).cursor()\n cursor.execute(\"SELECT seller_sku, seller FROM optivations.master_product_list where sc_sku = %(sku)s \",\n {\"sku\": str(sku)})\n\n # print(cursor.description)\n result = cursor.fetchall()\n for row in result:\n return {'Cross-Reference No': row[0], 'brand': row[1]}\n\n except Exception as e:\n print(e)\n return {}\n return {}\n\n\ndef get_sku(seller_sku, sc_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2[\"aws_access_key_id\"],\n aws_secret_access_key=config2[\"aws_secret_access_key\"],\n s3_staging_dir=config2[\"s3_staging_dir\"],\n region_name=config2[\"region_name\"]).cursor()\n cursor.execute(\"SELECT seller_sku FROM optivations.master_product_list where sc_sku = %(sku)s \",\n {\"sku\": str(sku)})\n\n # print(cursor.description)\n # print(cursor.fetchall())\n for row in cursor:\n return (row[0])\n except Exception as e:\n print(e)\n return False\n return True\n\n\ndef add_sku(sc_sku, seller_sku, seller):\n try:\n cursor = connect(aws_access_key_id=config2[\"aws_access_key_id\"],\n aws_secret_access_key=config2[\"aws_secret_access_key\"],\n s3_staging_dir=config2[\"s3_staging_dir\"],\n region_name=config2[\"region_name\"]).cursor()\n cursor.execute(\"INSERT INTO optivations.master_product_list VALUES ( %(scsku)s, %(sellersku)s, %(seller)s )\",\n {\"scsku\": str(sc_sku), \"sellersku\": str(seller_sku), \"seller\": str(seller)})\n\n return (cursor.description)\n # print(cursor.fetchall())\n # for row in cursor:\n # return (row[0])\n except Exception as e:\n print(e)\n return False\n return True\n# print(add_sku('test', 'test', 'Adean'))\n# result = (get_mapped_sku('HDS-3571'))\n# 
print(result['Cross-Reference No'])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.autodiscover()
<|reserved_special_token_0|>
urlpatterns += patterns('piston.authentication', url(
'^oauth/request_token/$', 'oauth_request_token'), url(
'^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',
'oauth_access_token'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.autodiscover()
urlpatterns = patterns('', url('^admin/', include(admin.site.urls)), url(
'^accounts/login/$', 'django.contrib.auth.views.login', {
'template_name': 'login.html'}))
urlpatterns += patterns('piston.authentication', url(
'^oauth/request_token/$', 'oauth_request_token'), url(
'^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',
'oauth_access_token'))
<|reserved_special_token_1|>
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
# NOTE(review): ``django.conf.urls.defaults`` and ``patterns()`` are legacy
# Django (<=1.5) APIs; this module targets an old release.
# Populate the admin registry from each installed app's admin module.
admin.autodiscover()
# Core routes: the admin site plus a login view (needed so the OAuth
# authorize step has a page to send unauthenticated users to).
urlpatterns = patterns('', url('^admin/', include(admin.site.urls)), url(
    '^accounts/login/$', 'django.contrib.auth.views.login', {
    'template_name': 'login.html'}))
# django-piston OAuth endpoints: request token, user authorization,
# and access-token exchange.
urlpatterns += patterns('piston.authentication', url(
    '^oauth/request_token/$', 'oauth_request_token'), url(
    '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',
    'oauth_access_token'))
<|reserved_special_token_1|>
from django.conf.urls.defaults import patterns, include, url
# NOTE(review): ``django.conf.urls.defaults`` and ``patterns()`` are legacy
# Django (<=1.5) APIs; this module targets an old release.

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'foo.views.home', name='home'),
    # url(r'^foo/', include('foo.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),

    # required url to login so you can authorize token
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
)

# piston, oauth urls: request token, user authorization, access-token exchange
urlpatterns += patterns(
    'piston.authentication',
    url(r'^oauth/request_token/$','oauth_request_token'),
    url(r'^oauth/authorize/$','oauth_user_auth'),
    url(r'^oauth/access_token/$','oauth_access_token'),
)
|
flexible
|
{
"blob_id": "266ce1aaa3283cf2aaa271a317a80c3860880a49",
"index": 4901,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.autodiscover()\n<mask token>\nurlpatterns += patterns('piston.authentication', url(\n '^oauth/request_token/$', 'oauth_request_token'), url(\n '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',\n 'oauth_access_token'))\n",
"step-3": "<mask token>\nadmin.autodiscover()\nurlpatterns = patterns('', url('^admin/', include(admin.site.urls)), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html'}))\nurlpatterns += patterns('piston.authentication', url(\n '^oauth/request_token/$', 'oauth_request_token'), url(\n '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',\n 'oauth_access_token'))\n",
"step-4": "from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\nurlpatterns = patterns('', url('^admin/', include(admin.site.urls)), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html'}))\nurlpatterns += patterns('piston.authentication', url(\n '^oauth/request_token/$', 'oauth_request_token'), url(\n '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',\n 'oauth_access_token'))\n",
"step-5": "from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'foo.views.home', name='home'),\n # url(r'^foo/', include('foo.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n # required url to login so you can authorize token\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),\n)\n\n# piston, oauth urls\nurlpatterns += patterns(\n 'piston.authentication',\n url(r'^oauth/request_token/$','oauth_request_token'),\n url(r'^oauth/authorize/$','oauth_user_auth'),\n url(r'^oauth/access_token/$','oauth_access_token'),\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import httplib
def get_status_code(host, path="/"):
try:
connect = httplib.HTTPConnection(host)
connect.request("HEAD", path)
return connect.getresponse().status
except StandardError:
return None
if __name__ == '__main__':
print get_status_code("google.com")
|
normal
|
{
"blob_id": "891a490410fd8c7b8879f1e71f24df2db62ff85d",
"index": 7748,
"step-1": "import httplib\n\ndef get_status_code(host, path=\"/\"):\n try:\n connect = httplib.HTTPConnection(host)\n connect.request(\"HEAD\", path)\n return connect.getresponse().status\n except StandardError:\n return None\n\nif __name__ == '__main__':\n print get_status_code(\"google.com\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import shutil
# Root directory whose two-level-deep subdirectories will be flattened.
# root_path = '../from_1691'
root_path = 'C:/Users/koyou/Desktop/test'
# Mistakes are possible, so dry_run controls whether we only log the
# planned actions or actually perform them.
# dry_run = True
dry_run = False
def move_directory(input_directory_path, output_directory_path):
    """Report the intended move; carry it out unless dry_run is set."""
    print("moving %s to %s" % (input_directory_path, output_directory_path))
    if dry_run:
        return
    shutil.move(input_directory_path, output_directory_path)
#
# main: promote each level-2 directory up to root_path, then delete the
# now-emptied level-1 directory.
#
print("Root dir is %s" % root_path)

for level1 in os.listdir(root_path):  # level1 == test1
    level1_path = os.path.join(root_path, level1)
    if os.path.isdir(level1_path):
        # Print the directory name so progress is visible.
        print("> %s" % level1)

        for level2 in os.listdir(level1_path):  # level2 == test1-1
            level2_path = os.path.join(level1_path, level2)
            if os.path.isdir(level2_path):
                # Print the level-2 name before moving it.
                print(">> %s" % level2)

                move_directory(level2_path, root_path)

        # 2. deleting dir (removes level1 and any remaining files in it)
        print("Deleting %s" % level1_path)
        if not dry_run:
            shutil.rmtree(level1_path)
|
normal
|
{
"blob_id": "7de19a85a6a05bd2972b11571d5f05219c6beb1a",
"index": 916,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"step-4": "<mask token>\nroot_path = 'C:/Users/koyou/Desktop/test'\ndry_run = False\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"step-5": "import os\nimport shutil\n\n# root_path = '../from_1691'\nroot_path = 'C:/Users/koyou/Desktop/test'\n\n# 실수할 수도 있으므로 dry_run 을 설정해서 로그만 찍을 것인지\n# 실제 작동도 진행할 것인지 결정한다.\n# dry_run = True\ndry_run = False\n\ndef move_directory(input_directory_path, output_directory_path):\n print(\"moving %s to %s\" % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n#\n# main\n#\nprint(\"Root dir is %s\" % root_path)\n\nfor level1 in os.listdir(root_path): # level1 == test1\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n # 디렉토리 이름을 출력해줘야 진행상황 알 수 있음\n print(\"> %s\" % level1)\n\n for level2 in os.listdir(level1_path): # level2 == test1-1\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n # level2 이름 출력\n print(\">> %s\" % level2)\n\n move_directory(level2_path, root_path)\n\n # 2. deleting dir\n print(\"Deleting %s\" % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"step-ids": [
0,
1,
2,
3,
5
]
}
|
[
0,
1,
2,
3,
5
] |
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import base64
import configobj
import datetime
import os
config = configobj.ConfigObj('.env')
port = 2525
smtp_server = "smtp.mailtrap.io"
login = config['SMTP_USERNAME']
password = config['SMTP_PASSWORD']
sender_email = "mailtrap@example.com"
receiver_email = "new@example.com"
last_sent = datetime.datetime.now()
last_index_sent = 0
def timeFromLastSent():
if(last_sent is None):
return 10
else:
return (datetime.datetime.now() - last_sent).total_seconds()
# send your email
def send():
global last_index_sent
global last_sent
DIR = './videos'
videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])
for i in range(last_index_sent, videosToSend + 1):
last_index_sent=i
last_sent = datetime.datetime.now()
encoded = base64.b64encode(open("frame.jpg", "rb").read()).decode()
html = f"""\
<html>
<body>
<img src="data:image/jpg;base64,{encoded}">
<a href="http://localhost:3000/{last_index_sent}">Gravar</a>
</body>
</html>
"""
message = MIMEMultipart("alternative")
message["Subject"] = "inline embedding"
message["From"] = sender_email
message["To"] = receiver_email
part = MIMEText(html, "html")
message.attach(part)
with smtplib.SMTP("smtp.mailtrap.io", 2525) as server:
server.login(login, password)
server.sendmail(
sender_email, receiver_email, message.as_string() )
print('Sent')
return
|
normal
|
{
"blob_id": "a21ac29911931bb71460175cba584e0011fa2ece",
"index": 1055,
"step-1": "<mask token>\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"step-2": "<mask token>\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"step-3": "<mask token>\nconfig = configobj.ConfigObj('.env')\nport = 2525\nsmtp_server = 'smtp.mailtrap.io'\nlogin = config['SMTP_USERNAME']\npassword = config['SMTP_PASSWORD']\nsender_email = 'mailtrap@example.com'\nreceiver_email = 'new@example.com'\nlast_sent = datetime.datetime.now()\nlast_index_sent = 0\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"step-4": "import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport base64\nimport configobj\nimport datetime\nimport os\nconfig = configobj.ConfigObj('.env')\nport = 2525\nsmtp_server = 'smtp.mailtrap.io'\nlogin = config['SMTP_USERNAME']\npassword = config['SMTP_PASSWORD']\nsender_email = 'mailtrap@example.com'\nreceiver_email = 'new@example.com'\nlast_sent = datetime.datetime.now()\nlast_index_sent = 0\n\n\ndef timeFromLastSent():\n if last_sent is None:\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(\n os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent = i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open('frame.jpg', 'rb').read()).decode()\n html = f\"\"\" <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n message = MIMEMultipart('alternative')\n message['Subject'] = 'inline embedding'\n message['From'] = sender_email\n message['To'] = receiver_email\n part = MIMEText(html, 'html')\n message.attach(part)\n with smtplib.SMTP('smtp.mailtrap.io', 2525) as server:\n server.login(login, password)\n server.sendmail(sender_email, receiver_email, message.as_string())\n print('Sent')\n return\n",
"step-5": "import smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport base64\nimport configobj\nimport datetime\nimport os\nconfig = configobj.ConfigObj('.env')\nport = 2525\nsmtp_server = \"smtp.mailtrap.io\"\nlogin = config['SMTP_USERNAME'] \npassword = config['SMTP_PASSWORD'] \n\nsender_email = \"mailtrap@example.com\"\nreceiver_email = \"new@example.com\"\n\n\n\nlast_sent = datetime.datetime.now()\nlast_index_sent = 0\ndef timeFromLastSent():\n if(last_sent is None):\n return 10\n else:\n return (datetime.datetime.now() - last_sent).total_seconds()\n\n# send your email\ndef send():\n global last_index_sent\n global last_sent\n DIR = './videos'\n videosToSend = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])\n for i in range(last_index_sent, videosToSend + 1):\n last_index_sent=i\n last_sent = datetime.datetime.now()\n encoded = base64.b64encode(open(\"frame.jpg\", \"rb\").read()).decode()\n html = f\"\"\"\\\n <html>\n <body>\n <img src=\"data:image/jpg;base64,{encoded}\">\n <a href=\"http://localhost:3000/{last_index_sent}\">Gravar</a>\n </body>\n </html>\n \"\"\"\n\n message = MIMEMultipart(\"alternative\")\n message[\"Subject\"] = \"inline embedding\"\n message[\"From\"] = sender_email\n message[\"To\"] = receiver_email\n\n part = MIMEText(html, \"html\")\n message.attach(part)\n \n with smtplib.SMTP(\"smtp.mailtrap.io\", 2525) as server:\n server.login(login, password)\n server.sendmail(\n sender_email, receiver_email, message.as_string() )\n print('Sent')\n return\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(25000):
pval, costval = train(inputs, outputs)
print(costval)
val1.append(pval)
cost1.append(costval)
print('the final outputs are:')
for i in range(len(inputs)):
print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][
1], pval[i]))
plt.plot(cost1, color='red')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
inputs = [[0, 0], [1, 0], [0, 1], [1, 1]]
outputs = [1, 0, 0, 1]
x = theano.tensor.matrix(name='x')
w1val = np.asarray([np.random.randn(), np.random.randn()])
w1 = theano.shared(w1val, name='w1')
w2val = np.asarray([np.random.randn(), np.random.randn()])
w2 = theano.shared(w2val, name='w2')
w3val = np.asarray([np.random.randn(), np.random.randn()])
w3 = theano.shared(w3val, name='w3')
b1 = theano.shared(1.1, name='b1')
b2 = theano.shared(1.2, name='b2')
b3 = theano.shared(1.3, name='b3')
a1sum = theano.tensor.dot(x, w1) + b1
a2sum = theano.tensor.dot(x, w2) + b2
a1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))
a2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))
x2 = theano.tensor.stack([a1, a2], axis=1)
<|reserved_special_token_0|>
a3sum = theano.tensor.dot(x2, w3) + b3
a3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))
ahat = a3
a = theano.tensor.vector(name='a')
cost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)
).sum()
dcostdw1 = theano.tensor.grad(cost, w1)
dcostdw2 = theano.tensor.grad(cost, w2)
dcostdw3 = theano.tensor.grad(cost, w3)
dcostdb1 = theano.tensor.grad(cost, b1)
dcostdb2 = theano.tensor.grad(cost, b2)
dcostdb3 = theano.tensor.grad(cost, b3)
wn1 = w1 - 0.02 * dcostdw1
wn2 = w2 - 0.02 * dcostdw2
wn3 = w3 - 0.02 * dcostdw3
wb1 = b1 - 0.02 * dcostdb1
wb2 = b2 - 0.02 * dcostdb2
wb3 = b3 - 0.02 * dcostdb3
train = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),
(w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])
cost1 = []
val1 = []
for i in range(25000):
pval, costval = train(inputs, outputs)
print(costval)
val1.append(pval)
cost1.append(costval)
print('the final outputs are:')
for i in range(len(inputs)):
print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][
1], pval[i]))
plt.plot(cost1, color='red')
plt.show()
<|reserved_special_token_1|>
import numpy as np
import theano
import matplotlib.pyplot as plt
inputs = [[0, 0], [1, 0], [0, 1], [1, 1]]
outputs = [1, 0, 0, 1]
x = theano.tensor.matrix(name='x')
w1val = np.asarray([np.random.randn(), np.random.randn()])
w1 = theano.shared(w1val, name='w1')
w2val = np.asarray([np.random.randn(), np.random.randn()])
w2 = theano.shared(w2val, name='w2')
w3val = np.asarray([np.random.randn(), np.random.randn()])
w3 = theano.shared(w3val, name='w3')
b1 = theano.shared(1.1, name='b1')
b2 = theano.shared(1.2, name='b2')
b3 = theano.shared(1.3, name='b3')
a1sum = theano.tensor.dot(x, w1) + b1
a2sum = theano.tensor.dot(x, w2) + b2
a1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))
a2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))
x2 = theano.tensor.stack([a1, a2], axis=1)
<|reserved_special_token_0|>
a3sum = theano.tensor.dot(x2, w3) + b3
a3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))
ahat = a3
a = theano.tensor.vector(name='a')
cost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)
).sum()
dcostdw1 = theano.tensor.grad(cost, w1)
dcostdw2 = theano.tensor.grad(cost, w2)
dcostdw3 = theano.tensor.grad(cost, w3)
dcostdb1 = theano.tensor.grad(cost, b1)
dcostdb2 = theano.tensor.grad(cost, b2)
dcostdb3 = theano.tensor.grad(cost, b3)
wn1 = w1 - 0.02 * dcostdw1
wn2 = w2 - 0.02 * dcostdw2
wn3 = w3 - 0.02 * dcostdw3
wb1 = b1 - 0.02 * dcostdb1
wb2 = b2 - 0.02 * dcostdb2
wb3 = b3 - 0.02 * dcostdb3
train = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),
(w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])
cost1 = []
val1 = []
for i in range(25000):
pval, costval = train(inputs, outputs)
print(costval)
val1.append(pval)
cost1.append(costval)
print('the final outputs are:')
for i in range(len(inputs)):
print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][
1], pval[i]))
plt.plot(cost1, color='red')
plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#multi layer perceptron with back propogation
import numpy as np
import theano
import matplotlib.pyplot as plt
# In[2]:
inputs=[[0,0],
[1,0],
[0,1],
[1,1]]
outputs=[1,0,0,1]
# In[3]:
x=theano.tensor.matrix(name='x')
# In[4]:
#Hidden layer as inputs from every neuron are 2 and we have 3 neuron
w1val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w1=theano.shared(w1val,name='w1')
w2val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w2=theano.shared(w2val,name='w2')
w3val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse
w3=theano.shared(w3val,name='w3')
# In[5]:
#Bias value is 1
b1 = theano.shared(1.1,name='b1')
b2 = theano.shared(1.2,name='b2')
b3 = theano.shared(1.3,name='b3')
# In[6]:
#computation foe every neuron
#hidden layer
a1sum=theano.tensor.dot(x,w1)+b1
a2sum=theano.tensor.dot(x,w2)+b2
a1=1/(1+theano.tensor.exp(-1*a1sum))
a2=1/(1+theano.tensor.exp(-1*a2sum))
#output layer neuron
#stack is combining two hiding layer values & feeding to the output layer
x2 = theano.tensor.stack([a1,a2],axis=1)
# In[7]:
'''if we write
[[a11,a12,a21,a22],[a33,a34,a43,a44]]-> inputs
what stack will do is
[a11,a33],[a12,a34],[a21,a43],[a22,a44]'''
a3sum=theano.tensor.dot(x2,w3)+b3
a3=1/(1+theano.tensor.exp(-1*a3sum))
#final output
ahat=a3
#actual output
a=theano.tensor.vector(name='a')
# In[8]:
#cost function
cost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()#it is defined for 1/1+eraise to -z
#GDA role
#for calculating gradient
dcostdw1 = theano.tensor.grad(cost,w1)
dcostdw2 = theano.tensor.grad(cost,w2)
dcostdw3 = theano.tensor.grad(cost,w3)
dcostdb1=theano.tensor.grad(cost,b1)
dcostdb2=theano.tensor.grad(cost,b2)
dcostdb3=theano.tensor.grad(cost,b3)
#apply GDA to update the weights
wn1=w1-0.02*dcostdw1
wn2=w2-0.02*dcostdw2
wn3=w3-0.02*dcostdw3
wb1=b1-0.02*dcostdb1
wb2=b2-0.02*dcostdb2
wb3=b3-0.02*dcostdb3
#theano function for training the algorithm
train=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])
cost1=[]
val1=[]
#training a model
for i in range(25000):
pval,costval=train(inputs,outputs)
print(costval)
val1.append(pval)
cost1.append(costval)
# In[9]:
print('the final outputs are:')
for i in range(len(inputs)):
print("the output of x1=%d | x2=%d is %.2f"%(inputs[i][0],inputs[i][1],pval[i]))
plt.plot(cost1,color='red')
plt.show()
# In[ ]:
# In[ ]:
|
flexible
|
{
"blob_id": "adec7efceb038c0ecb23c256c23c2ea212752d64",
"index": 4010,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-3": "<mask token>\ninputs = [[0, 0], [1, 0], [0, 1], [1, 1]]\noutputs = [1, 0, 0, 1]\nx = theano.tensor.matrix(name='x')\nw1val = np.asarray([np.random.randn(), np.random.randn()])\nw1 = theano.shared(w1val, name='w1')\nw2val = np.asarray([np.random.randn(), np.random.randn()])\nw2 = theano.shared(w2val, name='w2')\nw3val = np.asarray([np.random.randn(), np.random.randn()])\nw3 = theano.shared(w3val, name='w3')\nb1 = theano.shared(1.1, name='b1')\nb2 = theano.shared(1.2, name='b2')\nb3 = theano.shared(1.3, name='b3')\na1sum = theano.tensor.dot(x, w1) + b1\na2sum = theano.tensor.dot(x, w2) + b2\na1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))\na2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))\nx2 = theano.tensor.stack([a1, a2], axis=1)\n<mask token>\na3sum = theano.tensor.dot(x2, w3) + b3\na3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))\nahat = a3\na = theano.tensor.vector(name='a')\ncost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)\n ).sum()\ndcostdw1 = theano.tensor.grad(cost, w1)\ndcostdw2 = theano.tensor.grad(cost, w2)\ndcostdw3 = theano.tensor.grad(cost, w3)\ndcostdb1 = theano.tensor.grad(cost, b1)\ndcostdb2 = theano.tensor.grad(cost, b2)\ndcostdb3 = theano.tensor.grad(cost, b3)\nwn1 = w1 - 0.02 * dcostdw1\nwn2 = w2 - 0.02 * dcostdw2\nwn3 = w3 - 0.02 * dcostdw3\nwb1 = b1 - 0.02 * dcostdb1\nwb2 = b2 - 0.02 * dcostdb2\nwb3 = b3 - 0.02 * dcostdb3\ntrain = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),\n (w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])\ncost1 = []\nval1 = []\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-4": "import numpy as np\nimport theano\nimport matplotlib.pyplot as plt\ninputs = [[0, 0], [1, 0], [0, 1], [1, 1]]\noutputs = [1, 0, 0, 1]\nx = theano.tensor.matrix(name='x')\nw1val = np.asarray([np.random.randn(), np.random.randn()])\nw1 = theano.shared(w1val, name='w1')\nw2val = np.asarray([np.random.randn(), np.random.randn()])\nw2 = theano.shared(w2val, name='w2')\nw3val = np.asarray([np.random.randn(), np.random.randn()])\nw3 = theano.shared(w3val, name='w3')\nb1 = theano.shared(1.1, name='b1')\nb2 = theano.shared(1.2, name='b2')\nb3 = theano.shared(1.3, name='b3')\na1sum = theano.tensor.dot(x, w1) + b1\na2sum = theano.tensor.dot(x, w2) + b2\na1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))\na2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))\nx2 = theano.tensor.stack([a1, a2], axis=1)\n<mask token>\na3sum = theano.tensor.dot(x2, w3) + b3\na3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))\nahat = a3\na = theano.tensor.vector(name='a')\ncost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)\n ).sum()\ndcostdw1 = theano.tensor.grad(cost, w1)\ndcostdw2 = theano.tensor.grad(cost, w2)\ndcostdw3 = theano.tensor.grad(cost, w3)\ndcostdb1 = theano.tensor.grad(cost, b1)\ndcostdb2 = theano.tensor.grad(cost, b2)\ndcostdb3 = theano.tensor.grad(cost, b3)\nwn1 = w1 - 0.02 * dcostdw1\nwn2 = w2 - 0.02 * dcostdw2\nwn3 = w3 - 0.02 * dcostdw3\nwb1 = b1 - 0.02 * dcostdb1\nwb2 = b2 - 0.02 * dcostdb2\nwb3 = b3 - 0.02 * dcostdb3\ntrain = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),\n (w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])\ncost1 = []\nval1 = []\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#multi layer perceptron with back propogation\nimport numpy as np\nimport theano\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ninputs=[[0,0],\n [1,0],\n [0,1],\n [1,1]]\noutputs=[1,0,0,1]\n\n\n# In[3]:\n\n\nx=theano.tensor.matrix(name='x')\n\n\n# In[4]:\n\n\n#Hidden layer as inputs from every neuron are 2 and we have 3 neuron\nw1val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw1=theano.shared(w1val,name='w1')\nw2val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw2=theano.shared(w2val,name='w2')\nw3val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw3=theano.shared(w3val,name='w3')\n\n\n# In[5]:\n\n\n#Bias value is 1\nb1 = theano.shared(1.1,name='b1')\nb2 = theano.shared(1.2,name='b2')\nb3 = theano.shared(1.3,name='b3')\n\n\n# In[6]:\n\n\n#computation foe every neuron\n#hidden layer\na1sum=theano.tensor.dot(x,w1)+b1\na2sum=theano.tensor.dot(x,w2)+b2\n\na1=1/(1+theano.tensor.exp(-1*a1sum))\na2=1/(1+theano.tensor.exp(-1*a2sum))\n\n#output layer neuron\n#stack is combining two hiding layer values & feeding to the output layer\nx2 = theano.tensor.stack([a1,a2],axis=1)\n\n\n# In[7]:\n\n\n'''if we write\n[[a11,a12,a21,a22],[a33,a34,a43,a44]]-> inputs\nwhat stack will do is\n[a11,a33],[a12,a34],[a21,a43],[a22,a44]'''\n\na3sum=theano.tensor.dot(x2,w3)+b3\na3=1/(1+theano.tensor.exp(-1*a3sum))\n\n#final output\nahat=a3\n\n#actual output\na=theano.tensor.vector(name='a')\n\n\n# In[8]:\n\n\n#cost function\ncost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()#it is defined for 1/1+eraise to -z\n#GDA role\n#for calculating gradient\n\ndcostdw1 = theano.tensor.grad(cost,w1)\ndcostdw2 = theano.tensor.grad(cost,w2)\ndcostdw3 = theano.tensor.grad(cost,w3)\n\ndcostdb1=theano.tensor.grad(cost,b1)\ndcostdb2=theano.tensor.grad(cost,b2)\ndcostdb3=theano.tensor.grad(cost,b3)\n\n#apply GDA to update the 
weights\nwn1=w1-0.02*dcostdw1\nwn2=w2-0.02*dcostdw2\nwn3=w3-0.02*dcostdw3\n\nwb1=b1-0.02*dcostdb1\nwb2=b2-0.02*dcostdb2\nwb3=b3-0.02*dcostdb3\n#theano function for training the algorithm\ntrain=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])\n\ncost1=[]\nval1=[]\n\n#training a model\nfor i in range(25000):\n pval,costval=train(inputs,outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\n\n\n# In[9]:\n\n\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print(\"the output of x1=%d | x2=%d is %.2f\"%(inputs[i][0],inputs[i][1],pval[i]))\nplt.plot(cost1,color='red')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
command('strip', '-S', '-x', input_file(
'bin/darwin-4.2.1/release/target-os-darwin/test'))
main()
<|reserved_special_token_1|>
from MockProgram import *
command('strip', '-S', '-x', input_file(
'bin/darwin-4.2.1/release/target-os-darwin/test'))
main()
<|reserved_special_token_1|>
#!/usr/bin/python
#
# Copyright 2017 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from MockProgram import *
command('strip', '-S', '-x', input_file('bin/darwin-4.2.1/release/target-os-darwin/test'))
main()
|
flexible
|
{
"blob_id": "d2f77afd0d282b1fa4859c5368c9d2c745a5625e",
"index": 3293,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncommand('strip', '-S', '-x', input_file(\n 'bin/darwin-4.2.1/release/target-os-darwin/test'))\nmain()\n",
"step-3": "from MockProgram import *\ncommand('strip', '-S', '-x', input_file(\n 'bin/darwin-4.2.1/release/target-os-darwin/test'))\nmain()\n",
"step-4": "#!/usr/bin/python\n#\n# Copyright 2017 Steven Watanabe\n#\n# Distributed under the Boost Software License, Version 1.0.\n# (See accompanying file LICENSE_1_0.txt or copy at\n# http://www.boost.org/LICENSE_1_0.txt)\n\nfrom MockProgram import *\n\ncommand('strip', '-S', '-x', input_file('bin/darwin-4.2.1/release/target-os-darwin/test'))\n\nmain()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('user_id') is None:
return redirect('/sign_in')
return f(*args, **kwargs)
return decorated_function
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
<|reserved_special_token_0|>
def send_mail(subject, recipient, template, **kwargs):
msg = Message(subject, recipients=[recipient])
msg.html = render_template(template, **kwargs)
thr = Thread(target=async_send_mail, args=[app, msg])
thr.start()
return thr
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('user_id') is None:
return redirect('/sign_in')
return f(*args, **kwargs)
return decorated_function
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def async_send_mail(applic, msg):
with applic.app_context():
mail.send(msg)
def send_mail(subject, recipient, template, **kwargs):
msg = Message(subject, recipients=[recipient])
msg.html = render_template(template, **kwargs)
thr = Thread(target=async_send_mail, args=[app, msg])
thr.start()
return thr
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('user_id') is None:
return redirect('/sign_in')
return f(*args, **kwargs)
return decorated_function
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def async_send_mail(applic, msg):
with applic.app_context():
mail.send(msg)
def send_mail(subject, recipient, template, **kwargs):
msg = Message(subject, recipients=[recipient])
msg.html = render_template(template, **kwargs)
thr = Thread(target=async_send_mail, args=[app, msg])
thr.start()
return thr
<|reserved_special_token_1|>
from functools import wraps
from flask import redirect, render_template, session
from threading import Thread
from flask_mail import Message
from application import app, mail
ALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('user_id') is None:
return redirect('/sign_in')
return f(*args, **kwargs)
return decorated_function
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def async_send_mail(applic, msg):
with applic.app_context():
mail.send(msg)
def send_mail(subject, recipient, template, **kwargs):
msg = Message(subject, recipients=[recipient])
msg.html = render_template(template, **kwargs)
thr = Thread(target=async_send_mail, args=[app, msg])
thr.start()
return thr
<|reserved_special_token_1|>
#This is a file from CS50 Finance
from functools import wraps
from flask import redirect, render_template, session
from threading import Thread
from flask_mail import Message
from application import app, mail
ALLOWED_EXTENSIONS = {"png", "PNG", "jpg", "jpeg", "JPG", "JPEG"}
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get("user_id") is None:
return redirect("/sign_in")
return f(*args, **kwargs)
return decorated_function
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# Send message function
def async_send_mail(applic, msg):
with applic.app_context():
mail.send(msg)
def send_mail(subject, recipient, template, **kwargs):
    """Render *template* with **kwargs and email it to *recipient* asynchronously.

    Returns the started Thread so callers can join() it if needed.
    """
    msg = Message(subject, recipients=[recipient])
    msg.html = render_template(template, **kwargs)
    # worker thread keeps SMTP latency out of the request handler
    thr = Thread(target=async_send_mail, args=[app, msg])
    thr.start()
    return thr
|
flexible
|
{
"blob_id": "1a4da621add157fa6d1f578370d64594b102eeb5",
"index": 4245,
"step-1": "<mask token>\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\n<mask token>\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n",
"step-2": "<mask token>\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n",
"step-3": "<mask token>\nALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n",
"step-4": "from functools import wraps\nfrom flask import redirect, render_template, session\nfrom threading import Thread\nfrom flask_mail import Message\nfrom application import app, mail\nALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n",
"step-5": "#This is a file from CS50 Finance\nfrom functools import wraps\n\nfrom flask import redirect, render_template, session\nfrom threading import Thread\nfrom flask_mail import Message\nfrom application import app, mail\n\nALLOWED_EXTENSIONS = {\"png\", \"PNG\", \"jpg\", \"jpeg\", \"JPG\", \"JPEG\"}\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/sign_in\")\n return f(*args, **kwargs)\n return decorated_function\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n# Send message function\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# SPDX-FileCopyrightText: 2021 John Park for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import random
import board
import audiomp3
import audiopwmio
from adafruit_crickit import crickit
ss = crickit.seesaw # Crickit seesaw setup (GPIO expander on the Crickit board)

button = crickit.SIGNAL1 # momentary switch to trigger animation
ss.pin_mode(button, ss.INPUT_PULLUP)

LED = crickit.SIGNAL4 # standard LED for eyeball lighting
ss.pin_mode(LED, ss.OUTPUT)

attract_switch = crickit.SIGNAL8 # attract mode switch or jumper (closed/shorted = attract)
ss.pin_mode(attract_switch, ss.INPUT_PULLUP)

audio = audiopwmio.PWMAudioOut(board.A0) # Feather outputs this pin to Crickit amplifier
audio_files = [ # use your own mono .mp3 files; played round-robin per button press
    "phrase_01.mp3",
    "phrase_02.mp3",
    "phrase_03.mp3"
]
current_audio_file = 0  # index into audio_files, advanced after each playback

# two motors
motor_eye = crickit.dc_motor_1  # moves the eyeball
motor_lid = crickit.dc_motor_2  # opens/closes the lid
def open_lid():
    """Drive the lid motor open for 0.25 s, then stop (holds position)."""
    motor_lid.throttle = 1 # full speed open
    time.sleep(0.25)
    motor_lid.throttle = 0 # hold
def close_lid():
    """Drive the lid motor closed for 0.25 s, then stop."""
    motor_lid.throttle = -1 # full speed closed
    time.sleep(0.25)
    motor_lid.throttle = 0
def blink(times):
    """Flash the eye LED *times* times (0.1 s on, 0.1 s off per flash)."""
    count = 0
    while count < times:
        ss.digital_write(LED, True)
        time.sleep(0.1)
        ss.digital_write(LED, False)
        time.sleep(0.1)
        count += 1
def eye_look():
    """Dart the eye one way then back, with randomized speed and pauses."""
    motor_eye.throttle = random.uniform(0.6, 1.0)  # swing one direction
    time.sleep(random.random()) # 0 to 1.0 seconds
    motor_eye.throttle = 0  # pause
    time.sleep(random.random())
    motor_eye.throttle = random.uniform(-1.0, -0.6)  # swing back
    time.sleep(random.random())
    motor_eye.throttle = 0
    time.sleep(random.random())
while True:
    if ss.digital_read(attract_switch): # regular mode: attract switch not closed/shorted
        if not ss.digital_read(button): # button pressed (input pull-up: pressed reads low)
            decoder = audiomp3.MP3Decoder(open("ring.mp3", "rb"))
            audio.play(decoder)
            while audio.playing:  # wait for the doorbell ring to finish
                pass
            open_lid()
            blink(3)
            ss.digital_write(LED, True) # light the eye
            decoder = audiomp3.MP3Decoder(open(audio_files[current_audio_file], "rb"))
            audio.play(decoder)
            while audio.playing:
                eye_look()  # animate the eye for the duration of the phrase
            motor_eye.throttle = 0 # audio is finished, pause the eye
            blink(5)
            close_lid()
            current_audio_file = ((current_audio_file + 1) % (len(audio_files))) # go to next file

    else: # attract mode: run the animation unprompted
        open_lid()
        blink(3)
        ss.digital_write(LED, True)
        for _ in range(4):
            eye_look()
        time.sleep(1)
        blink(5)
        close_lid()
        time.sleep(random.randint(2, 8))  # idle a random interval between attract cycles
|
normal
|
{
"blob_id": "608c116cd42132bd63be5056f0aaf5c78933886e",
"index": 7536,
"step-1": "<mask token>\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\n<mask token>\n",
"step-2": "<mask token>\nss.pin_mode(button, ss.INPUT_PULLUP)\n<mask token>\nss.pin_mode(LED, ss.OUTPUT)\n<mask token>\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\n<mask token>\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\nwhile True:\n if ss.digital_read(attract_switch):\n if not ss.digital_read(button):\n decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n decoder = audiomp3.MP3Decoder(open(audio_files[\n current_audio_file], 'rb'))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0\n blink(5)\n close_lid()\n current_audio_file = (current_audio_file + 1) % len(audio_files)\n else:\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-3": "<mask token>\nss = crickit.seesaw\nbutton = crickit.SIGNAL1\nss.pin_mode(button, ss.INPUT_PULLUP)\nLED = crickit.SIGNAL4\nss.pin_mode(LED, ss.OUTPUT)\nattract_switch = crickit.SIGNAL8\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\naudio = audiopwmio.PWMAudioOut(board.A0)\naudio_files = ['phrase_01.mp3', 'phrase_02.mp3', 'phrase_03.mp3']\ncurrent_audio_file = 0\nmotor_eye = crickit.dc_motor_1\nmotor_lid = crickit.dc_motor_2\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\nwhile True:\n if ss.digital_read(attract_switch):\n if not ss.digital_read(button):\n decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n decoder = audiomp3.MP3Decoder(open(audio_files[\n current_audio_file], 'rb'))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0\n blink(5)\n close_lid()\n current_audio_file = (current_audio_file + 1) % len(audio_files)\n else:\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-4": "import time\nimport random\nimport board\nimport audiomp3\nimport audiopwmio\nfrom adafruit_crickit import crickit\nss = crickit.seesaw\nbutton = crickit.SIGNAL1\nss.pin_mode(button, ss.INPUT_PULLUP)\nLED = crickit.SIGNAL4\nss.pin_mode(LED, ss.OUTPUT)\nattract_switch = crickit.SIGNAL8\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\naudio = audiopwmio.PWMAudioOut(board.A0)\naudio_files = ['phrase_01.mp3', 'phrase_02.mp3', 'phrase_03.mp3']\ncurrent_audio_file = 0\nmotor_eye = crickit.dc_motor_1\nmotor_lid = crickit.dc_motor_2\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\nwhile True:\n if ss.digital_read(attract_switch):\n if not ss.digital_read(button):\n decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n decoder = audiomp3.MP3Decoder(open(audio_files[\n current_audio_file], 'rb'))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0\n blink(5)\n close_lid()\n current_audio_file = (current_audio_file + 1) % len(audio_files)\n else:\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-5": "# SPDX-FileCopyrightText: 2021 John Park for Adafruit Industries\n# SPDX-License-Identifier: MIT\nimport time\nimport random\nimport board\nimport audiomp3\nimport audiopwmio\nfrom adafruit_crickit import crickit\n\nss = crickit.seesaw # Crickit seesaw setup\n\nbutton = crickit.SIGNAL1 # momentary switch to trigger animation\nss.pin_mode(button, ss.INPUT_PULLUP)\n\nLED = crickit.SIGNAL4 # standard LED for eyeball lighting\nss.pin_mode(LED, ss.OUTPUT)\n\nattract_switch = crickit.SIGNAL8 # attract mode switch or jumper\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\n\naudio = audiopwmio.PWMAudioOut(board.A0) # Feather outputs this pin to Crickit amplifier\naudio_files = [ # use your own mono .mp3 files\n \"phrase_01.mp3\",\n \"phrase_02.mp3\",\n \"phrase_03.mp3\"\n]\ncurrent_audio_file = 0\n\n# two motors\nmotor_eye = crickit.dc_motor_1\nmotor_lid = crickit.dc_motor_2\n\ndef open_lid():\n motor_lid.throttle = 1 # full speed open\n time.sleep(0.25)\n motor_lid.throttle = 0 # hold\n\ndef close_lid():\n motor_lid.throttle = -1 # full speed closed\n time.sleep(0.25)\n motor_lid.throttle = 0\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random()) # 0 to 1.0 seconds\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\n\nwhile True:\n if ss.digital_read(attract_switch): # regular mode, attrack switch not closed/shorted\n if not ss.digital_read(button): # button has been pressed\n decoder = audiomp3.MP3Decoder(open(\"ring.mp3\", \"rb\"))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True) # light the eye\n decoder = audiomp3.MP3Decoder(open(audio_files[current_audio_file], \"rb\"))\n 
audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0 # audio is finished, pause the eye\n blink(5)\n close_lid()\n current_audio_file = ((current_audio_file + 1) % (len(audio_files))) # go to next file\n\n else: # attract mode\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#!usr/bin/python
# -*- coding:utf8 -*-
import time
import random
import asyncio
async def consumer(queue, name):
    """Forever pull values from *queue*, logging each one; one value per second.

    NOTE(review): this loop never exits — callers must cancel the task.
    """
    while True:
        val = await queue.get()
        print(f'{name} get a val: {val} at {time.strftime("%X")}')
        await asyncio.sleep(1)
async def producer(queue, name):
    """Push the integers 0..19 onto *queue*, logging each, 0.1 s apart."""
    i = 0
    while i < 20:
        await queue.put(i)
        print(f'{name} put a val: {i}')
        await asyncio.sleep(0.1)
        i += 1
async def main():
    """Run one producer against three consumers, exiting once the queue drains.

    Bug fix: the original awaited ``asyncio.gather`` on the consumer tasks
    directly, but consumers loop forever, so gather() never returned and the
    program hung. We now wait for the producer to finish, poll until the
    queue is empty, then cancel the consumers and reap them.
    """
    queue = asyncio.Queue()

    producer_task = asyncio.create_task(producer(queue, 'producer'))
    consumer_tasks = [
        asyncio.create_task(consumer(queue, f'consumer_{i}')) for i in range(3)
    ]

    # let the producer finish pushing all of its values
    await producer_task

    # wait for the consumers to drain whatever remains on the queue
    while not queue.empty():
        await asyncio.sleep(0.1)

    # consumers run forever by design; cancel them so the program can exit
    for task in consumer_tasks:
        task.cancel()
    await asyncio.gather(*consumer_tasks, return_exceptions=True)
# start = time.perf_counter()
asyncio.run(main())  # entry point: drive the producer/consumer demo to completion
# end = time.perf_counter()
# print(end - start)
|
normal
|
{
"blob_id": "e1172e2d9f20e56241829b3e4ccb4bcf6b5440be",
"index": 9233,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def consumer(queue, name):\n while True:\n val = await queue.get()\n print(f\"{name} get a val: {val} at {time.strftime('%X')}\")\n await asyncio.sleep(1)\n\n\nasync def producer(queue, name):\n for i in range(20):\n await queue.put(i)\n print(f'{name} put a val: {i}')\n await asyncio.sleep(0.1)\n\n\nasync def main():\n queue = asyncio.Queue()\n tasks = [asyncio.create_task(producer(queue, 'producer'))]\n for i in range(3):\n tasks.append(asyncio.create_task(consumer(queue, f'consumer_{i}')))\n await asyncio.gather(*tasks, return_exceptions=True)\n\n\nasyncio.run(main())\n",
"step-3": "import time\nimport random\nimport asyncio\n\n\nasync def consumer(queue, name):\n while True:\n val = await queue.get()\n print(f\"{name} get a val: {val} at {time.strftime('%X')}\")\n await asyncio.sleep(1)\n\n\nasync def producer(queue, name):\n for i in range(20):\n await queue.put(i)\n print(f'{name} put a val: {i}')\n await asyncio.sleep(0.1)\n\n\nasync def main():\n queue = asyncio.Queue()\n tasks = [asyncio.create_task(producer(queue, 'producer'))]\n for i in range(3):\n tasks.append(asyncio.create_task(consumer(queue, f'consumer_{i}')))\n await asyncio.gather(*tasks, return_exceptions=True)\n\n\nasyncio.run(main())\n",
"step-4": "#!usr/bin/python\n# -*- coding:utf8 -*-\nimport time\nimport random\nimport asyncio\n\n\nasync def consumer(queue, name):\n while True:\n val = await queue.get()\n print(f'{name} get a val: {val} at {time.strftime(\"%X\")}')\n await asyncio.sleep(1)\n\n\nasync def producer(queue, name):\n for i in range(20):\n await queue.put(i)\n print(f'{name} put a val: {i}')\n await asyncio.sleep(0.1)\n\n\nasync def main():\n queue = asyncio.Queue()\n\n tasks = [asyncio.create_task(producer(queue, 'producer'))]\n for i in range(3):\n tasks.append(asyncio.create_task(consumer(queue, f'consumer_{i}')))\n\n # await asyncio.sleep(10)\n\n await asyncio.gather(*tasks, return_exceptions=True)\n\n\n# start = time.perf_counter()\nasyncio.run(main())\n# end = time.perf_counter()\n# print(end - start)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestHand(unittest.TestCase):
def test_max_straight(self):
cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
def test_categories(self):
my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])
self.assertEqual(my_hand.category, Hand.Categories.flush)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight)
my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])
self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)
my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.two_pair)
my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.pair)
my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.high_card)
<|reserved_special_token_0|>
def test_cmp(self):
pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([
'JH', '3C', '4S', '5C', '9H'])
self.assertEqual(pair_to_high_card, False)
straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([
'10S', '9S', '8S', '5S', '6S'])
self.assertEqual(straight_to_flush, True)
def test_deck_validation(self):
"""
Test with some hands that are impossible to form with a 52-card deck
Five-of-a-kind
Something that is both a flush and has a pair (flush wins)
Something that is both a flush and four-of-a-kind (four-of-a-kind wins)
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestHand(unittest.TestCase):
def test_max_straight(self):
cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
def test_categories(self):
my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])
self.assertEqual(my_hand.category, Hand.Categories.flush)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight)
my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])
self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)
my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.two_pair)
my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.pair)
my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.high_card)
def test_category_options(self):
my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H'])
self.assertEqual(my_hand.category, Hand.Categories.straight)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D',
'8C', '7C'])
self.assertEqual(my_hand.category, Hand.Categories.flush)
my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S',
'8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
def test_cmp(self):
pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([
'JH', '3C', '4S', '5C', '9H'])
self.assertEqual(pair_to_high_card, False)
straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([
'10S', '9S', '8S', '5S', '6S'])
self.assertEqual(straight_to_flush, True)
def test_deck_validation(self):
"""
Test with some hands that are impossible to form with a 52-card deck
Five-of-a-kind
Something that is both a flush and has a pair (flush wins)
Something that is both a flush and four-of-a-kind (four-of-a-kind wins)
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestHand(unittest.TestCase):
def test_max_straight(self):
cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])
straight = max_straight(cards)
self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',
'8S', '7S']), reverse=True))
def test_categories(self):
my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])
self.assertEqual(my_hand.category, Hand.Categories.flush)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight)
my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])
self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)
my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.two_pair)
my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.pair)
my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])
self.assertEqual(my_hand.category, Hand.Categories.high_card)
def test_category_options(self):
my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H'])
self.assertEqual(my_hand.category, Hand.Categories.straight)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D',
'8C', '7C'])
self.assertEqual(my_hand.category, Hand.Categories.flush)
my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S',
'8S', '7S'])
self.assertEqual(my_hand.category, Hand.Categories.straight_flush)
my_hand = Hand(['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'])
self.assertEqual(my_hand.category, Hand.Categories.full_house)
my_hand = Hand(['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'])
self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)
def test_cmp(self):
pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([
'JH', '3C', '4S', '5C', '9H'])
self.assertEqual(pair_to_high_card, False)
straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([
'10S', '9S', '8S', '5S', '6S'])
self.assertEqual(straight_to_flush, True)
def test_deck_validation(self):
"""
Test with some hands that are impossible to form with a 52-card deck
Five-of-a-kind
Something that is both a flush and has a pair (flush wins)
Something that is both a flush and four-of-a-kind (four-of-a-kind wins)
"""
pass
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from poker import Hand, makeCard, Rank, count_ranks, RankCount, max_straight
class TestHand(unittest.TestCase):
    """Tests for poker hand categorisation, straight detection and comparison.

    Refactor: the original repeated the same construct-and-assert pattern
    dozens of times; the cases are now data-driven with ``subTest`` so a
    single failing hand is reported individually without stopping the run.
    """

    def test_max_straight(self):
        """max_straight() finds the best 5-card straight, ignoring extra cards."""
        expected = sorted(map(makeCard, ['10S', '6S', '9S', '8S', '7S']),
                          reverse=True)
        hands = [
            ['10S', '6S', '9S', '8S', '7S'],        # exactly five cards
            ['10S', '6S', '9S', '8S', '8C', '7S'],  # duplicate rank mixed in
            ['10S', '6S', '9S', '8S', '5C', '7S'],  # low extra card mixed in
        ]
        for cards in hands:
            with self.subTest(cards=cards):
                self.assertEqual(max_straight(map(makeCard, cards)), expected)

    def test_categories(self):
        """Each plain 5-card hand maps to its expected category."""
        cases = [
            (['KH', 'QH', 'JH', 'AH', '10H'], Hand.Categories.straight_flush),
            (['10S', '6S', '9S', '8S', '7S'], Hand.Categories.straight_flush),
            (['JH', 'JC', '9H', 'JS', 'JD'], Hand.Categories.four_of_a_kind),
            (['JH', 'JC', 'JS', '9D', '9H'], Hand.Categories.full_house),
            (['10S', '9S', '8S', '5S', '6S'], Hand.Categories.flush),
            (['10H', '6S', '9D', '8S', '7S'], Hand.Categories.straight),
            (['JH', 'JC', '9H', 'JS', '8D'], Hand.Categories.three_of_a_kind),
            (['JH', 'JC', 'QS', '9D', '9H'], Hand.Categories.two_pair),
            (['JH', 'JC', 'QS', '5D', '9H'], Hand.Categories.pair),
            (['JH', '3C', '4S', '5C', '9H'], Hand.Categories.high_card),
        ]
        for cards, category in cases:
            with self.subTest(cards=cards):
                self.assertEqual(Hand(cards).category, category)

    def test_category_options(self):
        """With more than five cards, the best available category wins."""
        cases = [
            (['10H', '6S', '9D', '8S', '7S', '7D', '7H'],
             Hand.Categories.straight),
            (['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'],
             Hand.Categories.four_of_a_kind),
            (['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'],
             Hand.Categories.full_house),
            (['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D', '8C', '7C'],
             Hand.Categories.flush),
            (['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S', '8S', '7S'],
             Hand.Categories.straight_flush),
            (['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'],
             Hand.Categories.full_house),
            (['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'],
             Hand.Categories.four_of_a_kind),
        ]
        for cards, category in cases:
            with self.subTest(cards=cards):
                self.assertEqual(Hand(cards).category, category)

    def test_cmp(self):
        """Hands order by category: pair beats high card, flush beats straight."""
        pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([
            'JH', '3C', '4S', '5C', '9H'])
        self.assertEqual(pair_to_high_card, False)
        straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([
            '10S', '9S', '8S', '5S', '6S'])
        self.assertEqual(straight_to_flush, True)

    def test_deck_validation(self):
        """
        Test with some hands that are impossible to form with a 52-card deck
        Five-of-a-kind
        Something that is both a flush and has a pair (flush wins)
        Something that is both a flush and four-of-a-kind (four-of-a-kind wins)
        """
        pass  # TODO: implement once Hand validates card multiplicity
if __name__ == '__main__':  # allow running this test module directly
    unittest.main()
<|reserved_special_token_1|>
# def test_categories:
# ["5S", "5H", "5D", "4S", "4H", "4D", "3D", "3S"]
import unittest
from poker import Hand, makeCard, Rank, count_ranks, RankCount, max_straight
class TestHand(unittest.TestCase):
    """Exercises straight detection, hand categorisation and hand ordering."""

    def test_max_straight(self):
        """max_straight picks the best five-card run even when duplicates
        or extra low cards are mixed into the input."""
        expected = sorted(map(makeCard, ['10S', '6S', '9S', '8S', '7S']),
                          reverse=True)
        inputs = (
            ['10S', '6S', '9S', '8S', '7S'],
            ['10S', '6S', '9S', '8S', '8C', '7S'],
            ['10S', '6S', '9S', '8S', '5C', '7S'],
        )
        for codes in inputs:
            # Pass an iterator, as the original call sites did.
            straight = max_straight(map(makeCard, codes))
            self.assertEqual(straight, expected)

    def test_categories(self):
        """Each five-card hand is classified into its expected category."""
        cases = (
            (['KH', 'QH', 'JH', 'AH', '10H'], Hand.Categories.straight_flush),
            (['10S', '6S', '9S', '8S', '7S'], Hand.Categories.straight_flush),
            (['JH', 'JC', '9H', 'JS', 'JD'], Hand.Categories.four_of_a_kind),
            (['JH', 'JC', 'JS', '9D', '9H'], Hand.Categories.full_house),
            (['10S', '9S', '8S', '5S', '6S'], Hand.Categories.flush),
            (['10H', '6S', '9D', '8S', '7S'], Hand.Categories.straight),
            (['JH', 'JC', '9H', 'JS', '8D'], Hand.Categories.three_of_a_kind),
            (['JH', 'JC', 'QS', '9D', '9H'], Hand.Categories.two_pair),
            (['JH', 'JC', 'QS', '5D', '9H'], Hand.Categories.pair),
            (['JH', '3C', '4S', '5C', '9H'], Hand.Categories.high_card),
        )
        for cards, category in cases:
            self.assertEqual(Hand(cards).category, category)

    def test_category_options(self):
        """With more than five cards the strongest available category must
        win over weaker interpretations of the same cards."""
        cases = (
            # The straight beats the trip sevens hidden in the same cards.
            (['10H', '6S', '9D', '8S', '7S', '7D', '7H'],
             Hand.Categories.straight),
            (['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'],
             Hand.Categories.four_of_a_kind),
            (['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'],
             Hand.Categories.full_house),
            (['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D', '8C', '7C'],
             Hand.Categories.flush),
            # The royal flush is chosen out of two possible straight flushes.
            (['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S', '8S', '7S'],
             Hand.Categories.straight_flush),
            # The full house is built from the best trips and pair.
            (['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'],
             Hand.Categories.full_house),
            # Four fives plus the best kicker available.
            (['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'],
             Hand.Categories.four_of_a_kind),
        )
        for cards, category in cases:
            self.assertEqual(Hand(cards).category, category)

    def test_cmp(self):
        """Hand ordering respects category strength."""
        result = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand(
            ['JH', '3C', '4S', '5C', '9H'])
        self.assertEqual(result, False)
        result = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand(
            ['10S', '9S', '8S', '5S', '6S'])
        self.assertEqual(result, True)

    def test_deck_validation(self):
        """
        Test with some hands that are impossible to form with a 52-card deck
        Five-of-a-kind
        Something that is both a flush and has a pair (flush wins)
        Something that is both a flush and four-of-a-kind (four-of-a-kind wins)
        """
        pass
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
flexible
|
{
"blob_id": "5b8d1bd026e97bb7508a500048f940abf0253471",
"index": 9698,
"step-1": "<mask token>\n\n\nclass TestHand(unittest.TestCase):\n\n def test_max_straight(self):\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n\n def test_categories(self):\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])\n self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.two_pair)\n my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.pair)\n my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.high_card)\n <mask token>\n\n def test_cmp(self):\n pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([\n 'JH', '3C', '4S', '5C', '9H'])\n 
self.assertEqual(pair_to_high_card, False)\n straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([\n '10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(straight_to_flush, True)\n\n def test_deck_validation(self):\n \"\"\"\n \tTest with some hands that are impossible to form with a 52-card deck\n \tFive-of-a-kind\n \tSomething that is both a flush and has a pair (flush wins)\n \tSomething that is both a flush and four-of-a-kind (four-of-a-kind wins)\n \t\"\"\"\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestHand(unittest.TestCase):\n\n def test_max_straight(self):\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n\n def test_categories(self):\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])\n self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.two_pair)\n my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.pair)\n my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.high_card)\n\n def test_category_options(self):\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H'])\n self.assertEqual(my_hand.category, 
Hand.Categories.straight)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D',\n '8C', '7C'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S',\n '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n\n def test_cmp(self):\n pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([\n 'JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(pair_to_high_card, False)\n straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([\n '10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(straight_to_flush, True)\n\n def test_deck_validation(self):\n \"\"\"\n \tTest with some hands that are impossible to form with a 52-card deck\n \tFive-of-a-kind\n \tSomething that is both a flush and has a pair (flush wins)\n \tSomething that is both a flush and four-of-a-kind (four-of-a-kind wins)\n \t\"\"\"\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestHand(unittest.TestCase):\n\n def test_max_straight(self):\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n\n def test_categories(self):\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])\n self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.two_pair)\n my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.pair)\n my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.high_card)\n\n def test_category_options(self):\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H'])\n self.assertEqual(my_hand.category, 
Hand.Categories.straight)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D',\n '8C', '7C'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S',\n '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n\n def test_cmp(self):\n pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([\n 'JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(pair_to_high_card, False)\n straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([\n '10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(straight_to_flush, True)\n\n def test_deck_validation(self):\n \"\"\"\n \tTest with some hands that are impossible to form with a 52-card deck\n \tFive-of-a-kind\n \tSomething that is both a flush and has a pair (flush wins)\n \tSomething that is both a flush and four-of-a-kind (four-of-a-kind wins)\n \t\"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom poker import Hand, makeCard, Rank, count_ranks, RankCount, max_straight\n\n\nclass TestHand(unittest.TestCase):\n\n def test_max_straight(self):\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '8C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n cards = map(makeCard, ['10S', '6S', '9S', '8S', '5C', '7S'])\n straight = max_straight(cards)\n self.assertEqual(straight, sorted(map(makeCard, ['10S', '6S', '9S',\n '8S', '7S']), reverse=True))\n\n def test_categories(self):\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['10S', '6S', '9S', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', 'JD'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'JS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight)\n my_hand = Hand(['JH', 'JC', '9H', 'JS', '8D'])\n self.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)\n my_hand = Hand(['JH', 'JC', 'QS', '9D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.two_pair)\n my_hand = Hand(['JH', 'JC', 'QS', '5D', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.pair)\n my_hand = Hand(['JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(my_hand.category, Hand.Categories.high_card)\n\n def test_category_options(self):\n my_hand = 
Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H'])\n self.assertEqual(my_hand.category, Hand.Categories.straight)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '7C'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n my_hand = Hand(['10H', '6S', '9D', '8S', '7S', '7D', '7H', '8C'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['10S', '9S', '8S', '5S', '6S', '10H', '6D', '9D',\n '8C', '7C'])\n self.assertEqual(my_hand.category, Hand.Categories.flush)\n my_hand = Hand(['KH', 'QH', 'JH', 'AH', '10H', '10S', '6S', '9S',\n '8S', '7S'])\n self.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n my_hand = Hand(['5S', '5H', '5D', '4S', '4H', '4D', '3D', '3S'])\n self.assertEqual(my_hand.category, Hand.Categories.full_house)\n my_hand = Hand(['5S', '5H', '5D', '5C', '4S', '4H', '3C', '3D', '3S'])\n self.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n\n def test_cmp(self):\n pair_to_high_card = Hand(['JH', 'JC', 'QS', '5D', '9H']) < Hand([\n 'JH', '3C', '4S', '5C', '9H'])\n self.assertEqual(pair_to_high_card, False)\n straight_to_flush = Hand(['10H', '6S', '9D', '8S', '7S']) < Hand([\n '10S', '9S', '8S', '5S', '6S'])\n self.assertEqual(straight_to_flush, True)\n\n def test_deck_validation(self):\n \"\"\"\n \tTest with some hands that are impossible to form with a 52-card deck\n \tFive-of-a-kind\n \tSomething that is both a flush and has a pair (flush wins)\n \tSomething that is both a flush and four-of-a-kind (four-of-a-kind wins)\n \t\"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# def test_categories:\n\t\n\n# [\"5S\", \"5H\", \"5D\", \"4S\", \"4H\", \"4D\", \"3D\", \"3S\"] \n\nimport unittest\n\nfrom poker import Hand, makeCard, Rank, count_ranks, RankCount, max_straight\n\nclass TestHand(unittest.TestCase):\n\n # def test_heap_multiples(self):\n # \theaped_multiples = Hand.heap_multiples({\"J\":4, \"2\":3})\n # \tprint heaped_multiples\n # \tself.assertEqual(heaped_multiples, [(4, \"J\"), (3,\"2\")], \"failure in heap_multiples\")\n\n def test_max_straight(self):\n \tcards = map(makeCard, [\"10S\", \"6S\", \"9S\", \"8S\", \"7S\"])\n \tstraight = max_straight(cards)\n \tself.assertEqual(straight, sorted(map(makeCard, [\"10S\", \"6S\", \"9S\", \"8S\", \"7S\"]), reverse=True))\n\n \tcards = map(makeCard, [\"10S\", \"6S\", \"9S\", \"8S\", \"8C\", \"7S\"])\n \tstraight = max_straight(cards)\n \tself.assertEqual(straight, sorted(map(makeCard, [\"10S\", \"6S\", \"9S\", \"8S\", \"7S\"]), reverse=True))\n\n \tcards = map(makeCard, [\"10S\", \"6S\", \"9S\", \"8S\", \"5C\", \"7S\"])\n \tstraight = max_straight(cards)\n \tself.assertEqual(straight, sorted(map(makeCard, [\"10S\", \"6S\", \"9S\", \"8S\", \"7S\"]), reverse=True))\n\n def test_categories(self):\n\n \tmy_hand = Hand([\"KH\", \"QH\", \"JH\", \"AH\", \"10H\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n\n \tmy_hand = Hand([\"10S\", \"6S\", \"9S\", \"8S\", \"7S\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n\n \tmy_hand = Hand([\"JH\", \"JC\", \"9H\", \"JS\", \"JD\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n\n \tmy_hand = Hand([\"JH\", \"JC\", \"JS\", \"9D\", \"9H\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.full_house)\n\n \tmy_hand = Hand([\"10S\", \"9S\", \"8S\", \"5S\", \"6S\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.flush)\n\n \tmy_hand = Hand([\"10H\", \"6S\", \"9D\", \"8S\", \"7S\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.straight)\n\n 
\tmy_hand = Hand([\"JH\", \"JC\", \"9H\", \"JS\", \"8D\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.three_of_a_kind)\n\n \tmy_hand = Hand([\"JH\", \"JC\", \"QS\", \"9D\", \"9H\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.two_pair)\n\n \tmy_hand = Hand([\"JH\", \"JC\", \"QS\", \"5D\", \"9H\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.pair)\n\n \tmy_hand = Hand([\"JH\", \"3C\", \"4S\", \"5C\", \"9H\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.high_card)\n\n def test_category_options(self):\n\n \tmy_hand = Hand([\"10H\", \"6S\", \"9D\", \"8S\", \"7S\", \"7D\", \"7H\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.straight)\n\n \tmy_hand = Hand([\"10H\", \"6S\", \"9D\", \"8S\", \"7S\", \"7D\", \"7H\", \"7C\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n\n \tmy_hand = Hand([\"10H\", \"6S\", \"9D\", \"8S\", \"7S\", \"7D\", \"7H\", \"8C\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.full_house)\n\n \tmy_hand = Hand([\"10S\", \"9S\", \"8S\", \"5S\", \"6S\", \"10H\", \"6D\", \"9D\", \"8C\", \"7C\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.flush)\n\n \tmy_hand = Hand([\"KH\", \"QH\", \"JH\", \"AH\", \"10H\", \"10S\", \"6S\", \"9S\", \"8S\", \"7S\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.straight_flush)\n \t# It gets the royal flush\n\n \tmy_hand = Hand([\"5S\", \"5H\", \"5D\", \"4S\", \"4H\", \"4D\", \"3D\", \"3S\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.full_house)\n \t# It gets the fours\n\n \tmy_hand = Hand([\"5S\", \"5H\", \"5D\", \"5C\", \"4S\", \"4H\", \"3C\", \"3D\", \"3S\"])\n \tself.assertEqual(my_hand.category, Hand.Categories.four_of_a_kind)\n \t# get the 4 kicker\n\n\n\n def test_cmp(self):\n \tpair_to_high_card = Hand([\"JH\", \"JC\", \"QS\", \"5D\", \"9H\"]) < Hand([\"JH\", \"3C\", \"4S\", \"5C\", \"9H\"])\n \tself.assertEqual(pair_to_high_card, False)\n\n \tstraight_to_flush = Hand([\"10H\", \"6S\", \"9D\", 
\"8S\", \"7S\"]) < Hand([\"10S\", \"9S\", \"8S\", \"5S\", \"6S\"])\n \tself.assertEqual(straight_to_flush, True)\n\n\n def test_deck_validation(self):\n \t\"\"\"\n \tTest with some hands that are impossible to form with a 52-card deck\n \tFive-of-a-kind\n \tSomething that is both a flush and has a pair (flush wins)\n \tSomething that is both a flush and four-of-a-kind (four-of-a-kind wins)\n \t\"\"\"\n \tpass\n\nif __name__ == '__main__':\n unittest.main()",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""Use the random module to produce an integer in the range 50~150."""
import random

# random.randint is inclusive on BOTH ends, so the upper bound must be 150;
# the original randint(50, 151) could return 151, outside the asked range.
num1 = random.randint(50, 150)
print(num1)
|
normal
|
{
"blob_id": "7d3355ee775f759412308ab68a7aa409b9c74b20",
"index": 708,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(num1)\n",
"step-3": "<mask token>\nnum1 = random.randint(50, 151)\nprint(num1)\n",
"step-4": "<mask token>\nimport random\nnum1 = random.randint(50, 151)\nprint(num1)\n",
"step-5": "'''\r\n使用random模块,如何产生 50~150之间的数?\r\n'''\r\n\r\n\r\nimport random\r\n\r\nnum1 = random.randint(50,151)\r\nprint(num1)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(packages=find_packages(), setup_requires=['flask'], name='mith1')
<|reserved_special_token_1|>
from setuptools import setup, find_packages
setup(packages=find_packages(), setup_requires=['flask'], name='mith1')
<|reserved_special_token_1|>
from setuptools import setup, find_packages

# Minimal packaging configuration for the "mith1" distribution: every
# discovered package is included, and flask is needed at setup time.
setup(
    name="mith1",
    packages=find_packages(),
    setup_requires=["flask"],
)
|
flexible
|
{
"blob_id": "a5a7cd112faad1096ce4c6f04b2179fbdf732702",
"index": 1479,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(packages=find_packages(), setup_requires=['flask'], name='mith1')\n",
"step-3": "from setuptools import setup, find_packages\nsetup(packages=find_packages(), setup_requires=['flask'], name='mith1')\n",
"step-4": "from setuptools import setup, find_packages\nsetup(\n packages=find_packages(),\n setup_requires=[\"flask\"],\n name=\"mith1\",\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Sender:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def send_frame(self, frame):
self.receiver.receiver_frame(frame)
pass
def send_frame_selective(self):
self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
self.ChosenSumAlgorithm)
sizeoftable = len(self.tableOfFrames)
for i in range(0, sizeoftable):
self.ACK.append(False)
Receiver.numberOfFrames = sizeoftable
Receiver.reset_Data(Receiver)
endOfWindow = self.windowSize - 1
i = 0
while i < sizeoftable:
isCorrectFrame = True
for j in range(i, endOfWindow + 1):
if j == sizeoftable:
break
if self.ACK[j] == False:
print(f'SENDER: wysłano obiekt nr "{j}"')
self.ACK[j] = self.receiver.recieve_frame(self.
tableOfFrames[j], j)
else:
pass
for j in range(i, endOfWindow + 1):
if j == sizeoftable:
break
if self.ACK[j] == False:
isCorrectFrame = False
if isCorrectFrame:
if endOfWindow + self.windowSize >= sizeoftable:
endOfWindow = sizeoftable
else:
endOfWindow += self.windowSize
i += self.windowSize
else:
count = 0
for j in range(i, endOfWindow + 1):
if self.ACK[j] == True:
count += 1
else:
break
endOfWindow += count
i += count
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Frame:
value = None
seq_number = 0
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Sender:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, receiver):
self.receiver = receiver
pass
def send_frame(self, frame):
self.receiver.receiver_frame(frame)
pass
def send_frame_selective(self):
self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
self.ChosenSumAlgorithm)
sizeoftable = len(self.tableOfFrames)
for i in range(0, sizeoftable):
self.ACK.append(False)
Receiver.numberOfFrames = sizeoftable
Receiver.reset_Data(Receiver)
endOfWindow = self.windowSize - 1
i = 0
while i < sizeoftable:
isCorrectFrame = True
for j in range(i, endOfWindow + 1):
if j == sizeoftable:
break
if self.ACK[j] == False:
print(f'SENDER: wysłano obiekt nr "{j}"')
self.ACK[j] = self.receiver.recieve_frame(self.
tableOfFrames[j], j)
else:
pass
for j in range(i, endOfWindow + 1):
if j == sizeoftable:
break
if self.ACK[j] == False:
isCorrectFrame = False
if isCorrectFrame:
if endOfWindow + self.windowSize >= sizeoftable:
endOfWindow = sizeoftable
else:
endOfWindow += self.windowSize
i += self.windowSize
else:
count = 0
for j in range(i, endOfWindow + 1):
if self.ACK[j] == True:
count += 1
else:
break
endOfWindow += count
i += count
def send_frame_go_back_n(self, delay):
self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
self.ChosenSumAlgorithm)
size_of_table = len(self.tableOfFrames)
for i in range(0, size_of_table):
self.ACK.append(False)
self.receiver.numberOfValues = self.image.size
self.receiver.numberOfFrames = len(self.tableOfFrames)
self.receiver.reset_Data()
i = 0
win_start = i
win_end = i + self.windowSize
length_table_of_frames = len(self.tableOfFrames)
while i < length_table_of_frames:
while i < win_end and i < length_table_of_frames:
data = self.tableOfFrames[i]
sequence_number = i
print(f'\nSENDER: wysłano obiekt nr "{i}"')
self.ACK[i] = self.receiver.recieve_frame(frame=data,
sequence_number=sequence_number)
time.sleep(delay)
if self.ACK[win_start]:
print(f'SENDER: odebrano ATK "{win_start}"\n')
win_end += 1
win_start += 1
else:
if win_end > length_table_of_frames:
win_end = length_table_of_frames
for k in range(win_start + 1, win_end):
if self.ACK[k]:
print(
f'SENDER: odebrano ATK "{k}, Pominięto ATK "{win_start}"\n'
)
i = win_start - 1
break
i += 1
time.sleep(delay)
pass
pass
time.sleep(delay)
if i == win_end:
i = win_start
pass
print('SENDER: koniec wysyłania\n')
pass
<|reserved_special_token_0|>
class Frame:
value = None
seq_number = 0
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Sender:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, receiver):
self.receiver = receiver
pass
def send_frame(self, frame):
self.receiver.receiver_frame(frame)
pass
def send_frame_selective(self):
self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
self.ChosenSumAlgorithm)
sizeoftable = len(self.tableOfFrames)
for i in range(0, sizeoftable):
self.ACK.append(False)
Receiver.numberOfFrames = sizeoftable
Receiver.reset_Data(Receiver)
endOfWindow = self.windowSize - 1
i = 0
while i < sizeoftable:
isCorrectFrame = True
for j in range(i, endOfWindow + 1):
if j == sizeoftable:
break
if self.ACK[j] == False:
print(f'SENDER: wysłano obiekt nr "{j}"')
self.ACK[j] = self.receiver.recieve_frame(self.
tableOfFrames[j], j)
else:
pass
for j in range(i, endOfWindow + 1):
if j == sizeoftable:
break
if self.ACK[j] == False:
isCorrectFrame = False
if isCorrectFrame:
if endOfWindow + self.windowSize >= sizeoftable:
endOfWindow = sizeoftable
else:
endOfWindow += self.windowSize
i += self.windowSize
else:
count = 0
for j in range(i, endOfWindow + 1):
if self.ACK[j] == True:
count += 1
else:
break
endOfWindow += count
i += count
def send_frame_go_back_n(self, delay):
self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
self.ChosenSumAlgorithm)
size_of_table = len(self.tableOfFrames)
for i in range(0, size_of_table):
self.ACK.append(False)
self.receiver.numberOfValues = self.image.size
self.receiver.numberOfFrames = len(self.tableOfFrames)
self.receiver.reset_Data()
i = 0
win_start = i
win_end = i + self.windowSize
length_table_of_frames = len(self.tableOfFrames)
while i < length_table_of_frames:
while i < win_end and i < length_table_of_frames:
data = self.tableOfFrames[i]
sequence_number = i
print(f'\nSENDER: wysłano obiekt nr "{i}"')
self.ACK[i] = self.receiver.recieve_frame(frame=data,
sequence_number=sequence_number)
time.sleep(delay)
if self.ACK[win_start]:
print(f'SENDER: odebrano ATK "{win_start}"\n')
win_end += 1
win_start += 1
else:
if win_end > length_table_of_frames:
win_end = length_table_of_frames
for k in range(win_start + 1, win_end):
if self.ACK[k]:
print(
f'SENDER: odebrano ATK "{k}, Pominięto ATK "{win_start}"\n'
)
i = win_start - 1
break
i += 1
time.sleep(delay)
pass
pass
time.sleep(delay)
if i == win_end:
i = win_start
pass
print('SENDER: koniec wysyłania\n')
pass
def send_frame_stop_and_wait(self):
self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
self.ChosenSumAlgorithm)
print(self.tableOfFrames)
sizeoftable = len(self.tableOfFrames)
for i in range(0, sizeoftable):
self.ACK.append(False)
Receiver.numberOfValues = number
Receiver.numberOfFrames = sizeoftable
Receiver.reset_Data(Receiver)
i = 0
endOfWindow = self.windowSize - 1
print('Rozmiar tablicy ramek:')
print(sizeoftable)
while i < sizeoftable:
self.ACK[i] = self.receiver.receive_frame_stop_and_wait(self.
tableOfFrames[i], i)
if self.ACK[i]:
i += 1
else:
self.ACK[i] = False
continue
class Frame:
value = None
seq_number = 0
pass
<|reserved_special_token_1|>
from Receiver import Receiver
import time
import Image
class Sender:
    """Transmitting side of a simulated ARQ link.

    Splits an image into checksummed frames (via ``Image.gruop_into_frames``)
    and pushes them to ``self.receiver`` using one of three retransmission
    schemes: selective repeat, go-back-N, or stop-and-wait.

    Callers are expected to assign ``image``, ``size``, ``windowSize`` and
    ``ChosenSumAlgorithm`` before invoking a send method.
    """

    # Class-level defaults kept for backward compatibility; __init__ shadows
    # the mutable lists with per-instance copies (see below).
    ACK = []
    size = None                 # frame payload size handed to the framer
    windowSize = None           # sliding-window width
    tableOfFrames = []
    ChosenSumAlgorithm = None   # checksum algorithm used when framing

    def __init__(self, receiver):
        """Bind this sender to *receiver*.

        Bug fix: the class-level ``ACK``/``tableOfFrames`` lists were shared
        by every Sender instance; giving each instance its own lists removes
        that aliasing.
        """
        self.receiver = receiver
        self.ACK = []
        self.tableOfFrames = []

    def send_frame(self, frame):
        """Hand a single frame straight to the receiver (no ARQ)."""
        self.receiver.receiver_frame(frame)

    def send_frame_selective(self):
        """Send every frame of the image using selective repeat."""
        # Build the frame table from the image.
        self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
            self.ChosenSumAlgorithm)
        sizeoftable = len(self.tableOfFrames)
        # One ACK slot per frame, all initially unacknowledged.
        for i in range(0, sizeoftable):
            self.ACK.append(False)
        # Prime the receiver with the expected frame count.
        Receiver.numberOfFrames = sizeoftable
        Receiver.reset_Data(Receiver)
        endOfWindow = self.windowSize - 1
        i = 0
        while i < sizeoftable:
            isCorrectFrame = True
            # Transmit every frame in the window that is still unacknowledged.
            for j in range(i, endOfWindow + 1):
                if j == sizeoftable:
                    break
                if self.ACK[j] == False:
                    print(f'SENDER: wysłano obiekt nr "{j}"')
                    self.ACK[j] = self.receiver.recieve_frame(self.
                        tableOfFrames[j], j)
            # Check whether the whole window was delivered cleanly.
            for j in range(i, endOfWindow + 1):
                if j == sizeoftable:
                    break
                if self.ACK[j] == False:
                    isCorrectFrame = False
            if isCorrectFrame:
                # Whole window acknowledged: slide forward by a full window.
                if endOfWindow + self.windowSize >= sizeoftable:
                    endOfWindow = sizeoftable
                else:
                    endOfWindow += self.windowSize
                i += self.windowSize
            else:
                # Slide only past the leading run of acknowledged frames.
                # NOTE(review): if the first frame of the window keeps being
                # rejected, count stays 0 and the window is retransmitted.
                count = 0
                for j in range(i, endOfWindow + 1):
                    if self.ACK[j] == True:
                        count += 1
                    else:
                        break
                endOfWindow += count
                i += count

    def send_frame_go_back_n(self, delay):
        """Send every frame with go-back-N, sleeping *delay* between steps."""
        self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
            self.ChosenSumAlgorithm)
        size_of_table = len(self.tableOfFrames)
        for i in range(0, size_of_table):
            self.ACK.append(False)
        # assumes self.image exposes a .size attribute (PIL-like) — confirm.
        self.receiver.numberOfValues = self.image.size
        self.receiver.numberOfFrames = len(self.tableOfFrames)
        self.receiver.reset_Data()
        i = 0
        win_start = i
        win_end = i + self.windowSize
        length_table_of_frames = len(self.tableOfFrames)
        while i < length_table_of_frames:
            while i < win_end and i < length_table_of_frames:
                data = self.tableOfFrames[i]
                sequence_number = i
                print(f'\nSENDER: wysłano obiekt nr "{i}"')
                self.ACK[i] = self.receiver.recieve_frame(frame=data,
                    sequence_number=sequence_number)
                time.sleep(delay)
                if self.ACK[win_start]:
                    # Oldest outstanding frame acknowledged: slide the window.
                    print(f'SENDER: odebrano ATK "{win_start}"\n')
                    win_end += 1
                    win_start += 1
                else:
                    # A later ACK while the oldest is missing means the
                    # oldest frame was lost: go back and resend from it.
                    if win_end > length_table_of_frames:
                        win_end = length_table_of_frames
                    for k in range(win_start + 1, win_end):
                        if self.ACK[k]:
                            print(
                                f'SENDER: odebrano ATK "{k}, Pominięto ATK "{win_start}"\n'
                                )
                            i = win_start - 1
                            break
                i += 1
                time.sleep(delay)
            time.sleep(delay)
            if i == win_end:
                i = win_start
        print('SENDER: koniec wysyłania\n')

    def send_frame_stop_and_wait(self):
        """Send every frame with stop-and-wait: each frame is retransmitted
        until the receiver acknowledges it."""
        self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,
            self.ChosenSumAlgorithm)
        print(self.tableOfFrames)
        sizeoftable = len(self.tableOfFrames)
        for i in range(0, sizeoftable):
            self.ACK.append(False)
        # Bug fix: the original assigned the undefined name ``number`` here,
        # raising NameError. Mirror send_frame_go_back_n and use the image
        # size as the receiver's expected value count.
        Receiver.numberOfValues = self.image.size
        Receiver.numberOfFrames = sizeoftable
        Receiver.reset_Data(Receiver)
        i = 0
        print('Rozmiar tablicy ramek:')
        print(sizeoftable)
        while i < sizeoftable:
            self.ACK[i] = self.receiver.receive_frame_stop_and_wait(self.
                tableOfFrames[i], i)
            if self.ACK[i]:
                i += 1
            else:
                # NAK: stay on the same frame and resend it.
                self.ACK[i] = False
                continue
class Frame:
    # Payload carried by the frame (filled in by the sender).
    value = None
    # Sequence number used for ordering and ACK matching.
    seq_number = 0
    pass
<|reserved_special_token_1|>
from Receiver import Receiver
import time
import Image
class Sender:
    """Transmits image frames to a Receiver using one of three ARQ schemes:
    selective repeat, go-back-N, or stop-and-wait.

    NOTE(review): ACK and tableOfFrames are mutable *class* attributes and
    are therefore shared by every Sender instance; `self.image` and
    `self.size` are assigned elsewhere before any send method is called --
    confirm against callers.
    """
    ACK = []
    size = None
    windowSize = None
    tableOfFrames = []
    ChosenSumAlgorithm = None
    def __init__(self, receiver):
        # Receiver instance that frames are delivered to.
        self.receiver = receiver
        pass

    def send_frame(self, frame):
        # Deliver a single frame with no ARQ bookkeeping.
        self.receiver.receiver_frame(frame)
        pass

    def send_frame_selective(self):
        """Selective-repeat ARQ: resend only the frames that were NAKed."""
        # Build the table of frames (payload + checksum) from the image.
        self.tableOfFrames = Image.gruop_into_frames(self.image, self.size, self.ChosenSumAlgorithm)

        # Number of frames queued for transmission.
        sizeoftable = len(self.tableOfFrames)

        # One ACK/NAK flag per frame, all initially un-acknowledged.
        for i in range(0, sizeoftable):
            self.ACK.append(False)

        # Hand the receiver the values it needs before the transfer starts.
        # NOTE(review): these are set on the Receiver *class*, not on
        # self.receiver -- presumably intentional, but verify.
        Receiver.numberOfFrames = sizeoftable
        Receiver.reset_Data(Receiver)
        endOfWindow = self.windowSize - 1
        i = 0
        # Main loop: keep sending until every frame in the table is ACKed.
        while i < sizeoftable:
            isCorrectFrame = True
            # Walk the current window and (re)send every frame still NAKed.
            for j in range(i, endOfWindow + 1):
                if j == sizeoftable:
                    break
                if self.ACK[j] == False:
                    # time.sleep(0.2)
                    print(f'SENDER: wysłano obiekt nr "{j}"')
                    self.ACK[j] = self.receiver.recieve_frame(self.tableOfFrames[j], j)
                else:
                    pass
            # Check whether the whole window went through without errors.
            for j in range(i, endOfWindow + 1):
                if j == sizeoftable:
                    break
                if self.ACK[j] == False:
                    isCorrectFrame = False
            # Slide the window: a full step on success, otherwise only past
            # the leading run of frames that were acknowledged.
            if isCorrectFrame:
                if (endOfWindow + self.windowSize) >= sizeoftable:
                    endOfWindow = sizeoftable
                else:
                    endOfWindow += self.windowSize
                i += self.windowSize
            else:
                count = 0
                for j in range(i, endOfWindow + 1):
                    if self.ACK[j] == True:
                        count += 1
                    else:
                        break
                endOfWindow += count
                i += count

    def send_frame_go_back_n(self, delay):
        """Go-back-N ARQ: on a missing ACK, rewind and resend the window."""
        # self.image = interfere(self.image)
        # Prepare the transmission:
        # 1. build the table of frames (payload + checksum) from the image.
        self.tableOfFrames = Image.gruop_into_frames(self.image, self.size, self.ChosenSumAlgorithm)

        # Number of frames to send.
        size_of_table = len(self.tableOfFrames)

        # One ACK flag per frame, all initially un-acknowledged.
        for i in range(0, size_of_table):
            self.ACK.append(False)

        # Hand the receiver the values it needs before the transfer starts.
        self.receiver.numberOfValues = self.image.size
        self.receiver.numberOfFrames = len(self.tableOfFrames)
        self.receiver.reset_Data()

        # Start the transfer.
        i = 0
        win_start = i
        win_end = i + self.windowSize
        length_table_of_frames = len(self.tableOfFrames)

        while i < length_table_of_frames:
            while i < win_end and i < length_table_of_frames:
                # Fetch the next frame to send.
                data = self.tableOfFrames[i]
                sequence_number = i

                # Transmit the frame.
                print(f'\nSENDER: wysłano obiekt nr "{i}"')
                self.ACK[i] = self.receiver.recieve_frame(frame=data, sequence_number=sequence_number)

                time.sleep(delay)
                if self.ACK[win_start]:
                    # Oldest outstanding frame acknowledged: slide the window.
                    print(f'SENDER: odebrano ATK "{win_start}"\n')
                    win_end += 1
                    win_start += 1
                    # i = win_start
                else:
                    # Oldest frame unacknowledged: if any later frame was
                    # ACKed, rewind i so the window is resent from its start.
                    if win_end > length_table_of_frames:
                        win_end = length_table_of_frames
                    for k in range(win_start + 1, win_end):
                        if self.ACK[k]:
                            print(f'SENDER: odebrano ATK "{k}, Pominięto ATK "{win_start}"\n')
                            i = win_start - 1
                            break

                i += 1
                time.sleep(delay)
                pass
            pass
            time.sleep(delay)
            if i == win_end:
                # Window exhausted without progress: retry from its start.
                i = win_start
            pass

        print('SENDER: koniec wysyłania\n')
        pass

    # Sending method for the stop-and-wait protocol.
    def send_frame_stop_and_wait(self):
        """Stop-and-wait ARQ: resend each frame until it is acknowledged."""
        # test
        # print(self.image)
        self.tableOfFrames = Image.gruop_into_frames(self.image, self.size, self.ChosenSumAlgorithm)

        # Show the full table of frames.
        print(self.tableOfFrames)

        # Number of frames to send.
        sizeoftable = len(self.tableOfFrames)

        # One ACK flag per frame, all initially un-acknowledged.
        for i in range(0, sizeoftable):
            self.ACK.append(False)

        # Hand the receiver the values it needs before the transfer starts.
        # NOTE(review): `number` is not defined anywhere in this file, so
        # this line raises NameError; the go-back-N variant uses
        # self.image.size here, which is probably what was intended.
        Receiver.numberOfValues = number
        Receiver.numberOfFrames = sizeoftable
        Receiver.reset_Data(Receiver)
        i = 0
        endOfWindow = self.windowSize -1

        print("Rozmiar tablicy ramek:")
        print(sizeoftable)

        # Send the frames one at a time, repeating each until ACKed.
        while i < sizeoftable:
            self.ACK[i] = self.receiver.receive_frame_stop_and_wait(self.tableOfFrames[i], i)
            if self.ACK[i]:
                i += 1
            else:
                self.ACK[i] = False
                continue
class Frame:
    """Simple container describing one transmission frame."""

    # Sequence number used for ordering and ACK matching; payload is empty
    # until the sender fills it in.
    seq_number = 0
    value = None
|
flexible
|
{
"blob_id": "ecbcd023b8fec5763c6ff7f4cd0999426fae4a50",
"index": 9093,
"step-1": "<mask token>\n\n\nclass Sender:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def send_frame(self, frame):\n self.receiver.receiver_frame(frame)\n pass\n\n def send_frame_selective(self):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n sizeoftable = len(self.tableOfFrames)\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n endOfWindow = self.windowSize - 1\n i = 0\n while i < sizeoftable:\n isCorrectFrame = True\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n print(f'SENDER: wysłano obiekt nr \"{j}\"')\n self.ACK[j] = self.receiver.recieve_frame(self.\n tableOfFrames[j], j)\n else:\n pass\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n isCorrectFrame = False\n if isCorrectFrame:\n if endOfWindow + self.windowSize >= sizeoftable:\n endOfWindow = sizeoftable\n else:\n endOfWindow += self.windowSize\n i += self.windowSize\n else:\n count = 0\n for j in range(i, endOfWindow + 1):\n if self.ACK[j] == True:\n count += 1\n else:\n break\n endOfWindow += count\n i += count\n <mask token>\n <mask token>\n\n\nclass Frame:\n value = None\n seq_number = 0\n pass\n",
"step-2": "<mask token>\n\n\nclass Sender:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, receiver):\n self.receiver = receiver\n pass\n\n def send_frame(self, frame):\n self.receiver.receiver_frame(frame)\n pass\n\n def send_frame_selective(self):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n sizeoftable = len(self.tableOfFrames)\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n endOfWindow = self.windowSize - 1\n i = 0\n while i < sizeoftable:\n isCorrectFrame = True\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n print(f'SENDER: wysłano obiekt nr \"{j}\"')\n self.ACK[j] = self.receiver.recieve_frame(self.\n tableOfFrames[j], j)\n else:\n pass\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n isCorrectFrame = False\n if isCorrectFrame:\n if endOfWindow + self.windowSize >= sizeoftable:\n endOfWindow = sizeoftable\n else:\n endOfWindow += self.windowSize\n i += self.windowSize\n else:\n count = 0\n for j in range(i, endOfWindow + 1):\n if self.ACK[j] == True:\n count += 1\n else:\n break\n endOfWindow += count\n i += count\n\n def send_frame_go_back_n(self, delay):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n size_of_table = len(self.tableOfFrames)\n for i in range(0, size_of_table):\n self.ACK.append(False)\n self.receiver.numberOfValues = self.image.size\n self.receiver.numberOfFrames = len(self.tableOfFrames)\n self.receiver.reset_Data()\n i = 0\n win_start = i\n win_end = i + self.windowSize\n length_table_of_frames = len(self.tableOfFrames)\n while i < length_table_of_frames:\n while i < win_end and i < length_table_of_frames:\n data = self.tableOfFrames[i]\n sequence_number = i\n print(f'\\nSENDER: wysłano obiekt nr 
\"{i}\"')\n self.ACK[i] = self.receiver.recieve_frame(frame=data,\n sequence_number=sequence_number)\n time.sleep(delay)\n if self.ACK[win_start]:\n print(f'SENDER: odebrano ATK \"{win_start}\"\\n')\n win_end += 1\n win_start += 1\n else:\n if win_end > length_table_of_frames:\n win_end = length_table_of_frames\n for k in range(win_start + 1, win_end):\n if self.ACK[k]:\n print(\n f'SENDER: odebrano ATK \"{k}, Pominięto ATK \"{win_start}\"\\n'\n )\n i = win_start - 1\n break\n i += 1\n time.sleep(delay)\n pass\n pass\n time.sleep(delay)\n if i == win_end:\n i = win_start\n pass\n print('SENDER: koniec wysyłania\\n')\n pass\n <mask token>\n\n\nclass Frame:\n value = None\n seq_number = 0\n pass\n",
"step-3": "<mask token>\n\n\nclass Sender:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, receiver):\n self.receiver = receiver\n pass\n\n def send_frame(self, frame):\n self.receiver.receiver_frame(frame)\n pass\n\n def send_frame_selective(self):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n sizeoftable = len(self.tableOfFrames)\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n endOfWindow = self.windowSize - 1\n i = 0\n while i < sizeoftable:\n isCorrectFrame = True\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n print(f'SENDER: wysłano obiekt nr \"{j}\"')\n self.ACK[j] = self.receiver.recieve_frame(self.\n tableOfFrames[j], j)\n else:\n pass\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n isCorrectFrame = False\n if isCorrectFrame:\n if endOfWindow + self.windowSize >= sizeoftable:\n endOfWindow = sizeoftable\n else:\n endOfWindow += self.windowSize\n i += self.windowSize\n else:\n count = 0\n for j in range(i, endOfWindow + 1):\n if self.ACK[j] == True:\n count += 1\n else:\n break\n endOfWindow += count\n i += count\n\n def send_frame_go_back_n(self, delay):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n size_of_table = len(self.tableOfFrames)\n for i in range(0, size_of_table):\n self.ACK.append(False)\n self.receiver.numberOfValues = self.image.size\n self.receiver.numberOfFrames = len(self.tableOfFrames)\n self.receiver.reset_Data()\n i = 0\n win_start = i\n win_end = i + self.windowSize\n length_table_of_frames = len(self.tableOfFrames)\n while i < length_table_of_frames:\n while i < win_end and i < length_table_of_frames:\n data = self.tableOfFrames[i]\n sequence_number = i\n print(f'\\nSENDER: wysłano obiekt nr 
\"{i}\"')\n self.ACK[i] = self.receiver.recieve_frame(frame=data,\n sequence_number=sequence_number)\n time.sleep(delay)\n if self.ACK[win_start]:\n print(f'SENDER: odebrano ATK \"{win_start}\"\\n')\n win_end += 1\n win_start += 1\n else:\n if win_end > length_table_of_frames:\n win_end = length_table_of_frames\n for k in range(win_start + 1, win_end):\n if self.ACK[k]:\n print(\n f'SENDER: odebrano ATK \"{k}, Pominięto ATK \"{win_start}\"\\n'\n )\n i = win_start - 1\n break\n i += 1\n time.sleep(delay)\n pass\n pass\n time.sleep(delay)\n if i == win_end:\n i = win_start\n pass\n print('SENDER: koniec wysyłania\\n')\n pass\n\n def send_frame_stop_and_wait(self):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n print(self.tableOfFrames)\n sizeoftable = len(self.tableOfFrames)\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n Receiver.numberOfValues = number\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n i = 0\n endOfWindow = self.windowSize - 1\n print('Rozmiar tablicy ramek:')\n print(sizeoftable)\n while i < sizeoftable:\n self.ACK[i] = self.receiver.receive_frame_stop_and_wait(self.\n tableOfFrames[i], i)\n if self.ACK[i]:\n i += 1\n else:\n self.ACK[i] = False\n continue\n\n\nclass Frame:\n value = None\n seq_number = 0\n pass\n",
"step-4": "from Receiver import Receiver\nimport time\nimport Image\n\n\nclass Sender:\n ACK = []\n size = None\n windowSize = None\n tableOfFrames = []\n ChosenSumAlgorithm = None\n\n def __init__(self, receiver):\n self.receiver = receiver\n pass\n\n def send_frame(self, frame):\n self.receiver.receiver_frame(frame)\n pass\n\n def send_frame_selective(self):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n sizeoftable = len(self.tableOfFrames)\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n endOfWindow = self.windowSize - 1\n i = 0\n while i < sizeoftable:\n isCorrectFrame = True\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n print(f'SENDER: wysłano obiekt nr \"{j}\"')\n self.ACK[j] = self.receiver.recieve_frame(self.\n tableOfFrames[j], j)\n else:\n pass\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n isCorrectFrame = False\n if isCorrectFrame:\n if endOfWindow + self.windowSize >= sizeoftable:\n endOfWindow = sizeoftable\n else:\n endOfWindow += self.windowSize\n i += self.windowSize\n else:\n count = 0\n for j in range(i, endOfWindow + 1):\n if self.ACK[j] == True:\n count += 1\n else:\n break\n endOfWindow += count\n i += count\n\n def send_frame_go_back_n(self, delay):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n size_of_table = len(self.tableOfFrames)\n for i in range(0, size_of_table):\n self.ACK.append(False)\n self.receiver.numberOfValues = self.image.size\n self.receiver.numberOfFrames = len(self.tableOfFrames)\n self.receiver.reset_Data()\n i = 0\n win_start = i\n win_end = i + self.windowSize\n length_table_of_frames = len(self.tableOfFrames)\n while i < length_table_of_frames:\n while i < win_end and i < length_table_of_frames:\n data = self.tableOfFrames[i]\n 
sequence_number = i\n print(f'\\nSENDER: wysłano obiekt nr \"{i}\"')\n self.ACK[i] = self.receiver.recieve_frame(frame=data,\n sequence_number=sequence_number)\n time.sleep(delay)\n if self.ACK[win_start]:\n print(f'SENDER: odebrano ATK \"{win_start}\"\\n')\n win_end += 1\n win_start += 1\n else:\n if win_end > length_table_of_frames:\n win_end = length_table_of_frames\n for k in range(win_start + 1, win_end):\n if self.ACK[k]:\n print(\n f'SENDER: odebrano ATK \"{k}, Pominięto ATK \"{win_start}\"\\n'\n )\n i = win_start - 1\n break\n i += 1\n time.sleep(delay)\n pass\n pass\n time.sleep(delay)\n if i == win_end:\n i = win_start\n pass\n print('SENDER: koniec wysyłania\\n')\n pass\n\n def send_frame_stop_and_wait(self):\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size,\n self.ChosenSumAlgorithm)\n print(self.tableOfFrames)\n sizeoftable = len(self.tableOfFrames)\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n Receiver.numberOfValues = number\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n i = 0\n endOfWindow = self.windowSize - 1\n print('Rozmiar tablicy ramek:')\n print(sizeoftable)\n while i < sizeoftable:\n self.ACK[i] = self.receiver.receive_frame_stop_and_wait(self.\n tableOfFrames[i], i)\n if self.ACK[i]:\n i += 1\n else:\n self.ACK[i] = False\n continue\n\n\nclass Frame:\n value = None\n seq_number = 0\n pass\n",
"step-5": "from Receiver import Receiver\nimport time\nimport Image\n\n\nclass Sender:\n ACK = []\n size = None\n windowSize = None\n tableOfFrames = []\n ChosenSumAlgorithm = None\n def __init__(self, receiver):\n self.receiver = receiver\n pass\n\n def send_frame(self, frame):\n self.receiver.receiver_frame(frame)\n pass\n\n def send_frame_selective(self):\n #stworzenie tablicy z ramkami\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size, self.ChosenSumAlgorithm)\n\n # zapisuje ilosc ramek dopedli wysylania\n sizeoftable = len(self.tableOfFrames)\n\n # tworzy tablice o rozmiarze ilosci ramek z potwierdzeniami lub odrzuceniami pakietów\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n\n # przenoszenie do receivera potrzebnych wartosci\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n endOfWindow = self.windowSize - 1\n i = 0\n # petla wysylajaca ramki zgodnie z regulami algotrytmu selektywnego\n while i < sizeoftable:\n isCorrectFrame = True\n # petla operujaca oknem i wysylajaca te ramki ktore sender od nas chce\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n # time.sleep(0.2)\n print(f'SENDER: wysłano obiekt nr \"{j}\"')\n self.ACK[j] = self.receiver.recieve_frame(self.tableOfFrames[j], j)\n else:\n pass\n # petla sprawdzajaca czy cala ramka zostala przeslana bez zarzutów\n for j in range(i, endOfWindow + 1):\n if j == sizeoftable:\n break\n if self.ACK[j] == False:\n isCorrectFrame = False\n # warunki odpowiadajace za przesuwanie sie okna gdy ramka jest dobra lub gdy ktorys z pakietow jest uszkodzony\n if isCorrectFrame:\n if (endOfWindow + self.windowSize) >= sizeoftable:\n endOfWindow = sizeoftable\n else:\n endOfWindow += self.windowSize\n i += self.windowSize\n else:\n count = 0\n for j in range(i, endOfWindow + 1):\n if self.ACK[j] == True:\n count += 1\n else:\n break\n endOfWindow += count\n i += count\n\n def send_frame_go_back_n(self, delay):\n # 
self.image = interfere(self.image)\n # przygotowanie ramek fo wysłania\n # 1. stworzenie tablicy ramek z sumą kontrolną\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size, self.ChosenSumAlgorithm)\n\n # pokazuje ilość ramek\n size_of_table = len(self.tableOfFrames)\n\n # tworzy tablice o rozmiarze ilości ramek z potwierdzeniami lub odrzuceniami pakietów\n for i in range(0, size_of_table):\n self.ACK.append(False)\n\n # przenoszenie do receivera potrzebnych wartości\n self.receiver.numberOfValues = self.image.size\n self.receiver.numberOfFrames = len(self.tableOfFrames)\n self.receiver.reset_Data()\n\n # rozpoczęcie przesyłania\n i = 0\n win_start = i\n win_end = i + self.windowSize\n length_table_of_frames = len(self.tableOfFrames)\n\n while i < length_table_of_frames:\n while i < win_end and i < length_table_of_frames:\n # pobranie ramki do wysłania\n data = self.tableOfFrames[i]\n sequence_number = i\n\n # wysyłanie ramki\n print(f'\\nSENDER: wysłano obiekt nr \"{i}\"')\n self.ACK[i] = self.receiver.recieve_frame(frame=data, sequence_number=sequence_number)\n\n time.sleep(delay)\n if self.ACK[win_start]:\n print(f'SENDER: odebrano ATK \"{win_start}\"\\n')\n win_end += 1\n win_start += 1\n # i = win_start\n else:\n if win_end > length_table_of_frames:\n win_end = length_table_of_frames\n for k in range(win_start + 1, win_end):\n if self.ACK[k]:\n print(f'SENDER: odebrano ATK \"{k}, Pominięto ATK \"{win_start}\"\\n')\n i = win_start - 1\n break\n\n i += 1\n time.sleep(delay)\n pass\n pass\n time.sleep(delay)\n if i == win_end:\n i = win_start\n pass\n\n print('SENDER: koniec wysyłania\\n')\n pass\n\n# Metoda wysyłająca dla protokołu stop-and-wait\n def send_frame_stop_and_wait(self):\n # test\n # print(self.image)\n self.tableOfFrames = Image.gruop_into_frames(self.image, self.size, self.ChosenSumAlgorithm)\n\n #wyświetlenie tablicy zawierającej wszystkie ramki\n print(self.tableOfFrames)\n\n #zapis ilości ramek\n sizeoftable = 
len(self.tableOfFrames)\n\n #tworzy tablice o rozmiarze ilosci ramek z potwierdzeniami lub odrzuceniami pakietów\n for i in range(0, sizeoftable):\n self.ACK.append(False)\n\n #przenoszenie do receivera potrzebnych wartosci\n Receiver.numberOfValues = number\n Receiver.numberOfFrames = sizeoftable\n Receiver.reset_Data(Receiver)\n i = 0\n endOfWindow = self.windowSize -1\n\n print(\"Rozmiar tablicy ramek:\")\n print(sizeoftable)\n\n #wysyłanie poszczególnych ramek\n while i < sizeoftable:\n self.ACK[i] = self.receiver.receive_frame_stop_and_wait(self.tableOfFrames[i], i)\n if self.ACK[i]:\n i += 1\n else:\n self.ACK[i] = False\n continue\n\nclass Frame:\n value = None\n seq_number = 0\n pass\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
# Re-export the hardware controller classes at package level so callers can
# import them directly from the package.
from .Buzzer import BuzzerController
from .Card import CardScanner
from .RFID import RFIDController
from .Servo import ServoController
# Public API of the package (what `from <package> import *` exposes).
__all__ = ["BuzzerController", "CardScanner", "RFIDController", "ServoController"]
|
normal
|
{
"blob_id": "8fa78824a38a3b0c1f51aceacab671f987ea2705",
"index": 9635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['BuzzerController', 'CardScanner', 'RFIDController',\n 'ServoController']\n",
"step-3": "from .Buzzer import BuzzerController\nfrom .Card import CardScanner\nfrom .RFID import RFIDController\nfrom .Servo import ServoController\n__all__ = ['BuzzerController', 'CardScanner', 'RFIDController',\n 'ServoController']\n",
"step-4": "from .Buzzer import BuzzerController\nfrom .Card import CardScanner\nfrom .RFID import RFIDController\nfrom .Servo import ServoController\n\n__all__ = [\"BuzzerController\", \"CardScanner\", \"RFIDController\", \"ServoController\"]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(X, Y, D):
    """Return the minimal number of jumps of length D needed from X to >= Y."""
    distance = Y - X
    # Already at the target: no jump required.
    if distance == 0:
        return 0
    # Any remainder still costs one full jump, hence the ceiling.
    return math.ceil(distance / D)
<|reserved_special_token_1|>
import math
def solution(X, Y, D):
    """Minimal number of jumps of length D needed to get from X to at least Y.

    Fix: the original used math.ceil(xy / D), which routes through float
    true division and loses precision once Y - X exceeds 2**53, producing
    off-by-one answers. Exact integer ceiling division gives identical
    results in the original range and is correct for any magnitude.
    """
    remaining = Y - X
    if remaining == 0:
        return 0
    # Exact ceil(remaining / D) for integers: negate, floor-divide, negate.
    return -(-remaining // D)
<|reserved_special_token_1|>
import math
def solution(X, Y, D):
    """Codility FrogJmp: minimal number of D-length jumps from X to >= Y."""
    # Distance still to cover; zero means we are already at the target.
    xy = Y-X;
    if xy == 0: return 0
    # Round up: a partial remainder still costs one full jump.
    jumps = math.ceil(xy/D)
    return jumps
|
flexible
|
{
"blob_id": "bdf819d8a5bc3906febced785c6d95db7dc3a603",
"index": 2376,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(X, Y, D):\n xy = Y - X\n if xy == 0:\n return 0\n jumps = math.ceil(xy / D)\n return jumps\n",
"step-3": "import math\n\n\ndef solution(X, Y, D):\n xy = Y - X\n if xy == 0:\n return 0\n jumps = math.ceil(xy / D)\n return jumps\n",
"step-4": "import math\ndef solution(X, Y, D):\n # write your code in Python 3.6\n xy = Y-X;\n if xy == 0: return 0\n jumps = math.ceil(xy/D)\n return jumps\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# This implementation of EPG takes data as XML and produces corresponding pseudonymized data
from lxml import etree
from utils import generalize_or_supress
from hashlib import sha256
from count import getLast, saveCount
import pickle
from hmac import new
from random import random
from json import loads
from bigchain import putonBlockChain, findRecord
def EPGAinit(IDPath):
    """Derive a pseudonymous identifier (Hi) and group value (GUi) from the
    quasi-identifiers stored in the XML file at IDPath.

    Returns (Hi, GUi): Hi is an HMAC-SHA256 pseudonym keyed by the
    generalized quasi-identifiers; GUi is a quadratic-residue permutation
    of a persisted per-group counter.
    """
    # NOTE(review): the file handle is never closed (open(...).read()).
    idt = open(IDPath,'rt').read()
    # Quasi-identifiers are carried as XML attributes of the root element.
    Qti = etree.fromstring(idt)
    print('Loading Identifiers')
    print('Quasi Specifiers..')
    print(', '.join(Qti.keys()))
    print('Applying EPGAD_Init on Qti')
    # Generalize or suppress each (attribute value, attribute name) pair.
    gQti = [generalize_or_supress(i[1],i[0]) for i in zip(Qti.keys(),Qti.values())]
    # The concatenated generalized values double as the HMAC key.
    hmacKey = ""
    for i in gQti:
        hmacKey+=i
    # Group identifier: hash of the generalized quasi-identifiers.
    Gi = sha256(hmacKey.encode()).hexdigest()
    # Load the persisted per-group counter state.
    countObj = getLast(Gi)
    GiObj = pickle.loads(countObj.GiObj)
    if GiObj['cQueue'].empty():
        if 'count' not in GiObj.keys():
            GiObj['count'] = 0
            count = 0
        else:
            GiObj['count']+=1
            count = GiObj['count']
        countObj.GiObj = pickle.dumps(GiObj)
        saveCount(countObj)
    # NOTE(review): if cQueue is non-empty, `count` is never assigned and
    # the comparison below raises UnboundLocalError -- confirm intent.
    # Prime modulus used to permute the counter via quadratic residues.
    prime = 179426549
    if count >= prime:
        raise Exception('Prime Exceeded')
    else:
        # Map count to a unique value in [0, prime): count^2 mod p is
        # injective on the lower half; mirror the upper half.
        res = count**2%prime
        if count <= prime/2:
            GUi = res
        else:
            GUi = prime - res
    # Pseudonym: HMAC-SHA256 over (Gi || GUi) keyed with the generalized QIs.
    Hi = new(Gi.encode() + str(GUi).encode() , hmacKey.encode() , sha256).hexdigest()
    return Hi, GUi
def EPGAD(ReportPath, Hi=None, GUi=None):
    """Pseudonymize the subject of a JSON report and anchor it on-chain.

    Suppresses the subject's display name, replaces its reference with the
    pseudonym Hi (generating a random one when none is supplied), publishes
    the record via putonBlockChain, and fetches the resulting transaction.
    """
    if Hi is None:
        # No pseudonym supplied: derive a random one-off identifier.
        Hi = sha256(str(random()).encode()).hexdigest()
    report = loads(open(ReportPath, 'rt').read())
    print('Report Loaded')
    print('Finding Subject Information')
    if 'subject' in report:
        print('Subject Information Found')
        subject = report['subject']
        if 'display' in subject:
            # Blank out the human-readable name.
            subject['display'] = ""
            print('Subject Display Found and Suppressed')
        if 'reference' in subject:
            # Swap the real identifier for the pseudonym.
            subject['reference'] = Hi
            print('Replacing Identifier with ', Hi)
    print('Placing Record Asset on BlockChain')
    print()
    txid = putonBlockChain(report, Hi, GUi)
    print('Status OK. Retrieving Transaction')
    findRecord(txid)
if __name__ == "__main__":
    # Demo run: derive a pseudonym from the sample identity file, then
    # pseudonymize and publish the sample report.
    Hi, GUi = EPGAinit('sampleIdentity.xml')
    EPGAD('sampleReport.json', Hi, GUi)
|
normal
|
{
"blob_id": "8f554166c28fe4c9a093568a97d39b6ba515241b",
"index": 3196,
"step-1": "<mask token>\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef EPGAinit(IDPath):\n idt = open(IDPath, 'rt').read()\n Qti = etree.fromstring(idt)\n print('Loading Identifiers')\n print('Quasi Specifiers..')\n print(', '.join(Qti.keys()))\n print('Applying EPGAD_Init on Qti')\n gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.\n values())]\n hmacKey = ''\n for i in gQti:\n hmacKey += i\n Gi = sha256(hmacKey.encode()).hexdigest()\n countObj = getLast(Gi)\n GiObj = pickle.loads(countObj.GiObj)\n if GiObj['cQueue'].empty():\n if 'count' not in GiObj.keys():\n GiObj['count'] = 0\n count = 0\n else:\n GiObj['count'] += 1\n count = GiObj['count']\n countObj.GiObj = pickle.dumps(GiObj)\n saveCount(countObj)\n prime = 179426549\n if count >= prime:\n raise Exception('Prime Exceeded')\n else:\n res = count ** 2 % prime\n if count <= prime / 2:\n GUi = res\n else:\n GUi = prime - res\n Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256\n ).hexdigest()\n return Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef EPGAinit(IDPath):\n idt = open(IDPath, 'rt').read()\n Qti = etree.fromstring(idt)\n print('Loading Identifiers')\n print('Quasi Specifiers..')\n print(', '.join(Qti.keys()))\n print('Applying EPGAD_Init on Qti')\n gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.\n values())]\n hmacKey = ''\n for i in gQti:\n hmacKey += i\n Gi = sha256(hmacKey.encode()).hexdigest()\n countObj = getLast(Gi)\n GiObj = pickle.loads(countObj.GiObj)\n if GiObj['cQueue'].empty():\n if 'count' not in GiObj.keys():\n GiObj['count'] = 0\n count = 0\n else:\n GiObj['count'] += 1\n count = GiObj['count']\n countObj.GiObj = pickle.dumps(GiObj)\n saveCount(countObj)\n prime = 179426549\n if count >= prime:\n raise Exception('Prime Exceeded')\n else:\n res = count ** 2 % prime\n if count <= prime / 2:\n GUi = res\n else:\n GUi = prime - res\n Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256\n ).hexdigest()\n return Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. Retrieving Transaction')\n findRecord(txid)\n\n\nif __name__ == '__main__':\n Hi, GUi = EPGAinit('sampleIdentity.xml')\n EPGAD('sampleReport.json', Hi, GUi)\n",
"step-4": "from lxml import etree\nfrom utils import generalize_or_supress\nfrom hashlib import sha256\nfrom count import getLast, saveCount\nimport pickle\nfrom hmac import new\nfrom random import random\nfrom json import loads\nfrom bigchain import putonBlockChain, findRecord\n\n\ndef EPGAinit(IDPath):\n idt = open(IDPath, 'rt').read()\n Qti = etree.fromstring(idt)\n print('Loading Identifiers')\n print('Quasi Specifiers..')\n print(', '.join(Qti.keys()))\n print('Applying EPGAD_Init on Qti')\n gQti = [generalize_or_supress(i[1], i[0]) for i in zip(Qti.keys(), Qti.\n values())]\n hmacKey = ''\n for i in gQti:\n hmacKey += i\n Gi = sha256(hmacKey.encode()).hexdigest()\n countObj = getLast(Gi)\n GiObj = pickle.loads(countObj.GiObj)\n if GiObj['cQueue'].empty():\n if 'count' not in GiObj.keys():\n GiObj['count'] = 0\n count = 0\n else:\n GiObj['count'] += 1\n count = GiObj['count']\n countObj.GiObj = pickle.dumps(GiObj)\n saveCount(countObj)\n prime = 179426549\n if count >= prime:\n raise Exception('Prime Exceeded')\n else:\n res = count ** 2 % prime\n if count <= prime / 2:\n GUi = res\n else:\n GUi = prime - res\n Hi = new(Gi.encode() + str(GUi).encode(), hmacKey.encode(), sha256\n ).hexdigest()\n return Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi=None):\n if Hi == None:\n Hi = sha256(str(random()).encode()).hexdigest()\n jsn = open(ReportPath, 'rt').read()\n jsnld = loads(jsn)\n print('Report Loaded')\n print('Finding Subject Information')\n if 'subject' in jsnld.keys():\n print('Subject Information Found')\n if 'display' in jsnld['subject'].keys():\n jsnld['subject']['display'] = ''\n print('Subject Display Found and Suppressed')\n if 'reference' in jsnld['subject'].keys():\n jsnld['subject']['reference'] = Hi\n print('Replacing Identifier with ', Hi)\n print('Placing Record Asset on BlockChain')\n print()\n txid = putonBlockChain(jsnld, Hi, GUi)\n print('Status OK. 
Retrieving Transaction')\n findRecord(txid)\n\n\nif __name__ == '__main__':\n Hi, GUi = EPGAinit('sampleIdentity.xml')\n EPGAD('sampleReport.json', Hi, GUi)\n",
"step-5": "# This implementation of EPG takes data as XML and produces corresponding pseudonymized data\n\nfrom lxml import etree\nfrom utils import generalize_or_supress\nfrom hashlib import sha256\nfrom count import getLast, saveCount\nimport pickle\nfrom hmac import new\nfrom random import random\nfrom json import loads\nfrom bigchain import putonBlockChain, findRecord\n\ndef EPGAinit(IDPath):\n\tidt = open(IDPath,'rt').read()\n\n\tQti = etree.fromstring(idt)\n\n\tprint('Loading Identifiers')\n\tprint('Quasi Specifiers..')\n\tprint(', '.join(Qti.keys()))\n\tprint('Applying EPGAD_Init on Qti')\n\t\n\tgQti = [generalize_or_supress(i[1],i[0]) for i in zip(Qti.keys(),Qti.values())]\n\n\thmacKey = \"\"\n\n\tfor i in gQti:\n\t\thmacKey+=i\n\n\tGi = sha256(hmacKey.encode()).hexdigest()\n\n\tcountObj = getLast(Gi)\n\tGiObj = pickle.loads(countObj.GiObj)\n\n\tif GiObj['cQueue'].empty():\n\t\tif 'count' not in GiObj.keys():\n\t\t\tGiObj['count'] = 0\n\t\t\tcount = 0\n\t\telse:\n\t\t\tGiObj['count']+=1\n\t\t\tcount = GiObj['count']\n\t\tcountObj.GiObj = pickle.dumps(GiObj)\n\t\tsaveCount(countObj)\n\n\tprime = 179426549\n\n\tif count >= prime:\n\t\t raise Exception('Prime Exceeded')\n\n\telse:\n\t\tres = count**2%prime\n\t\tif count <= prime/2:\n\t\t\tGUi = res\n\t\telse:\n\t\t\tGUi = prime - res\n\n\tHi = new(Gi.encode() + str(GUi).encode() , hmacKey.encode() , sha256).hexdigest()\n\treturn Hi, GUi\n\n\ndef EPGAD(ReportPath, Hi=None, GUi = None):\n\tif Hi == None:\n\t\tHi = sha256(str(random()).encode()).hexdigest()\n\tjsn = open(ReportPath, 'rt').read()\n\tjsnld = loads(jsn)\n\tprint('Report Loaded')\n\tprint('Finding Subject Information')\n\tif 'subject' in jsnld.keys():\n\t\tprint('Subject Information Found')\n\t\tif 'display' in jsnld['subject'].keys():\n\t\t\tjsnld['subject']['display'] = \"\"\n\t\t\tprint('Subject Display Found and Suppressed')\n\t\tif 'reference' in jsnld['subject'].keys():\n\t\t\tjsnld['subject']['reference'] = Hi\n\t\t\tprint('Replacing 
Identifier with ', Hi)\n\n\tprint('Placing Record Asset on BlockChain')\n\tprint()\n\ttxid = putonBlockChain(jsnld,Hi, GUi)\n\tprint('Status OK. Retrieving Transaction')\n\tfindRecord(txid)\n\nif __name__ == \"__main__\":\n\tHi, GUi = EPGAinit('sampleIdentity.xml')\n\tEPGAD('sampleReport.json', Hi, GUi)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,
    device, num_cycles, num_epochs_per_cycle):
    """Train ``model`` with snapshot ensembling (cyclic cosine-annealed SGD).

    A fresh SGD optimizer with a cosine-annealed learning-rate schedule is
    created for each cycle; the model weights at the end of every cycle are
    kept as one ensemble member.  During the last epoch of each cycle the
    validation softmax outputs are accumulated so the ensemble's validation
    loss can be computed.

    Args:
        model: ``torch.nn.Module`` to train (trained in place).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        lr: initial learning rate restored at the start of every cycle.
        dataloaders: dict with ``'train'`` and ``'val'`` DataLoaders.
        dataset_sizes: dict with ``'train'`` and ``'val'`` sample counts.
        device: device inputs and labels are moved to.
        num_cycles: number of snapshot cycles (ensemble size).
        num_epochs_per_cycle: epochs per cycle.

    Returns:
        ``(model_arr, ensemble_loss, best_loss, prob)`` where ``model_arr``
        holds one *independent* model copy per cycle, ``ensemble_loss`` is
        the NLL of the cycle-averaged validation probabilities,
        ``best_loss`` is the best single-epoch validation loss seen and
        ``prob`` is the averaged validation probability matrix.

    NOTE(review): the model is assumed to output 3 classes (``prob`` is
    allocated as ``val_size x 3``).
    """
    best_loss = float('inf')
    model_w_arr = []  # one state_dict snapshot per cycle
    prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(
        device)
    lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)
    for cycle in range(num_cycles):
        # Fresh optimizer + cosine schedule: the LR restarts at ``lr`` and
        # decays to ~0 over the cycle (scheduler stepped once per batch).
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, num_epochs_per_cycle * len(dataloaders['train']))
        for epoch in range(num_epochs_per_cycle):
            for phase in ['train', 'val']:
                model.train(phase == 'train')
                running_loss = 0.0
                idx = 0
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        # Accumulate ensemble probabilities on the last
                        # epoch of each cycle only.
                        if (epoch == num_epochs_per_cycle - 1 and
                                phase == 'val'):
                            prob[idx:idx + inputs.shape[0]] += F.softmax(
                                outputs, dim=1)
                            lbl[idx:idx + inputs.shape[0]] = labels
                            idx += inputs.shape[0]
                        loss = criterion(outputs, labels)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            scheduler.step()
                    running_loss += loss.item() * inputs.size(0)
                epoch_loss = running_loss / dataset_sizes[phase]
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
        model_w_arr.append(copy.deepcopy(model.state_dict()))
    prob /= num_cycles
    ensemble_loss = F.nll_loss(torch.log(prob), lbl).item()
    # BUG FIX: the original appended the *same* model object for every
    # snapshot, so every entry of model_arr ended up holding the weights of
    # the last cycle.  Deep-copy the model so each snapshot is independent.
    model_arr = []
    for weights in model_w_arr:
        snapshot = copy.deepcopy(model)
        snapshot.load_state_dict(weights)
        model_arr.append(snapshot)
    return model_arr, ensemble_loss, best_loss, prob
def test(models_arr, loader, device):
    """Average the softmax predictions of an ensemble over ``loader``.

    Bug fix / generalization: the result buffer used to be hard-coded to
    shape ``(610, 3)``; the shape is now inferred from the model outputs,
    so the function works for any dataset size and class count (behavior
    is unchanged for the original 610x3 case).

    Args:
        models_arr: iterable of ``torch.nn.Module`` ensemble members.
        loader: DataLoader yielding ``(inputs, _)`` batches.
        device: device inputs are moved to.

    Returns:
        ``(N, C)`` float32 numpy array of class probabilities averaged
        over the ensemble.
    """
    res = None
    for model in models_arr:
        model.eval()
        batch_probs = []
        for inputs, _ in loader:
            inputs = inputs.to(device)
            with torch.set_grad_enabled(False):
                outputs = F.softmax(model(inputs), dim=1)
            batch_probs.append(outputs.detach().cpu().numpy())
        model_probs = np.concatenate(batch_probs, axis=0)
        res = model_probs if res is None else res + model_probs
    return res / len(models_arr)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,
    device, num_cycles, num_epochs_per_cycle):
    """Train ``model`` with snapshot ensembling (cyclic cosine-annealed SGD).

    A fresh SGD optimizer with a cosine-annealed learning-rate schedule is
    created for each cycle; the model weights at the end of every cycle are
    kept as one ensemble member.  During the last epoch of each cycle the
    validation softmax outputs are accumulated so the ensemble's validation
    loss can be computed.

    Args:
        model: ``torch.nn.Module`` to train (trained in place).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        lr: initial learning rate restored at the start of every cycle.
        dataloaders: dict with ``'train'`` and ``'val'`` DataLoaders.
        dataset_sizes: dict with ``'train'`` and ``'val'`` sample counts.
        device: device inputs and labels are moved to.
        num_cycles: number of snapshot cycles (ensemble size).
        num_epochs_per_cycle: epochs per cycle.

    Returns:
        ``(model_arr, ensemble_loss, best_loss, prob)`` where ``model_arr``
        holds one *independent* model copy per cycle, ``ensemble_loss`` is
        the NLL of the cycle-averaged validation probabilities,
        ``best_loss`` is the best single-epoch validation loss seen and
        ``prob`` is the averaged validation probability matrix.

    NOTE(review): the model is assumed to output 3 classes (``prob`` is
    allocated as ``val_size x 3``).
    """
    best_loss = float('inf')
    model_w_arr = []  # one state_dict snapshot per cycle
    prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(
        device)
    lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)
    for cycle in range(num_cycles):
        # Fresh optimizer + cosine schedule: the LR restarts at ``lr`` and
        # decays to ~0 over the cycle (scheduler stepped once per batch).
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, num_epochs_per_cycle * len(dataloaders['train']))
        for epoch in range(num_epochs_per_cycle):
            for phase in ['train', 'val']:
                model.train(phase == 'train')
                running_loss = 0.0
                idx = 0
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        # Accumulate ensemble probabilities on the last
                        # epoch of each cycle only.
                        if (epoch == num_epochs_per_cycle - 1 and
                                phase == 'val'):
                            prob[idx:idx + inputs.shape[0]] += F.softmax(
                                outputs, dim=1)
                            lbl[idx:idx + inputs.shape[0]] = labels
                            idx += inputs.shape[0]
                        loss = criterion(outputs, labels)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            scheduler.step()
                    running_loss += loss.item() * inputs.size(0)
                epoch_loss = running_loss / dataset_sizes[phase]
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
        model_w_arr.append(copy.deepcopy(model.state_dict()))
    prob /= num_cycles
    ensemble_loss = F.nll_loss(torch.log(prob), lbl).item()
    # BUG FIX: the original appended the *same* model object for every
    # snapshot, so every entry of model_arr ended up holding the weights of
    # the last cycle.  Deep-copy the model so each snapshot is independent.
    model_arr = []
    for weights in model_w_arr:
        snapshot = copy.deepcopy(model)
        snapshot.load_state_dict(weights)
        model_arr.append(snapshot)
    return model_arr, ensemble_loss, best_loss, prob
def test(models_arr, loader, device):
    """Average the softmax predictions of an ensemble over ``loader``.

    Bug fix / generalization: the result buffer used to be hard-coded to
    shape ``(610, 3)``; the shape is now inferred from the model outputs,
    so the function works for any dataset size and class count (behavior
    is unchanged for the original 610x3 case).

    Args:
        models_arr: iterable of ``torch.nn.Module`` ensemble members.
        loader: DataLoader yielding ``(inputs, _)`` batches.
        device: device inputs are moved to.

    Returns:
        ``(N, C)`` float32 numpy array of class probabilities averaged
        over the ensemble.
    """
    res = None
    for model in models_arr:
        model.eval()
        batch_probs = []
        for inputs, _ in loader:
            inputs = inputs.to(device)
            with torch.set_grad_enabled(False):
                outputs = F.softmax(model(inputs), dim=1)
            batch_probs.append(outputs.detach().cpu().numpy())
        model_probs = np.concatenate(batch_probs, axis=0)
        res = model_probs if res is None else res + model_probs
    return res / len(models_arr)
<|reserved_special_token_0|>
def read_test_data(p):
    """Load test images from the flat directory ``p``.

    Each image is rotated upright according to its EXIF Orientation tag
    (skipped for files whose name contains ``'DMWVNR'``), converted to RGB
    and resized to 512x512.

    Args:
        p: path of the directory containing the test images.

    Returns:
        ``(imgs, labels, ids)``: list of 512x512x3 uint8 arrays, a parallel
        list of dummy ``0`` labels, and the filename stems used as ids.
    """
    imgs = []
    labels = []
    ids = []
    for fname in os.listdir(p):
        img = Image.open(os.path.join(p, fname))
        try:
            # 'DMWVNR' files are exempted from EXIF-based rotation.
            if 'DMWVNR' not in fname:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  Missing or odd EXIF data is
            # expected -> best-effort: keep the image as-is.
            pass
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
        # Image.LANCZOS on modern Pillow.
        img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)
        imgs.append(np.array(img))  # np.array already copies the pixels
        labels.append(0)
        ids.append(fname.split('.')[0])
        img.close()
    return imgs, labels, ids
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,
    device, num_cycles, num_epochs_per_cycle):
    """Train ``model`` with snapshot ensembling (cyclic cosine-annealed SGD).

    A fresh SGD optimizer with a cosine-annealed learning-rate schedule is
    created for each cycle; the model weights at the end of every cycle are
    kept as one ensemble member.  During the last epoch of each cycle the
    validation softmax outputs are accumulated so the ensemble's validation
    loss can be computed.

    Args:
        model: ``torch.nn.Module`` to train (trained in place).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        lr: initial learning rate restored at the start of every cycle.
        dataloaders: dict with ``'train'`` and ``'val'`` DataLoaders.
        dataset_sizes: dict with ``'train'`` and ``'val'`` sample counts.
        device: device inputs and labels are moved to.
        num_cycles: number of snapshot cycles (ensemble size).
        num_epochs_per_cycle: epochs per cycle.

    Returns:
        ``(model_arr, ensemble_loss, best_loss, prob)`` where ``model_arr``
        holds one *independent* model copy per cycle, ``ensemble_loss`` is
        the NLL of the cycle-averaged validation probabilities,
        ``best_loss`` is the best single-epoch validation loss seen and
        ``prob`` is the averaged validation probability matrix.

    NOTE(review): the model is assumed to output 3 classes (``prob`` is
    allocated as ``val_size x 3``).
    """
    best_loss = float('inf')
    model_w_arr = []  # one state_dict snapshot per cycle
    prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(
        device)
    lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)
    for cycle in range(num_cycles):
        # Fresh optimizer + cosine schedule: the LR restarts at ``lr`` and
        # decays to ~0 over the cycle (scheduler stepped once per batch).
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, num_epochs_per_cycle * len(dataloaders['train']))
        for epoch in range(num_epochs_per_cycle):
            for phase in ['train', 'val']:
                model.train(phase == 'train')
                running_loss = 0.0
                idx = 0
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        # Accumulate ensemble probabilities on the last
                        # epoch of each cycle only.
                        if (epoch == num_epochs_per_cycle - 1 and
                                phase == 'val'):
                            prob[idx:idx + inputs.shape[0]] += F.softmax(
                                outputs, dim=1)
                            lbl[idx:idx + inputs.shape[0]] = labels
                            idx += inputs.shape[0]
                        loss = criterion(outputs, labels)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            scheduler.step()
                    running_loss += loss.item() * inputs.size(0)
                epoch_loss = running_loss / dataset_sizes[phase]
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
        model_w_arr.append(copy.deepcopy(model.state_dict()))
    prob /= num_cycles
    ensemble_loss = F.nll_loss(torch.log(prob), lbl).item()
    # BUG FIX: the original appended the *same* model object for every
    # snapshot, so every entry of model_arr ended up holding the weights of
    # the last cycle.  Deep-copy the model so each snapshot is independent.
    model_arr = []
    for weights in model_w_arr:
        snapshot = copy.deepcopy(model)
        snapshot.load_state_dict(weights)
        model_arr.append(snapshot)
    return model_arr, ensemble_loss, best_loss, prob
def test(models_arr, loader, device):
    """Average the softmax predictions of an ensemble over ``loader``.

    Bug fix / generalization: the result buffer used to be hard-coded to
    shape ``(610, 3)``; the shape is now inferred from the model outputs,
    so the function works for any dataset size and class count (behavior
    is unchanged for the original 610x3 case).

    Args:
        models_arr: iterable of ``torch.nn.Module`` ensemble members.
        loader: DataLoader yielding ``(inputs, _)`` batches.
        device: device inputs are moved to.

    Returns:
        ``(N, C)`` float32 numpy array of class probabilities averaged
        over the ensemble.
    """
    res = None
    for model in models_arr:
        model.eval()
        batch_probs = []
        for inputs, _ in loader:
            inputs = inputs.to(device)
            with torch.set_grad_enabled(False):
                outputs = F.softmax(model(inputs), dim=1)
            batch_probs.append(outputs.detach().cpu().numpy())
        model_probs = np.concatenate(batch_probs, axis=0)
        res = model_probs if res is None else res + model_probs
    return res / len(models_arr)
def read_train_data(p):
    """Load training images from ``p`` laid out as ``p/<label>/<file>``.

    Each image is rotated upright according to its EXIF Orientation tag,
    converted to RGB and resized to 512x512.

    Args:
        p: path of the directory containing one sub-directory per class.

    Returns:
        ``(imgs, labels)``: list of 512x512x3 uint8 arrays and the parallel
        list of integer class indices (sub-directory enumeration order).
    """
    imgs = []
    labels = []
    for i, lbl in enumerate(os.listdir(p)):
        for fname in os.listdir(os.path.join(p, lbl)):
            img = Image.open(os.path.join(p, lbl, fname))
            try:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit.  Missing EXIF data is
                # expected -> best-effort: keep the image as-is.
                pass
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
            # Image.LANCZOS on modern Pillow.
            img = np.array(img.convert('RGB').resize((512, 512),
                Image.ANTIALIAS))
            imgs.append(img)
            labels.append(i)
    return imgs, labels
def read_test_data(p):
    """Load test images from the flat directory ``p``.

    Each image is rotated upright according to its EXIF Orientation tag
    (skipped for files whose name contains ``'DMWVNR'``), converted to RGB
    and resized to 512x512.

    Args:
        p: path of the directory containing the test images.

    Returns:
        ``(imgs, labels, ids)``: list of 512x512x3 uint8 arrays, a parallel
        list of dummy ``0`` labels, and the filename stems used as ids.
    """
    imgs = []
    labels = []
    ids = []
    for fname in os.listdir(p):
        img = Image.open(os.path.join(p, fname))
        try:
            # 'DMWVNR' files are exempted from EXIF-based rotation.
            if 'DMWVNR' not in fname:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  Missing or odd EXIF data is
            # expected -> best-effort: keep the image as-is.
            pass
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
        # Image.LANCZOS on modern Pillow.
        img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)
        imgs.append(np.array(img))  # np.array already copies the pixels
        labels.append(0)
        ids.append(fname.split('.')[0])
        img.close()
    return imgs, labels, ids
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import os
import copy
import torch.nn.functional as F
from PIL import Image, ExifTags
def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,
    device, num_cycles, num_epochs_per_cycle):
    """Train ``model`` with snapshot ensembling (cyclic cosine-annealed SGD).

    A fresh SGD optimizer with a cosine-annealed learning-rate schedule is
    created for each cycle; the model weights at the end of every cycle are
    kept as one ensemble member.  During the last epoch of each cycle the
    validation softmax outputs are accumulated so the ensemble's validation
    loss can be computed.

    Args:
        model: ``torch.nn.Module`` to train (trained in place).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        lr: initial learning rate restored at the start of every cycle.
        dataloaders: dict with ``'train'`` and ``'val'`` DataLoaders.
        dataset_sizes: dict with ``'train'`` and ``'val'`` sample counts.
        device: device inputs and labels are moved to.
        num_cycles: number of snapshot cycles (ensemble size).
        num_epochs_per_cycle: epochs per cycle.

    Returns:
        ``(model_arr, ensemble_loss, best_loss, prob)`` where ``model_arr``
        holds one *independent* model copy per cycle, ``ensemble_loss`` is
        the NLL of the cycle-averaged validation probabilities,
        ``best_loss`` is the best single-epoch validation loss seen and
        ``prob`` is the averaged validation probability matrix.

    NOTE(review): the model is assumed to output 3 classes (``prob`` is
    allocated as ``val_size x 3``).
    """
    best_loss = float('inf')
    model_w_arr = []  # one state_dict snapshot per cycle
    prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(
        device)
    lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)
    for cycle in range(num_cycles):
        # Fresh optimizer + cosine schedule: the LR restarts at ``lr`` and
        # decays to ~0 over the cycle (scheduler stepped once per batch).
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, num_epochs_per_cycle * len(dataloaders['train']))
        for epoch in range(num_epochs_per_cycle):
            for phase in ['train', 'val']:
                model.train(phase == 'train')
                running_loss = 0.0
                idx = 0
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        # Accumulate ensemble probabilities on the last
                        # epoch of each cycle only.
                        if (epoch == num_epochs_per_cycle - 1 and
                                phase == 'val'):
                            prob[idx:idx + inputs.shape[0]] += F.softmax(
                                outputs, dim=1)
                            lbl[idx:idx + inputs.shape[0]] = labels
                            idx += inputs.shape[0]
                        loss = criterion(outputs, labels)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            scheduler.step()
                    running_loss += loss.item() * inputs.size(0)
                epoch_loss = running_loss / dataset_sizes[phase]
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
        model_w_arr.append(copy.deepcopy(model.state_dict()))
    prob /= num_cycles
    ensemble_loss = F.nll_loss(torch.log(prob), lbl).item()
    # BUG FIX: the original appended the *same* model object for every
    # snapshot, so every entry of model_arr ended up holding the weights of
    # the last cycle.  Deep-copy the model so each snapshot is independent.
    model_arr = []
    for weights in model_w_arr:
        snapshot = copy.deepcopy(model)
        snapshot.load_state_dict(weights)
        model_arr.append(snapshot)
    return model_arr, ensemble_loss, best_loss, prob
def test(models_arr, loader, device):
    """Average the softmax predictions of an ensemble over ``loader``.

    Bug fix / generalization: the result buffer used to be hard-coded to
    shape ``(610, 3)``; the shape is now inferred from the model outputs,
    so the function works for any dataset size and class count (behavior
    is unchanged for the original 610x3 case).

    Args:
        models_arr: iterable of ``torch.nn.Module`` ensemble members.
        loader: DataLoader yielding ``(inputs, _)`` batches.
        device: device inputs are moved to.

    Returns:
        ``(N, C)`` float32 numpy array of class probabilities averaged
        over the ensemble.
    """
    res = None
    for model in models_arr:
        model.eval()
        batch_probs = []
        for inputs, _ in loader:
            inputs = inputs.to(device)
            with torch.set_grad_enabled(False):
                outputs = F.softmax(model(inputs), dim=1)
            batch_probs.append(outputs.detach().cpu().numpy())
        model_probs = np.concatenate(batch_probs, axis=0)
        res = model_probs if res is None else res + model_probs
    return res / len(models_arr)
def read_train_data(p):
    """Load training images from ``p`` laid out as ``p/<label>/<file>``.

    Each image is rotated upright according to its EXIF Orientation tag,
    converted to RGB and resized to 512x512.

    Args:
        p: path of the directory containing one sub-directory per class.

    Returns:
        ``(imgs, labels)``: list of 512x512x3 uint8 arrays and the parallel
        list of integer class indices (sub-directory enumeration order).
    """
    imgs = []
    labels = []
    for i, lbl in enumerate(os.listdir(p)):
        for fname in os.listdir(os.path.join(p, lbl)):
            img = Image.open(os.path.join(p, lbl, fname))
            try:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit.  Missing EXIF data is
                # expected -> best-effort: keep the image as-is.
                pass
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
            # Image.LANCZOS on modern Pillow.
            img = np.array(img.convert('RGB').resize((512, 512),
                Image.ANTIALIAS))
            imgs.append(img)
            labels.append(i)
    return imgs, labels
def read_test_data(p):
    """Load test images from the flat directory ``p``.

    Each image is rotated upright according to its EXIF Orientation tag
    (skipped for files whose name contains ``'DMWVNR'``), converted to RGB
    and resized to 512x512.

    Args:
        p: path of the directory containing the test images.

    Returns:
        ``(imgs, labels, ids)``: list of 512x512x3 uint8 arrays, a parallel
        list of dummy ``0`` labels, and the filename stems used as ids.
    """
    imgs = []
    labels = []
    ids = []
    for fname in os.listdir(p):
        img = Image.open(os.path.join(p, fname))
        try:
            # 'DMWVNR' files are exempted from EXIF-based rotation.
            if 'DMWVNR' not in fname:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  Missing or odd EXIF data is
            # expected -> best-effort: keep the image as-is.
            pass
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
        # Image.LANCZOS on modern Pillow.
        img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)
        imgs.append(np.array(img))  # np.array already copies the pixels
        labels.append(0)
        ids.append(fname.split('.')[0])
        img.close()
    return imgs, labels, ids
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
#import matplotlib.pyplot as plt
import time
import os
import copy
import torch.nn.functional as F
from PIL import Image, ExifTags
def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,
    device, num_cycles, num_epochs_per_cycle):
    """Train ``model`` with snapshot ensembling (cyclic cosine-annealed SGD).

    A fresh SGD optimizer with a cosine-annealed learning-rate schedule is
    created for each cycle; the model weights at the end of every cycle are
    kept as one ensemble member.  During the last epoch of each cycle the
    validation softmax outputs are accumulated so the ensemble's validation
    loss can be computed.

    Args:
        model: ``torch.nn.Module`` to train (trained in place).
        criterion: loss function, e.g. ``nn.CrossEntropyLoss()``.
        lr: initial learning rate restored at the start of every cycle.
        dataloaders: dict with ``'train'`` and ``'val'`` DataLoaders.
        dataset_sizes: dict with ``'train'`` and ``'val'`` sample counts.
        device: device inputs and labels are moved to.
        num_cycles: number of snapshot cycles (ensemble size).
        num_epochs_per_cycle: epochs per cycle.

    Returns:
        ``(model_arr, ensemble_loss, best_loss, prob)`` where ``model_arr``
        holds one *independent* model copy per cycle, ``ensemble_loss`` is
        the NLL of the cycle-averaged validation probabilities,
        ``best_loss`` is the best single-epoch validation loss seen and
        ``prob`` is the averaged validation probability matrix.

    NOTE(review): the model is assumed to output 3 classes (``prob`` is
    allocated as ``val_size x 3``).
    """
    best_loss = float('inf')
    model_w_arr = []  # one state_dict snapshot per cycle
    prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(
        device)
    lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)
    for cycle in range(num_cycles):
        # Fresh optimizer + cosine schedule: the LR restarts at ``lr`` and
        # decays to ~0 over the cycle (scheduler stepped once per batch).
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, num_epochs_per_cycle * len(dataloaders['train']))
        for epoch in range(num_epochs_per_cycle):
            for phase in ['train', 'val']:
                model.train(phase == 'train')
                running_loss = 0.0
                idx = 0
                for inputs, labels in dataloaders[phase]:
                    inputs = inputs.to(device)
                    labels = labels.to(device)
                    optimizer.zero_grad()
                    with torch.set_grad_enabled(phase == 'train'):
                        outputs = model(inputs)
                        # Accumulate ensemble probabilities on the last
                        # epoch of each cycle only.
                        if (epoch == num_epochs_per_cycle - 1 and
                                phase == 'val'):
                            prob[idx:idx + inputs.shape[0]] += F.softmax(
                                outputs, dim=1)
                            lbl[idx:idx + inputs.shape[0]] = labels
                            idx += inputs.shape[0]
                        loss = criterion(outputs, labels)
                        if phase == 'train':
                            loss.backward()
                            optimizer.step()
                            scheduler.step()
                    running_loss += loss.item() * inputs.size(0)
                epoch_loss = running_loss / dataset_sizes[phase]
                if phase == 'val' and epoch_loss < best_loss:
                    best_loss = epoch_loss
        model_w_arr.append(copy.deepcopy(model.state_dict()))
    prob /= num_cycles
    ensemble_loss = F.nll_loss(torch.log(prob), lbl).item()
    # BUG FIX: the original appended the *same* model object for every
    # snapshot, so every entry of model_arr ended up holding the weights of
    # the last cycle.  Deep-copy the model so each snapshot is independent.
    model_arr = []
    for weights in model_w_arr:
        snapshot = copy.deepcopy(model)
        snapshot.load_state_dict(weights)
        model_arr.append(snapshot)
    return model_arr, ensemble_loss, best_loss, prob
def test(models_arr, loader, device):
    """Average the softmax predictions of an ensemble over ``loader``.

    Bug fix / generalization: the result buffer used to be hard-coded to
    shape ``(610, 3)``; the shape is now inferred from the model outputs,
    so the function works for any dataset size and class count (behavior
    is unchanged for the original 610x3 case).

    Args:
        models_arr: iterable of ``torch.nn.Module`` ensemble members.
        loader: DataLoader yielding ``(inputs, _)`` batches.
        device: device inputs are moved to.

    Returns:
        ``(N, C)`` float32 numpy array of class probabilities averaged
        over the ensemble.
    """
    res = None
    for model in models_arr:
        model.eval()
        batch_probs = []
        for inputs, _ in loader:
            inputs = inputs.to(device)
            with torch.set_grad_enabled(False):
                outputs = F.softmax(model(inputs), dim=1)
            batch_probs.append(outputs.detach().cpu().numpy())
        model_probs = np.concatenate(batch_probs, axis=0)
        res = model_probs if res is None else res + model_probs
    return res / len(models_arr)
def read_train_data(p):
    """Load training images from ``p`` laid out as ``p/<label>/<file>``.

    Each image is rotated upright according to its EXIF Orientation tag,
    converted to RGB and resized to 512x512.

    Args:
        p: path of the directory containing one sub-directory per class.

    Returns:
        ``(imgs, labels)``: list of 512x512x3 uint8 arrays and the parallel
        list of integer class indices (sub-directory enumeration order).
    """
    imgs = []
    labels = []
    for i, lbl in enumerate(os.listdir(p)):
        for fname in os.listdir(os.path.join(p, lbl)):
            img = Image.open(os.path.join(p, lbl, fname))
            try:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
            except Exception:
                # BUG FIX: was a bare ``except:`` which also swallowed
                # KeyboardInterrupt/SystemExit.  Missing EXIF data is
                # expected -> best-effort: keep the image as-is.
                pass
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
            # Image.LANCZOS on modern Pillow.
            img = np.array(img.convert('RGB').resize((512, 512),
                Image.ANTIALIAS))
            imgs.append(img)
            labels.append(i)
    return imgs, labels
def read_test_data(p):
    """Load test images from the flat directory ``p``.

    Each image is rotated upright according to its EXIF Orientation tag
    (skipped for files whose name contains ``'DMWVNR'``), converted to RGB
    and resized to 512x512.

    Args:
        p: path of the directory containing the test images.

    Returns:
        ``(imgs, labels, ids)``: list of 512x512x3 uint8 arrays, a parallel
        list of dummy ``0`` labels, and the filename stems used as ids.
    """
    imgs = []
    labels = []
    ids = []
    for fname in os.listdir(p):
        img = Image.open(os.path.join(p, fname))
        try:
            # 'DMWVNR' files are exempted from EXIF-based rotation.
            if 'DMWVNR' not in fname:
                exif = dict((ExifTags.TAGS[k], v) for k, v in img.
                    _getexif().items() if k in ExifTags.TAGS)
                if exif['Orientation'] == 3:
                    img = img.rotate(180, expand=True)
                elif exif['Orientation'] == 6:
                    img = img.rotate(270, expand=True)
                elif exif['Orientation'] == 8:
                    img = img.rotate(90, expand=True)
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  Missing or odd EXIF data is
            # expected -> best-effort: keep the image as-is.
            pass
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
        # Image.LANCZOS on modern Pillow.
        img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)
        imgs.append(np.array(img))  # np.array already copies the pixels
        labels.append(0)
        ids.append(fname.split('.')[0])
        img.close()
    return imgs, labels, ids
|
flexible
|
{
"blob_id": "d807a363c08d117c848ffdc0a768c696ea7746bd",
"index": 1787,
"step-1": "<mask token>\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n 
model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n 
model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\n<mask token>\n\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n img = Image.open(os.path.join(p, fname))\n try:\n if not 'DMWVNR' in fname:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-3": "<mask token>\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n 
model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\ndef read_train_data(p):\n imgs = []\n labels = []\n for i, lbl in enumerate(os.listdir(p)):\n for fname in os.listdir(os.path.join(p, lbl)):\n img = Image.open(os.path.join(p, lbl, fname))\n try:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = np.array(img.convert('RGB').resize((512, 512), Image.\n ANTIALIAS))\n imgs.append(img)\n labels.append(i)\n return imgs, labels\n\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n img = Image.open(os.path.join(p, fname))\n try:\n if not 'DMWVNR' in fname:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport time\nimport os\nimport copy\nimport torch.nn.functional as F\nfrom PIL import Image, ExifTags\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = 
copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\ndef read_train_data(p):\n imgs = []\n labels = []\n for i, lbl in enumerate(os.listdir(p)):\n for fname in os.listdir(os.path.join(p, lbl)):\n img = Image.open(os.path.join(p, lbl, fname))\n try:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = np.array(img.convert('RGB').resize((512, 512), Image.\n ANTIALIAS))\n imgs.append(img)\n labels.append(i)\n return imgs, labels\n\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n img = Image.open(os.path.join(p, fname))\n try:\n if not 'DMWVNR' in fname:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = 
img.convert('RGB').resize((512, 512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\n#import matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport torch.nn.functional as F\nfrom PIL import Image, ExifTags\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)#, weight_decay = 0.0005)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n #print('Cycle {}: Epoch {}/{}'.format(cycle, epoch, num_epochs_per_cycle - 1))\n #print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):\n prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)\n lbl[idx:idx+inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n # backward + optimize only if in 
training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n #print(optimizer.param_groups[0]['lr'])\n \n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n #print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n # phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n #print()\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl) \n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n #print('Training complete in {:.0f}m {:.0f}s'.format(\n # time_elapsed // 60, time_elapsed % 60))\n #print('Ensemble Loss : {:4f}, Best val Loss: {:4f}'.format(ensemble_loss, best_loss))\n\n # load best model weights\n model_arr =[]\n for weights in model_w_arr:\n model.load_state_dict(weights) \n model_arr.append(model) \n return model_arr, ensemble_loss, best_loss, prob\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype = np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n # forward\n # track history if only in train\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim = 1) \n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis = 0)\n res += res_arr\n return res / len(models_arr)\n\ndef read_train_data(p):\n imgs = []\n labels = []\n for i, lbl in enumerate(os.listdir(p)):\n for fname in os.listdir(os.path.join(p, lbl)):\n #read image\n img = Image.open(os.path.join(p, lbl, fname))\n #rotate image to original view\n try:\n exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k 
in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img=img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img=img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img=img.rotate(90, expand=True)\n except:\n pass\n #resize all images to the same size\n img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))\n imgs.append(img)\n labels.append(i)\n return imgs, labels\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n #read image\n img = Image.open(os.path.join(p, fname))\n #rotate image to original view\n try:\n if not('DMWVNR' in fname):\n exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img=img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img=img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img=img.rotate(90, expand=True)\n except:\n pass\n #resize all images to the same size\n img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Standard library
# Third party library
# Local library
from warehouse.server import run_server
from warehouse.server.config import log
if __name__ == "__main__":
log.initialize_logs()
run_server()
|
normal
|
{
"blob_id": "8c8b5c1ff749a8563788b8d5be5332e273275be3",
"index": 6450,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n log.initialize_logs()\n run_server()\n",
"step-3": "from warehouse.server import run_server\nfrom warehouse.server.config import log\nif __name__ == '__main__':\n log.initialize_logs()\n run_server()\n",
"step-4": "# Standard library\n# Third party library\n# Local library\nfrom warehouse.server import run_server\nfrom warehouse.server.config import log\n\n\nif __name__ == \"__main__\":\n log.initialize_logs()\n run_server()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
n = int(input())
if n % 10 == 1 and (n < 11 or n > 20):
print(n, "korova")
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, "korovy")
else:
print(n, "korov")
|
normal
|
{
"blob_id": "78037d936ee5f9b31bf00263885fbec225a4f8f2",
"index": 2191,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif n % 10 == 1 and (n < 11 or n > 20):\n print(n, 'korova')\nelif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):\n print(n, 'korovy')\nelse:\n print(n, 'korov')\n",
"step-3": "n = int(input())\nif n % 10 == 1 and (n < 11 or n > 20):\n print(n, 'korova')\nelif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):\n print(n, 'korovy')\nelse:\n print(n, 'korov')\n",
"step-4": "n = int(input())\n\nif n % 10 == 1 and (n < 11 or n > 20):\n print(n, \"korova\")\nelif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):\n print(n, \"korovy\")\nelse:\n print(n, \"korov\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
[print(i) for i in sentence if len(i) < 5]
<|reserved_special_token_1|>
sentence = 'Practice Problems to Drill List Comprehension in Your Head.'
sentence = sentence.split()
sentence = [i.replace('.', '') for i in sentence]
[print(i) for i in sentence if len(i) < 5]
<|reserved_special_token_1|>
sentence = "Practice Problems to Drill List Comprehension in Your Head."
sentence = sentence.split()
sentence = [i.replace(".", "") for i in sentence]
[print(i) for i in sentence if len(i)<5]
|
flexible
|
{
"blob_id": "c0e349be45cd964e8e398baaed64eae792189dd1",
"index": 5723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n[print(i) for i in sentence if len(i) < 5]\n",
"step-3": "sentence = 'Practice Problems to Drill List Comprehension in Your Head.'\nsentence = sentence.split()\nsentence = [i.replace('.', '') for i in sentence]\n[print(i) for i in sentence if len(i) < 5]\n",
"step-4": "sentence = \"Practice Problems to Drill List Comprehension in Your Head.\"\nsentence = sentence.split()\nsentence = [i.replace(\".\", \"\") for i in sentence]\n[print(i) for i in sentence if len(i)<5]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2020'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import sys
import argparse
import re
import datetime
import os
import logging
import time
import math
from collections import defaultdict, namedtuple
from biolib.common import canonical_gid
class CurationLists(object):
"""Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names."""
def __init__(self, domain, output_dir):
"""Initialization."""
self.domain = domain
self.output_dir = output_dir
self.logger = logging.getLogger('timestamp')
def pseudo_tree(self, gids, out_tree):
"""Create pseudo-tree with the specified genome IDs."""
pseudo_tree = '('
pseudo_tree += ','.join(gids)
pseudo_tree += ');'
fout = open(out_tree, 'w')
fout.write(pseudo_tree)
fout.close()
def new_gtdb_reps(self,
domain_gids,
gtdb_sp_clusters,
gtdb_prev_sp_clusters):
"""New GTDB representatives."""
self.logger.info('Identifying previous GTDB representatives.')
prev_rids = set()
with open(gtdb_prev_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
prev_rids.add(rid)
self.logger.info(' - identified {:,} previous GTDB representatives.'.format(
len(prev_rids)))
self.logger.info('Identifying current GTDB representatives.')
cur_rids = set()
with open(gtdb_sp_clusters) as f:
f.readline()
for line in f:
tokens = line.strip().split('\t')
rid = canonical_gid(tokens[0])
cur_rids.add(rid)
self.logger.info(' - identified {:,} current GTDB representatives.'.format(
len(cur_rids)))
self.logger.info('Creating curation list and pseudo-tree of new GTDB representatives.')
out_file = os.path.join(self.output_dir, f'gids_new_reps.{self.domain}.lst')
fout = open(out_file, 'w')
new_rids = set()
for rid in cur_rids:
if rid in domain_gids and rid not in prev_rids:
fout.write('{}\n'.format(rid))
new_rids.add(rid)
fout.close()
self.logger.info(' - identified {:,} new GTDB representatives.'.format(
len(new_rids)))
self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))
def poly_rogue_gtdb_reps(self,
domain_gids,
taxa_gid_map,
gtdb_decorate_table):
"""Polyphyletic and rogue GTDB representatives."""
self.logger.info('Identifying polyphyletic and rogue GTDB representatives.')
poly_taxa_count = 0
poly_gids = set()
rogue_gids = set()
with open(gtdb_decorate_table) as f:
f.readline()
for line in f:
tokens = line.split('\t')
taxon = tokens[0]
fmeasure = float(tokens[2])
rogue_in = tokens[7].strip()
rogue_out = tokens[8].strip()
if fmeasure < 1.0:
poly_taxa_count += 1
poly_gids.update(taxa_gid_map[taxon])
if rogue_in:
for gid in rogue_in.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
if rogue_out:
for gid in rogue_out.split(','):
gid = canonical_gid(gid.strip())
if not gid.startswith('D-'):
rogue_gids.add(gid)
self.logger.info(' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'.format(
poly_taxa_count,
len(poly_gids)))
self.logger.info(' - identified {:,} rogue GTDB representatives.'.format(
len(rogue_gids)))
self.logger.info('Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.')
out_file = os.path.join(self.output_dir, f'gids_poly_taxa.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in poly_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))
self.logger.info('Creating curation lists and pseudo-trees of rogue GTDB representatives.')
out_file = os.path.join(self.output_dir, f'gids_rogues.{self.domain}.lst')
fout = open(out_file, 'w')
for gid in rogue_gids:
fout.write('{}\n'.format(gid))
fout.close()
self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))
def run(self,
gtdb_init_taxonomy,
gtdb_sp_clusters,
gtdb_prev_sp_clusters,
gtdb_decorate_table):
"""Create curation lists and pseudo-trees."""
# get genomes
self.logger.info('Identifying taxonomic assignment of genomes.')
taxa_gid_map = defaultdict(set)
domain_gids = set()
for line in open(gtdb_init_taxonomy):
tokens = line.strip().split('\t')
gid = canonical_gid(tokens[0])
taxa = [t.strip() for t in tokens[1].split(';')]
for taxon in taxa:
taxa_gid_map[taxon].add(gid)
domain_gids.add(gid)
self.logger.info(' - identified {:,} genomes.'.format(
len(domain_gids)))
# new GTDB representatives
self.new_gtdb_reps(domain_gids,
gtdb_sp_clusters,
gtdb_prev_sp_clusters)
# polyphyletic and rogue GTDB representatives
self.poly_rogue_gtdb_reps(domain_gids,
taxa_gid_map,
gtdb_decorate_table)
|
normal
|
{
"blob_id": "53909b750f259b67b061ba26d604e0c2556376df",
"index": 9560,
"step-1": "<mask token>\n\n\nclass CurationLists(object):\n <mask token>\n <mask token>\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n <mask token>\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = 
open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-2": "<mask token>\n\n\nclass CurationLists(object):\n <mask token>\n\n def __init__(self, domain, output_dir):\n \"\"\"Initialization.\"\"\"\n self.domain = domain\n self.output_dir = output_dir\n self.logger = logging.getLogger('timestamp')\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n <mask token>\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 
'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-3": "<mask token>\n\n\nclass CurationLists(object):\n <mask token>\n\n def __init__(self, domain, output_dir):\n \"\"\"Initialization.\"\"\"\n self.domain = domain\n self.output_dir = output_dir\n self.logger = logging.getLogger('timestamp')\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n\n def new_gtdb_reps(self, domain_gids, gtdb_sp_clusters,\n gtdb_prev_sp_clusters):\n \"\"\"New GTDB representatives.\"\"\"\n self.logger.info('Identifying previous GTDB representatives.')\n prev_rids = set()\n with open(gtdb_prev_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n prev_rids.add(rid)\n self.logger.info(' - identified {:,} previous GTDB representatives.'\n .format(len(prev_rids)))\n self.logger.info('Identifying current GTDB representatives.')\n cur_rids = set()\n with open(gtdb_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n cur_rids.add(rid)\n self.logger.info(' - identified {:,} current GTDB representatives.'\n .format(len(cur_rids)))\n self.logger.info(\n 'Creating curation list and pseudo-tree of new GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_new_reps.{self.domain}.lst')\n fout = open(out_file, 'w')\n new_rids = set()\n for rid in cur_rids:\n if rid in domain_gids and rid not in prev_rids:\n fout.write('{}\\n'.format(rid))\n new_rids.add(rid)\n fout.close()\n self.logger.info(' - identified {:,} new GTDB representatives.'.\n format(len(new_rids)))\n self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 
'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid 
= canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-4": "__author__ = 'Donovan Parks'\n__copyright__ = 'Copyright 2020'\n__credits__ = ['Donovan Parks']\n__license__ = 'GPL3'\n__version__ = '0.0.1'\n__maintainer__ = 'Donovan Parks'\n__email__ = 'donovan.parks@gmail.com'\n__status__ = 'Development'\nimport sys\nimport argparse\nimport re\nimport datetime\nimport os\nimport logging\nimport time\nimport math\nfrom collections import defaultdict, namedtuple\nfrom biolib.common import canonical_gid\n\n\nclass CurationLists(object):\n \"\"\"Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names.\"\"\"\n\n def __init__(self, domain, output_dir):\n \"\"\"Initialization.\"\"\"\n self.domain = domain\n self.output_dir = output_dir\n self.logger = logging.getLogger('timestamp')\n\n def pseudo_tree(self, gids, out_tree):\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\n pseudo_tree = '('\n pseudo_tree += ','.join(gids)\n pseudo_tree += ');'\n fout = open(out_tree, 'w')\n fout.write(pseudo_tree)\n fout.close()\n\n def new_gtdb_reps(self, domain_gids, gtdb_sp_clusters,\n gtdb_prev_sp_clusters):\n \"\"\"New GTDB representatives.\"\"\"\n self.logger.info('Identifying previous GTDB representatives.')\n prev_rids = set()\n with open(gtdb_prev_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n prev_rids.add(rid)\n self.logger.info(' - identified {:,} previous GTDB representatives.'\n .format(len(prev_rids)))\n self.logger.info('Identifying current GTDB representatives.')\n cur_rids = set()\n with open(gtdb_sp_clusters) as f:\n f.readline()\n for line in f:\n tokens = line.strip().split('\\t')\n rid = canonical_gid(tokens[0])\n cur_rids.add(rid)\n self.logger.info(' - identified {:,} current GTDB representatives.'\n .format(len(cur_rids)))\n self.logger.info(\n 'Creating curation list and pseudo-tree of new GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n 
f'gids_new_reps.{self.domain}.lst')\n fout = open(out_file, 'w')\n new_rids = set()\n for rid in cur_rids:\n if rid in domain_gids and rid not in prev_rids:\n fout.write('{}\\n'.format(rid))\n new_rids.add(rid)\n fout.close()\n self.logger.info(' - identified {:,} new GTDB representatives.'.\n format(len(new_rids)))\n self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))\n\n def poly_rogue_gtdb_reps(self, domain_gids, taxa_gid_map,\n gtdb_decorate_table):\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\n self.logger.info(\n 'Identifying polyphyletic and rogue GTDB representatives.')\n poly_taxa_count = 0\n poly_gids = set()\n rogue_gids = set()\n with open(gtdb_decorate_table) as f:\n f.readline()\n for line in f:\n tokens = line.split('\\t')\n taxon = tokens[0]\n fmeasure = float(tokens[2])\n rogue_in = tokens[7].strip()\n rogue_out = tokens[8].strip()\n if fmeasure < 1.0:\n poly_taxa_count += 1\n poly_gids.update(taxa_gid_map[taxon])\n if rogue_in:\n for gid in rogue_in.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n if rogue_out:\n for gid in rogue_out.split(','):\n gid = canonical_gid(gid.strip())\n if not gid.startswith('D-'):\n rogue_gids.add(gid)\n self.logger.info(\n ' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'\n .format(poly_taxa_count, len(poly_gids)))\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.\n format(len(rogue_gids)))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n f'gids_poly_taxa.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in poly_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\n self.logger.info(\n 'Creating curation lists and pseudo-trees of rogue GTDB representatives.'\n )\n out_file = os.path.join(self.output_dir,\n 
f'gids_rogues.{self.domain}.lst')\n fout = open(out_file, 'w')\n for gid in rogue_gids:\n fout.write('{}\\n'.format(gid))\n fout.close()\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\n\n def run(self, gtdb_init_taxonomy, gtdb_sp_clusters,\n gtdb_prev_sp_clusters, gtdb_decorate_table):\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\n self.logger.info('Identifying taxonomic assignment of genomes.')\n taxa_gid_map = defaultdict(set)\n domain_gids = set()\n for line in open(gtdb_init_taxonomy):\n tokens = line.strip().split('\\t')\n gid = canonical_gid(tokens[0])\n taxa = [t.strip() for t in tokens[1].split(';')]\n for taxon in taxa:\n taxa_gid_map[taxon].add(gid)\n domain_gids.add(gid)\n self.logger.info(' - identified {:,} genomes.'.format(len(domain_gids))\n )\n self.new_gtdb_reps(domain_gids, gtdb_sp_clusters, gtdb_prev_sp_clusters\n )\n self.poly_rogue_gtdb_reps(domain_gids, taxa_gid_map,\n gtdb_decorate_table)\n",
"step-5": "###############################################################################\r\n# #\r\n# This program is free software: you can redistribute it and/or modify #\r\n# it under the terms of the GNU General Public License as published by #\r\n# the Free Software Foundation, either version 3 of the License, or #\r\n# (at your option) any later version. #\r\n# #\r\n# This program is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\r\n# GNU General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU General Public License #\r\n# along with this program. If not, see <http://www.gnu.org/licenses/>. #\r\n# #\r\n###############################################################################\r\n\r\n__author__ = 'Donovan Parks'\r\n__copyright__ = 'Copyright 2020'\r\n__credits__ = ['Donovan Parks']\r\n__license__ = 'GPL3'\r\n__version__ = '0.0.1'\r\n__maintainer__ = 'Donovan Parks'\r\n__email__ = 'donovan.parks@gmail.com'\r\n__status__ = 'Development'\r\n\r\nimport sys\r\nimport argparse\r\nimport re\r\nimport datetime\r\nimport os\r\nimport logging\r\nimport time\r\nimport math\r\nfrom collections import defaultdict, namedtuple\r\n\r\nfrom biolib.common import canonical_gid\r\n\r\nclass CurationLists(object):\r\n \"\"\"Lists and pseudo-trees for new representatives, polyphyletic taxa, rogue genomes, and genomes with modified NCBI names.\"\"\"\r\n \r\n def __init__(self, domain, output_dir):\r\n \"\"\"Initialization.\"\"\"\r\n \r\n self.domain = domain\r\n self.output_dir = output_dir\r\n self.logger = logging.getLogger('timestamp')\r\n \r\n def pseudo_tree(self, gids, out_tree):\r\n \"\"\"Create pseudo-tree with the specified genome IDs.\"\"\"\r\n \r\n pseudo_tree = '('\r\n pseudo_tree += ','.join(gids)\r\n pseudo_tree += ');'\r\n \r\n fout = open(out_tree, 'w')\r\n 
fout.write(pseudo_tree)\r\n fout.close()\r\n \r\n def new_gtdb_reps(self,\r\n domain_gids,\r\n gtdb_sp_clusters,\r\n gtdb_prev_sp_clusters):\r\n \"\"\"New GTDB representatives.\"\"\"\r\n\r\n self.logger.info('Identifying previous GTDB representatives.')\r\n prev_rids = set()\r\n with open(gtdb_prev_sp_clusters) as f:\r\n f.readline()\r\n for line in f:\r\n tokens = line.strip().split('\\t')\r\n rid = canonical_gid(tokens[0])\r\n prev_rids.add(rid)\r\n self.logger.info(' - identified {:,} previous GTDB representatives.'.format(\r\n len(prev_rids)))\r\n\r\n self.logger.info('Identifying current GTDB representatives.')\r\n cur_rids = set()\r\n with open(gtdb_sp_clusters) as f:\r\n f.readline()\r\n for line in f:\r\n tokens = line.strip().split('\\t')\r\n rid = canonical_gid(tokens[0])\r\n cur_rids.add(rid)\r\n self.logger.info(' - identified {:,} current GTDB representatives.'.format(\r\n len(cur_rids)))\r\n\r\n self.logger.info('Creating curation list and pseudo-tree of new GTDB representatives.')\r\n out_file = os.path.join(self.output_dir, f'gids_new_reps.{self.domain}.lst')\r\n fout = open(out_file, 'w')\r\n new_rids = set()\r\n for rid in cur_rids:\r\n if rid in domain_gids and rid not in prev_rids:\r\n fout.write('{}\\n'.format(rid))\r\n new_rids.add(rid)\r\n fout.close()\r\n self.logger.info(' - identified {:,} new GTDB representatives.'.format(\r\n len(new_rids)))\r\n \r\n self.pseudo_tree(new_rids, out_file.replace('.lst', '.tree'))\r\n\r\n def poly_rogue_gtdb_reps(self,\r\n domain_gids,\r\n taxa_gid_map,\r\n gtdb_decorate_table):\r\n \"\"\"Polyphyletic and rogue GTDB representatives.\"\"\"\r\n \r\n self.logger.info('Identifying polyphyletic and rogue GTDB representatives.')\r\n poly_taxa_count = 0\r\n poly_gids = set()\r\n rogue_gids = set()\r\n with open(gtdb_decorate_table) as f:\r\n f.readline()\r\n for line in f:\r\n tokens = line.split('\\t')\r\n \r\n taxon = tokens[0]\r\n fmeasure = float(tokens[2])\r\n rogue_in = tokens[7].strip()\r\n rogue_out = 
tokens[8].strip()\r\n if fmeasure < 1.0:\r\n poly_taxa_count += 1\r\n poly_gids.update(taxa_gid_map[taxon])\r\n \r\n if rogue_in:\r\n for gid in rogue_in.split(','):\r\n gid = canonical_gid(gid.strip())\r\n if not gid.startswith('D-'):\r\n rogue_gids.add(gid)\r\n \r\n if rogue_out:\r\n for gid in rogue_out.split(','):\r\n gid = canonical_gid(gid.strip())\r\n if not gid.startswith('D-'):\r\n rogue_gids.add(gid)\r\n\r\n self.logger.info(' - identified {:,} polyphyletic taxa spanning {:,} GTDB representatives.'.format(\r\n poly_taxa_count,\r\n len(poly_gids)))\r\n self.logger.info(' - identified {:,} rogue GTDB representatives.'.format(\r\n len(rogue_gids)))\r\n\r\n self.logger.info('Creating curation lists and pseudo-trees of polyphyletic GTDB representatives.')\r\n out_file = os.path.join(self.output_dir, f'gids_poly_taxa.{self.domain}.lst')\r\n fout = open(out_file, 'w')\r\n for gid in poly_gids:\r\n fout.write('{}\\n'.format(gid))\r\n fout.close()\r\n self.pseudo_tree(poly_gids, out_file.replace('.lst', '.tree'))\r\n \r\n self.logger.info('Creating curation lists and pseudo-trees of rogue GTDB representatives.')\r\n out_file = os.path.join(self.output_dir, f'gids_rogues.{self.domain}.lst')\r\n fout = open(out_file, 'w')\r\n for gid in rogue_gids:\r\n fout.write('{}\\n'.format(gid))\r\n fout.close()\r\n self.pseudo_tree(rogue_gids, out_file.replace('.lst', '.tree'))\r\n \r\n def run(self,\r\n gtdb_init_taxonomy,\r\n gtdb_sp_clusters,\r\n gtdb_prev_sp_clusters,\r\n gtdb_decorate_table):\r\n \"\"\"Create curation lists and pseudo-trees.\"\"\"\r\n\r\n # get genomes\r\n self.logger.info('Identifying taxonomic assignment of genomes.')\r\n taxa_gid_map = defaultdict(set)\r\n domain_gids = set()\r\n for line in open(gtdb_init_taxonomy):\r\n tokens = line.strip().split('\\t')\r\n gid = canonical_gid(tokens[0])\r\n \r\n taxa = [t.strip() for t in tokens[1].split(';')]\r\n for taxon in taxa:\r\n taxa_gid_map[taxon].add(gid)\r\n \r\n domain_gids.add(gid)\r\n 
self.logger.info(' - identified {:,} genomes.'.format(\r\n len(domain_gids)))\r\n \r\n # new GTDB representatives\r\n self.new_gtdb_reps(domain_gids,\r\n gtdb_sp_clusters,\r\n gtdb_prev_sp_clusters)\r\n \r\n # polyphyletic and rogue GTDB representatives\r\n self.poly_rogue_gtdb_reps(domain_gids,\r\n taxa_gid_map,\r\n gtdb_decorate_table)",
"step-ids": [
4,
5,
6,
9,
10
]
}
|
[
4,
5,
6,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
model.fit
<|reserved_special_token_0|>
model.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.plot(history.history['loss'], label='loss')
plt.title('Accuracy y Loss Clasificando coches por color')
plt.xlabel('Épocas')
plt.legend(loc='lower right')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
model = load_model('./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')
model.fit
batch_size = 20
epochs = 100
train_data_dir = (
'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'
)
validation_data_dir = (
'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'
)
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15,
zoom_range=0.1)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir, target_size=(250, 150), batch_size=batch_size,
class_mode='categorical')
train_generator = train_datagen.flow_from_directory(train_data_dir,
target_size=(250, 150), batch_size=batch_size, class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir, target_size=(250, 150), batch_size=batch_size,
class_mode='categorical')
history = model.fit_generator(train_generator, epochs=epochs,
validation_data=validation_generator)
model.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.plot(history.history['loss'], label='loss')
plt.title('Accuracy y Loss Clasificando coches por color')
plt.xlabel('Épocas')
plt.legend(loc='lower right')
plt.show()
<|reserved_special_token_1|>
import PIL
from matplotlib import pyplot as plt
import matplotlib
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping
from keras import backend as K
import keras
from time import time
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
model = load_model('./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')
model.fit
batch_size = 20
epochs = 100
train_data_dir = (
'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'
)
validation_data_dir = (
'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'
)
train_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15,
zoom_range=0.1)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_datagen = ImageDataGenerator(rescale=1.0 / 255)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir, target_size=(250, 150), batch_size=batch_size,
class_mode='categorical')
train_generator = train_datagen.flow_from_directory(train_data_dir,
target_size=(250, 150), batch_size=batch_size, class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir, target_size=(250, 150), batch_size=batch_size,
class_mode='categorical')
history = model.fit_generator(train_generator, epochs=epochs,
validation_data=validation_generator)
model.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.plot(history.history['loss'], label='loss')
plt.title('Accuracy y Loss Clasificando coches por color')
plt.xlabel('Épocas')
plt.legend(loc='lower right')
plt.show()
<|reserved_special_token_1|>
import PIL
from matplotlib import pyplot as plt
import matplotlib
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping
from keras import backend as K
import keras
from time import time
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
#model = load_model("./Modelo_C32K44_C128k44_d075_D256_d05_D5.h5")
#model = load_model("./Modelo_C32k55_C64k55_d025_D128_d05_D5.h5")
model = load_model("./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5")
model.fit
batch_size = 20
epochs = 100
train_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'
validation_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=15,
zoom_range=0.1
)
validation_datagen = ImageDataGenerator(
rescale=1./255
)
validation_datagen = ImageDataGenerator(
rescale=1./255
)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
target_size=(250, 150),
batch_size=batch_size,
class_mode='categorical')
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(250, 150),
batch_size=batch_size,
class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
target_size=(250, 150),
batch_size=batch_size,
class_mode='categorical')
history = model.fit_generator(
train_generator,
epochs=epochs,
validation_data = validation_generator,
#callbacks = [es]
)
model.save("./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5")
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.plot(history.history['loss'], label='loss')
plt.title('Accuracy y Loss Clasificando coches por color')
plt.xlabel('Épocas')
plt.legend(loc="lower right")
plt.show()
|
flexible
|
{
"blob_id": "d2f760b821fc5c599cda1091334364e18234ab06",
"index": 4222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.fit\n<mask token>\nmodel.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc='lower right')\nplt.show()\n",
"step-3": "<mask token>\nmodel = load_model('./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nmodel.fit\nbatch_size = 20\nepochs = 100\ntrain_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'\n )\nvalidation_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'\n )\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15,\n zoom_range=0.1)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\ntrain_generator = train_datagen.flow_from_directory(train_data_dir,\n target_size=(250, 150), batch_size=batch_size, class_mode='categorical')\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\nhistory = model.fit_generator(train_generator, epochs=epochs,\n validation_data=validation_generator)\nmodel.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc='lower right')\nplt.show()\n",
"step-4": "import PIL\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping\nfrom keras import backend as K\nimport keras\nfrom time import time\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport numpy as np\nmodel = load_model('./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nmodel.fit\nbatch_size = 20\nepochs = 100\ntrain_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'\n )\nvalidation_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'\n )\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15,\n zoom_range=0.1)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\ntrain_generator = train_datagen.flow_from_directory(train_data_dir,\n target_size=(250, 150), batch_size=batch_size, class_mode='categorical')\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\nhistory = model.fit_generator(train_generator, epochs=epochs,\n validation_data=validation_generator)\nmodel.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nplt.plot(history.history['accuracy'], 
label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc='lower right')\nplt.show()\n",
"step-5": "import PIL\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping\nfrom keras import backend as K\nimport keras\nfrom time import time\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport numpy as np\n\n#model = load_model(\"./Modelo_C32K44_C128k44_d075_D256_d05_D5.h5\")\n#model = load_model(\"./Modelo_C32k55_C64k55_d025_D128_d05_D5.h5\")\nmodel = load_model(\"./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5\")\nmodel.fit\nbatch_size = 20\nepochs = 100\n\ntrain_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'\nvalidation_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=15,\n zoom_range=0.1\n)\n\nvalidation_datagen = ImageDataGenerator(\n rescale=1./255\n)\n\nvalidation_datagen = ImageDataGenerator(\n rescale=1./255\n)\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(250, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(250, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(250, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\nhistory = model.fit_generator(\n train_generator,\n epochs=epochs,\n validation_data = 
validation_generator,\n #callbacks = [es]\n)\n\nmodel.save(\"./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5\")\n\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\n\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Student:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Student:
def __init__(self, name, rollno):
self.name = name
self.rollno = rollno
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Student:
def __init__(self, name, rollno):
self.name = name
self.rollno = rollno
std1 = Student('Siva', 123)
<|reserved_special_token_1|>
##class Human:
## pass
##hb1-HB("Sudhir")
##hb2=HB("Sreenu")
class Student:
def __init__(self,name,rollno):
self.name=name
self.rollno=rollno
std1=Student("Siva",123)
|
flexible
|
{
"blob_id": "97656bca3ce0085fb2f1167d37485fb7ee812730",
"index": 4825,
"step-1": "<mask token>\n",
"step-2": "class Student:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Student:\n\n def __init__(self, name, rollno):\n self.name = name\n self.rollno = rollno\n\n\n<mask token>\n",
"step-4": "class Student:\n\n def __init__(self, name, rollno):\n self.name = name\n self.rollno = rollno\n\n\nstd1 = Student('Siva', 123)\n",
"step-5": "##class Human:\r\n## pass\r\n##hb1-HB(\"Sudhir\")\r\n##hb2=HB(\"Sreenu\")\r\n\r\n\r\nclass Student:\r\n def __init__(self,name,rollno):\r\n self.name=name\r\n self.rollno=rollno\r\nstd1=Student(\"Siva\",123)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('myapp', '0003_admin')]
operations = [migrations.DeleteModel(name='admin'), migrations.
RemoveField(model_name='course', name='t_id'), migrations.
RemoveField(model_name='enrollcourse', name='course_id'),
migrations.RemoveField(model_name='enrollcourse', name='student_id'
), migrations.RemoveField(model_name='enrollcourse', name=
'teachers_id'), migrations.RemoveField(model_name='enrollcourse',
name='user_id'), migrations.DeleteModel(name='news'), migrations.
RemoveField(model_name='ratings', name='course'), migrations.
RemoveField(model_name='student', name='user_id'), migrations.
RemoveField(model_name='teachers', name='user_id'), migrations.
RemoveField(model_name='viewsa', name='sid'), migrations.
RemoveField(model_name='viewsa', name='tid'), migrations.
RemoveField(model_name='viewsa', name='uid'), migrations.
DeleteModel(name='course'), migrations.DeleteModel(name=
'Enrollcourse'), migrations.DeleteModel(name='ratings'), migrations
.DeleteModel(name='student'), migrations.DeleteModel(name=
'teachers'), migrations.DeleteModel(name='User'), migrations.
DeleteModel(name='viewsa')]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('myapp', '0003_admin')]
operations = [migrations.DeleteModel(name='admin'), migrations.
RemoveField(model_name='course', name='t_id'), migrations.
RemoveField(model_name='enrollcourse', name='course_id'),
migrations.RemoveField(model_name='enrollcourse', name='student_id'
), migrations.RemoveField(model_name='enrollcourse', name=
'teachers_id'), migrations.RemoveField(model_name='enrollcourse',
name='user_id'), migrations.DeleteModel(name='news'), migrations.
RemoveField(model_name='ratings', name='course'), migrations.
RemoveField(model_name='student', name='user_id'), migrations.
RemoveField(model_name='teachers', name='user_id'), migrations.
RemoveField(model_name='viewsa', name='sid'), migrations.
RemoveField(model_name='viewsa', name='tid'), migrations.
RemoveField(model_name='viewsa', name='uid'), migrations.
DeleteModel(name='course'), migrations.DeleteModel(name=
'Enrollcourse'), migrations.DeleteModel(name='ratings'), migrations
.DeleteModel(name='student'), migrations.DeleteModel(name=
'teachers'), migrations.DeleteModel(name='User'), migrations.
DeleteModel(name='viewsa')]
<|reserved_special_token_1|>
# Generated by Django 3.0.2 on 2020-02-18 05:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0003_admin'),
]
operations = [
migrations.DeleteModel(
name='admin',
),
migrations.RemoveField(
model_name='course',
name='t_id',
),
migrations.RemoveField(
model_name='enrollcourse',
name='course_id',
),
migrations.RemoveField(
model_name='enrollcourse',
name='student_id',
),
migrations.RemoveField(
model_name='enrollcourse',
name='teachers_id',
),
migrations.RemoveField(
model_name='enrollcourse',
name='user_id',
),
migrations.DeleteModel(
name='news',
),
migrations.RemoveField(
model_name='ratings',
name='course',
),
migrations.RemoveField(
model_name='student',
name='user_id',
),
migrations.RemoveField(
model_name='teachers',
name='user_id',
),
migrations.RemoveField(
model_name='viewsa',
name='sid',
),
migrations.RemoveField(
model_name='viewsa',
name='tid',
),
migrations.RemoveField(
model_name='viewsa',
name='uid',
),
migrations.DeleteModel(
name='course',
),
migrations.DeleteModel(
name='Enrollcourse',
),
migrations.DeleteModel(
name='ratings',
),
migrations.DeleteModel(
name='student',
),
migrations.DeleteModel(
name='teachers',
),
migrations.DeleteModel(
name='User',
),
migrations.DeleteModel(
name='viewsa',
),
]
|
flexible
|
{
"blob_id": "c0bf146ebfdb54cce80ef85c4c7f4a61632e67d4",
"index": 3371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('myapp', '0003_admin')]\n operations = [migrations.DeleteModel(name='admin'), migrations.\n RemoveField(model_name='course', name='t_id'), migrations.\n RemoveField(model_name='enrollcourse', name='course_id'),\n migrations.RemoveField(model_name='enrollcourse', name='student_id'\n ), migrations.RemoveField(model_name='enrollcourse', name=\n 'teachers_id'), migrations.RemoveField(model_name='enrollcourse',\n name='user_id'), migrations.DeleteModel(name='news'), migrations.\n RemoveField(model_name='ratings', name='course'), migrations.\n RemoveField(model_name='student', name='user_id'), migrations.\n RemoveField(model_name='teachers', name='user_id'), migrations.\n RemoveField(model_name='viewsa', name='sid'), migrations.\n RemoveField(model_name='viewsa', name='tid'), migrations.\n RemoveField(model_name='viewsa', name='uid'), migrations.\n DeleteModel(name='course'), migrations.DeleteModel(name=\n 'Enrollcourse'), migrations.DeleteModel(name='ratings'), migrations\n .DeleteModel(name='student'), migrations.DeleteModel(name=\n 'teachers'), migrations.DeleteModel(name='User'), migrations.\n DeleteModel(name='viewsa')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('myapp', '0003_admin')]\n operations = [migrations.DeleteModel(name='admin'), migrations.\n RemoveField(model_name='course', name='t_id'), migrations.\n RemoveField(model_name='enrollcourse', name='course_id'),\n migrations.RemoveField(model_name='enrollcourse', name='student_id'\n ), migrations.RemoveField(model_name='enrollcourse', name=\n 'teachers_id'), migrations.RemoveField(model_name='enrollcourse',\n name='user_id'), migrations.DeleteModel(name='news'), migrations.\n RemoveField(model_name='ratings', name='course'), migrations.\n RemoveField(model_name='student', name='user_id'), migrations.\n RemoveField(model_name='teachers', name='user_id'), migrations.\n RemoveField(model_name='viewsa', name='sid'), migrations.\n RemoveField(model_name='viewsa', name='tid'), migrations.\n RemoveField(model_name='viewsa', name='uid'), migrations.\n DeleteModel(name='course'), migrations.DeleteModel(name=\n 'Enrollcourse'), migrations.DeleteModel(name='ratings'), migrations\n .DeleteModel(name='student'), migrations.DeleteModel(name=\n 'teachers'), migrations.DeleteModel(name='User'), migrations.\n DeleteModel(name='viewsa')]\n",
"step-5": "# Generated by Django 3.0.2 on 2020-02-18 05:52\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0003_admin'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='admin',\n ),\n migrations.RemoveField(\n model_name='course',\n name='t_id',\n ),\n migrations.RemoveField(\n model_name='enrollcourse',\n name='course_id',\n ),\n migrations.RemoveField(\n model_name='enrollcourse',\n name='student_id',\n ),\n migrations.RemoveField(\n model_name='enrollcourse',\n name='teachers_id',\n ),\n migrations.RemoveField(\n model_name='enrollcourse',\n name='user_id',\n ),\n migrations.DeleteModel(\n name='news',\n ),\n migrations.RemoveField(\n model_name='ratings',\n name='course',\n ),\n migrations.RemoveField(\n model_name='student',\n name='user_id',\n ),\n migrations.RemoveField(\n model_name='teachers',\n name='user_id',\n ),\n migrations.RemoveField(\n model_name='viewsa',\n name='sid',\n ),\n migrations.RemoveField(\n model_name='viewsa',\n name='tid',\n ),\n migrations.RemoveField(\n model_name='viewsa',\n name='uid',\n ),\n migrations.DeleteModel(\n name='course',\n ),\n migrations.DeleteModel(\n name='Enrollcourse',\n ),\n migrations.DeleteModel(\n name='ratings',\n ),\n migrations.DeleteModel(\n name='student',\n ),\n migrations.DeleteModel(\n name='teachers',\n ),\n migrations.DeleteModel(\n name='User',\n ),\n migrations.DeleteModel(\n name='viewsa',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
def savelesson(text):
os.path.expanduser("~/.buzzers/lessons")
def getlessonlist():
path = os.path.expanduser("~/.buzzers")
dirs = os.walk(os.path.expanduser("~/.buzzers/lessons"))
#"/home/loadquo/files/lhsgghc/Programs/PCSoftware/src/admin/lessons")
lessons = []
for root, d, fs in dirs:
fullfs = [root +"/"+ f for f in fs]
lessons.extend(fs)
return lessons
|
normal
|
{
"blob_id": "de003440be513d53b87f526ea95c0fbbc4a9f66f",
"index": 2584,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getlessonlist():\n path = os.path.expanduser('~/.buzzers')\n dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))\n lessons = []\n for root, d, fs in dirs:\n fullfs = [(root + '/' + f) for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-3": "<mask token>\n\n\ndef savelesson(text):\n os.path.expanduser('~/.buzzers/lessons')\n\n\ndef getlessonlist():\n path = os.path.expanduser('~/.buzzers')\n dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))\n lessons = []\n for root, d, fs in dirs:\n fullfs = [(root + '/' + f) for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-4": "import os\n\n\ndef savelesson(text):\n os.path.expanduser('~/.buzzers/lessons')\n\n\ndef getlessonlist():\n path = os.path.expanduser('~/.buzzers')\n dirs = os.walk(os.path.expanduser('~/.buzzers/lessons'))\n lessons = []\n for root, d, fs in dirs:\n fullfs = [(root + '/' + f) for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-5": "import os\n\ndef savelesson(text):\n os.path.expanduser(\"~/.buzzers/lessons\")\n\ndef getlessonlist():\n path = os.path.expanduser(\"~/.buzzers\")\n dirs = os.walk(os.path.expanduser(\"~/.buzzers/lessons\"))\n#\"/home/loadquo/files/lhsgghc/Programs/PCSoftware/src/admin/lessons\")\n lessons = []\n for root, d, fs in dirs: \n fullfs = [root +\"/\"+ f for f in fs]\n lessons.extend(fs)\n return lessons\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.urls import re_path
from .consumers import ChatConsumer, ChatLobbyConsumer
websocket_urlpatterns = [
re_path(r'ws/chat/(?P<room_id>\w+)/$', ChatConsumer),
re_path(r'ws/lobby/$', ChatLobbyConsumer),
]
|
normal
|
{
"blob_id": "1bd1769f94b93e0bb674adfd1bb96c778708f6d8",
"index": 5593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwebsocket_urlpatterns = [re_path('ws/chat/(?P<room_id>\\\\w+)/$',\n ChatConsumer), re_path('ws/lobby/$', ChatLobbyConsumer)]\n",
"step-3": "from django.urls import re_path\nfrom .consumers import ChatConsumer, ChatLobbyConsumer\nwebsocket_urlpatterns = [re_path('ws/chat/(?P<room_id>\\\\w+)/$',\n ChatConsumer), re_path('ws/lobby/$', ChatLobbyConsumer)]\n",
"step-4": "from django.urls import re_path\n\nfrom .consumers import ChatConsumer, ChatLobbyConsumer\n\nwebsocket_urlpatterns = [\n re_path(r'ws/chat/(?P<room_id>\\w+)/$', ChatConsumer),\n re_path(r'ws/lobby/$', ChatLobbyConsumer),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('haiku.txt', 'w') as file:
file.write('This is the line 1 of the haiku\n')
file.write('Following the line 2 of the haiku\n')
file.write('Finishing off with the line 3 of the haiku\n')
with open('haiku.txt', 'a') as file:
file.write('This is the line 1 of the haiku\n')
file.write('Following the line 2 of the haiku\n')
file.write('Finishing off with the line 3 of the haiku\n')
with open('existing_file.txt', 'r+') as file:
file.write('This is the line 1 of the haiku\n')
file.write('Following the line 2 of the haiku\n')
file.write('Finishing off with the line 3 of the haiku\n')
<|reserved_special_token_1|>
"""
r - reading fike
w - writing to file
a - append to file / add to the end of the file - always at the end
r+ - read and write to file (writing based on python cursor position) -> by default at the beginning of file -> won't insert and shift things over,
will overwrite the contents. -> r+ can only be used with already existing files.
"""
with open("haiku.txt", "w") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
with open("haiku.txt", "a") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
with open("existing_file.txt", "r+") as file:
file.write("This is the line 1 of the haiku\n")
file.write("Following the line 2 of the haiku\n")
file.write("Finishing off with the line 3 of the haiku\n")
|
flexible
|
{
"blob_id": "cde2454c68a0d6a0c86b7d647e41a86d3aa97a0d",
"index": 8267,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('haiku.txt', 'w') as file:\n file.write('This is the line 1 of the haiku\\n')\n file.write('Following the line 2 of the haiku\\n')\n file.write('Finishing off with the line 3 of the haiku\\n')\nwith open('haiku.txt', 'a') as file:\n file.write('This is the line 1 of the haiku\\n')\n file.write('Following the line 2 of the haiku\\n')\n file.write('Finishing off with the line 3 of the haiku\\n')\nwith open('existing_file.txt', 'r+') as file:\n file.write('This is the line 1 of the haiku\\n')\n file.write('Following the line 2 of the haiku\\n')\n file.write('Finishing off with the line 3 of the haiku\\n')\n",
"step-3": "\"\"\"\nr - reading fike\nw - writing to file\na - append to file / add to the end of the file - always at the end\nr+ - read and write to file (writing based on python cursor position) -> by default at the beginning of file -> won't insert and shift things over,\nwill overwrite the contents. -> r+ can only be used with already existing files.\n\n\"\"\"\n\nwith open(\"haiku.txt\", \"w\") as file:\n file.write(\"This is the line 1 of the haiku\\n\")\n file.write(\"Following the line 2 of the haiku\\n\")\n file.write(\"Finishing off with the line 3 of the haiku\\n\")\n\nwith open(\"haiku.txt\", \"a\") as file:\n file.write(\"This is the line 1 of the haiku\\n\")\n file.write(\"Following the line 2 of the haiku\\n\")\n file.write(\"Finishing off with the line 3 of the haiku\\n\")\n\nwith open(\"existing_file.txt\", \"r+\") as file:\n file.write(\"This is the line 1 of the haiku\\n\")\n file.write(\"Following the line 2 of the haiku\\n\")\n file.write(\"Finishing off with the line 3 of the haiku\\n\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class BadArgumentException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class TooManyArgumentsException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class NotEnoughArgumentsException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class UsedArchivepgsqlAsArchiveWAL(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def get_version():
version = VERSION or check_output(['git', 'describe']).strip()
return ' '.join(['%prog', version])
def create_common_parser(**kwargs):
kwargs['version'] = get_version()
parser = OptionParser(**kwargs)
parser.add_option('-c', '--config', dest='config_file', help=
'configuration file', default='/etc/bbpgsql.ini')
parser.add_option('--dry-run', dest='dry_run', help=
'test run - do not actually modify any files', action='store_true',
default=False)
return parser
def common_parse_args(args=None):
parser = create_common_parser()
options, args = parser.parse_args(args)
return parser, options, args
def common_validate_options_and_args(options=None, args=None):
if not os.path.exists(options.config_file):
raise Exception('File %s does not exist' % options.config_file)
if not os.access(options.config_file, os.R_OK):
raise Exception('No read access for %s' % options.config_file)
config_stats = os.stat(options.config_file)
if (config_stats.st_mode & stat.S_IRWXG | config_stats.st_mode & stat.
S_IRWXO):
raise Exception('File %s has open group or other permissions' %
options.config_file)
return True
def non_destructive_minimal_parse_and_validate_args(args=None):
args = args or sys.argv[:]
parser, options, args = common_parse_args(args)
common_validate_options_and_args(options, args)
return options, args
<|reserved_special_token_0|>
def wal_file_exists(config, wal_path):
return os.path.isfile(get_wal_filename(config, wal_path))
def get_wal_filename(config, wal_path):
data_dir = get_data_dir(config)
return os.path.join(data_dir, wal_path)
def is_valid_file(config, wal_path):
return is_relative_path(wal_path) and wal_file_exists(config, wal_path)
<|reserved_special_token_0|>
def archivepgsql_parse_args(args=None):
archivepgsql_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
parser = create_common_parser(usage=archivepgsql_usage)
options, args = parser.parse_args(args)
return parser, options, args
def archivepgsql_validate_options_and_args(options=None, args=None):
if not common_validate_options_and_args(options, args):
return False
if args:
if args[0].startswith('pg_xlog'):
raise UsedArchivepgsqlAsArchiveWAL(
'archivepgsql was called with a WAL file path as an argument. This is probably due to configuring archivepgsql as the archive_command in the PGSQL configuration instead of archivewal.'
)
raise TooManyArgumentsException(
'archivepgsql should not be called with any arguments. Are you using it as the archive_command instead of archivewal?'
)
return True
def restorewal_parse_args(args=None):
restorewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',
'<name_of_wal_file_to_restore>', '<path_to_write_restored_file>'])
parser = create_common_parser(usage=restorewal_usage)
options, args = parser.parse_args(args)
return parser, options, args
def restorewal_validate_options_and_args(options=None, args=None):
args = args or []
if not common_validate_options_and_args(options, args):
return False
nargs = len(args)
if nargs != 2:
raise Exception(
'restorewal must be given the name of the WAL file to retrieve and the destination path to restore to.'
)
return True
def storagestats_parse_args(args=None):
storagestats_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
parser = create_common_parser(usage=storagestats_usage)
options, args = parser.parse_args(args)
return parser, options, args
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BadArgumentException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class TooManyArgumentsException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class NotEnoughArgumentsException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class UsedArchivepgsqlAsArchiveWAL(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
def get_version():
version = VERSION or check_output(['git', 'describe']).strip()
return ' '.join(['%prog', version])
def create_common_parser(**kwargs):
kwargs['version'] = get_version()
parser = OptionParser(**kwargs)
parser.add_option('-c', '--config', dest='config_file', help=
'configuration file', default='/etc/bbpgsql.ini')
parser.add_option('--dry-run', dest='dry_run', help=
'test run - do not actually modify any files', action='store_true',
default=False)
return parser
def common_parse_args(args=None):
parser = create_common_parser()
options, args = parser.parse_args(args)
return parser, options, args
def common_validate_options_and_args(options=None, args=None):
if not os.path.exists(options.config_file):
raise Exception('File %s does not exist' % options.config_file)
if not os.access(options.config_file, os.R_OK):
raise Exception('No read access for %s' % options.config_file)
config_stats = os.stat(options.config_file)
if (config_stats.st_mode & stat.S_IRWXG | config_stats.st_mode & stat.
S_IRWXO):
raise Exception('File %s has open group or other permissions' %
options.config_file)
return True
def non_destructive_minimal_parse_and_validate_args(args=None):
args = args or sys.argv[:]
parser, options, args = common_parse_args(args)
common_validate_options_and_args(options, args)
return options, args
<|reserved_special_token_0|>
def wal_file_exists(config, wal_path):
return os.path.isfile(get_wal_filename(config, wal_path))
def get_wal_filename(config, wal_path):
data_dir = get_data_dir(config)
return os.path.join(data_dir, wal_path)
def is_valid_file(config, wal_path):
return is_relative_path(wal_path) and wal_file_exists(config, wal_path)
<|reserved_special_token_0|>
def archivepgsql_parse_args(args=None):
archivepgsql_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
parser = create_common_parser(usage=archivepgsql_usage)
options, args = parser.parse_args(args)
return parser, options, args
def archivepgsql_validate_options_and_args(options=None, args=None):
if not common_validate_options_and_args(options, args):
return False
if args:
if args[0].startswith('pg_xlog'):
raise UsedArchivepgsqlAsArchiveWAL(
'archivepgsql was called with a WAL file path as an argument. This is probably due to configuring archivepgsql as the archive_command in the PGSQL configuration instead of archivewal.'
)
raise TooManyArgumentsException(
'archivepgsql should not be called with any arguments. Are you using it as the archive_command instead of archivewal?'
)
return True
def restorewal_parse_args(args=None):
restorewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',
'<name_of_wal_file_to_restore>', '<path_to_write_restored_file>'])
parser = create_common_parser(usage=restorewal_usage)
options, args = parser.parse_args(args)
return parser, options, args
def restorewal_validate_options_and_args(options=None, args=None):
args = args or []
if not common_validate_options_and_args(options, args):
return False
nargs = len(args)
if nargs != 2:
raise Exception(
'restorewal must be given the name of the WAL file to retrieve and the destination path to restore to.'
)
return True
def storagestats_parse_args(args=None):
storagestats_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
parser = create_common_parser(usage=storagestats_usage)
options, args = parser.parse_args(args)
return parser, options, args
def storagestats_validate_options_and_args(options=None, args=None):
if not common_validate_options_and_args(options, args):
return False
if args:
raise TooManyArgumentsException('storagestats takes no arguments')
return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BadArgumentException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class TooManyArgumentsException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class NotEnoughArgumentsException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class UsedArchivepgsqlAsArchiveWAL(Exception):
    """Raised when archivepgsql is invoked as if it were archivewal
    (i.e. configured as PostgreSQL's archive_command)."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(UsedArchivepgsqlAsArchiveWAL, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
def get_version():
    """Return the program version string, e.g. '%prog 1.2.3'.

    VERSION is overridden with a constant string for release builds;
    development checkouts fall back to ``git describe``.
    """
    # check_output() returns bytes on Python 3 -- decode before joining.
    version = VERSION or check_output(['git', 'describe']).decode().strip()
    return ' '.join(['%prog', version])
def create_common_parser(**kwargs):
    """Build the OptionParser shared by all bbpgsql entry points."""
    kwargs['version'] = get_version()
    parser = OptionParser(**kwargs)
    parser.add_option(
        '-c', '--config',
        dest='config_file',
        default='/etc/bbpgsql.ini',
        help='configuration file')
    parser.add_option(
        '--dry-run',
        dest='dry_run',
        action='store_true',
        default=False,
        help='test run - do not actually modify any files')
    return parser
def common_parse_args(args=None):
    """Parse *args* (or sys.argv) with the common parser."""
    parser = create_common_parser()
    parsed_options, positional = parser.parse_args(args)
    return parser, parsed_options, positional
def common_validate_options_and_args(options=None, args=None):
    """Check that the configuration file exists, is readable, and is private.

    Raises an Exception describing the first problem found; returns True
    when the file passes all checks.
    """
    cfg = options.config_file
    if not os.path.exists(cfg):
        raise Exception('File %s does not exist' % cfg)
    if not os.access(cfg, os.R_OK):
        raise Exception('No read access for %s' % cfg)
    # Refuse config files accessible by group or other; the file may
    # contain credentials.
    mode = os.stat(cfg).st_mode
    if mode & (stat.S_IRWXG | stat.S_IRWXO):
        raise Exception('File %s has open group or other permissions' % cfg)
    return True
def non_destructive_minimal_parse_and_validate_args(args=None):
    """Parse and validate arguments without performing any action."""
    argv = args or sys.argv[:]
    _parser, options, positional = common_parse_args(argv)
    common_validate_options_and_args(options, positional)
    return options, positional
def archivewal_parse_args(args=None):
    """Parse command-line arguments for the archivewal entry point."""
    usage = ' '.join([
        os.path.basename(sys.argv[0]),
        '[options]',
        '<path_to_wal_file_to_archive>',
    ])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def is_relative_path(wal_path):
    """Return True when *wal_path* is a relative filesystem path."""
    absolute = os.path.isabs(wal_path)
    return not absolute
def wal_file_exists(config, wal_path):
    """Return True when *wal_path* names an existing file in the data dir."""
    full_path = get_wal_filename(config, wal_path)
    return os.path.isfile(full_path)
def get_wal_filename(config, wal_path):
    """Resolve *wal_path* against the configured PostgreSQL data directory."""
    return os.path.join(get_data_dir(config), wal_path)
def is_valid_file(config, wal_path):
    """A WAL argument is valid when it is relative and exists on disk."""
    if not is_relative_path(wal_path):
        return False
    return wal_file_exists(config, wal_path)
def archivewal_validate_options_and_args(options=None, args=None):
    """Require exactly one argument: a relative path to an existing WAL file."""
    args = args or []
    if not common_validate_options_and_args(options, args):
        return False
    config = get_config_from_filename_and_set_up_logging(options.config_file)
    wal_ok = len(args) == 1 and is_valid_file(config, args[0])
    if not wal_ok:
        raise Exception(
            'A relative path to a WAL file to be archived must be provided!')
    return True
def archivepgsql_parse_args(args=None):
    """Parse command-line arguments for the archivepgsql entry point."""
    usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def archivepgsql_validate_options_and_args(options=None, args=None):
    """Reject all positional arguments; detect archive_command misconfiguration."""
    if not common_validate_options_and_args(options, args):
        return False
    if not args:
        return True
    # A pg_xlog path strongly suggests this was wired up as archive_command.
    if args[0].startswith('pg_xlog'):
        raise UsedArchivepgsqlAsArchiveWAL(
            'archivepgsql was called with a WAL file path as an argument.'
            ' This is probably due to configuring archivepgsql as the'
            ' archive_command in the PGSQL configuration instead of archivewal.'
        )
    raise TooManyArgumentsException(
        'archivepgsql should not be called with any arguments. Are you'
        ' using it as the archive_command instead of archivewal?')
def restorewal_parse_args(args=None):
    """Parse command-line arguments for the restorewal entry point."""
    usage = ' '.join([
        os.path.basename(sys.argv[0]),
        '[options]',
        '<name_of_wal_file_to_restore>',
        '<path_to_write_restored_file>',
    ])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def restorewal_validate_options_and_args(options=None, args=None):
    """Require exactly two arguments: the WAL name and the destination path."""
    args = args or []
    if not common_validate_options_and_args(options, args):
        return False
    if len(args) != 2:
        raise Exception(
            'restorewal must be given the name of the WAL file to retrieve'
            ' and the destination path to restore to.')
    return True
def storagestats_parse_args(args=None):
    """Parse command-line arguments for the storagestats entry point."""
    usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def storagestats_validate_options_and_args(options=None, args=None):
    """storagestats accepts no positional arguments."""
    if not common_validate_options_and_args(options, args):
        return False
    if not args:
        return True
    raise TooManyArgumentsException('storagestats takes no arguments')
<|reserved_special_token_1|>
import os
import stat
from optparse import OptionParser
from bbpgsql.configuration import get_config_from_filename_and_set_up_logging
from bbpgsql.configuration.general import get_data_dir
from subprocess import check_output
import sys
VERSION = ''
class BadArgumentException(Exception):
    """Raised when a command-line argument value is invalid."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(BadArgumentException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class TooManyArgumentsException(Exception):
    """Raised when a command receives more positional arguments than it accepts."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(TooManyArgumentsException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class NotEnoughArgumentsException(Exception):
    """Raised when a command receives fewer positional arguments than required."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(NotEnoughArgumentsException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class UsedArchivepgsqlAsArchiveWAL(Exception):
    """Raised when archivepgsql is invoked as if it were archivewal
    (i.e. configured as PostgreSQL's archive_command)."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(UsedArchivepgsqlAsArchiveWAL, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
def get_version():
    """Return the program version string, e.g. '%prog 1.2.3'.

    VERSION is overridden with a constant string for release builds;
    development checkouts fall back to ``git describe``.
    """
    # check_output() returns bytes on Python 3 -- decode before joining.
    version = VERSION or check_output(['git', 'describe']).decode().strip()
    return ' '.join(['%prog', version])
def create_common_parser(**kwargs):
    """Build the OptionParser shared by all bbpgsql entry points."""
    kwargs['version'] = get_version()
    parser = OptionParser(**kwargs)
    parser.add_option(
        '-c', '--config',
        dest='config_file',
        default='/etc/bbpgsql.ini',
        help='configuration file')
    parser.add_option(
        '--dry-run',
        dest='dry_run',
        action='store_true',
        default=False,
        help='test run - do not actually modify any files')
    return parser
def common_parse_args(args=None):
    """Parse *args* (or sys.argv) with the common parser."""
    parser = create_common_parser()
    parsed_options, positional = parser.parse_args(args)
    return parser, parsed_options, positional
def common_validate_options_and_args(options=None, args=None):
    """Check that the configuration file exists, is readable, and is private.

    Raises an Exception describing the first problem found; returns True
    when the file passes all checks.
    """
    cfg = options.config_file
    if not os.path.exists(cfg):
        raise Exception('File %s does not exist' % cfg)
    if not os.access(cfg, os.R_OK):
        raise Exception('No read access for %s' % cfg)
    # Refuse config files accessible by group or other; the file may
    # contain credentials.
    mode = os.stat(cfg).st_mode
    if mode & (stat.S_IRWXG | stat.S_IRWXO):
        raise Exception('File %s has open group or other permissions' % cfg)
    return True
def non_destructive_minimal_parse_and_validate_args(args=None):
    """Parse and validate arguments without performing any action."""
    argv = args or sys.argv[:]
    _parser, options, positional = common_parse_args(argv)
    common_validate_options_and_args(options, positional)
    return options, positional
def archivewal_parse_args(args=None):
    """Parse command-line arguments for the archivewal entry point."""
    usage = ' '.join([
        os.path.basename(sys.argv[0]),
        '[options]',
        '<path_to_wal_file_to_archive>',
    ])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def is_relative_path(wal_path):
    """Return True when *wal_path* is a relative filesystem path."""
    absolute = os.path.isabs(wal_path)
    return not absolute
def wal_file_exists(config, wal_path):
    """Return True when *wal_path* names an existing file in the data dir."""
    full_path = get_wal_filename(config, wal_path)
    return os.path.isfile(full_path)
def get_wal_filename(config, wal_path):
    """Resolve *wal_path* against the configured PostgreSQL data directory."""
    return os.path.join(get_data_dir(config), wal_path)
def is_valid_file(config, wal_path):
    """A WAL argument is valid when it is relative and exists on disk."""
    if not is_relative_path(wal_path):
        return False
    return wal_file_exists(config, wal_path)
def archivewal_validate_options_and_args(options=None, args=None):
    """Require exactly one argument: a relative path to an existing WAL file."""
    args = args or []
    if not common_validate_options_and_args(options, args):
        return False
    config = get_config_from_filename_and_set_up_logging(options.config_file)
    wal_ok = len(args) == 1 and is_valid_file(config, args[0])
    if not wal_ok:
        raise Exception(
            'A relative path to a WAL file to be archived must be provided!')
    return True
def archivepgsql_parse_args(args=None):
    """Parse command-line arguments for the archivepgsql entry point."""
    usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def archivepgsql_validate_options_and_args(options=None, args=None):
    """Reject all positional arguments; detect archive_command misconfiguration."""
    if not common_validate_options_and_args(options, args):
        return False
    if not args:
        return True
    # A pg_xlog path strongly suggests this was wired up as archive_command.
    if args[0].startswith('pg_xlog'):
        raise UsedArchivepgsqlAsArchiveWAL(
            'archivepgsql was called with a WAL file path as an argument.'
            ' This is probably due to configuring archivepgsql as the'
            ' archive_command in the PGSQL configuration instead of archivewal.'
        )
    raise TooManyArgumentsException(
        'archivepgsql should not be called with any arguments. Are you'
        ' using it as the archive_command instead of archivewal?')
def restorewal_parse_args(args=None):
    """Parse command-line arguments for the restorewal entry point."""
    usage = ' '.join([
        os.path.basename(sys.argv[0]),
        '[options]',
        '<name_of_wal_file_to_restore>',
        '<path_to_write_restored_file>',
    ])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def restorewal_validate_options_and_args(options=None, args=None):
    """Require exactly two arguments: the WAL name and the destination path."""
    args = args or []
    if not common_validate_options_and_args(options, args):
        return False
    if len(args) != 2:
        raise Exception(
            'restorewal must be given the name of the WAL file to retrieve'
            ' and the destination path to restore to.')
    return True
def storagestats_parse_args(args=None):
    """Parse command-line arguments for the storagestats entry point."""
    usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def storagestats_validate_options_and_args(options=None, args=None):
    """storagestats accepts no positional arguments."""
    if not common_validate_options_and_args(options, args):
        return False
    if not args:
        return True
    raise TooManyArgumentsException('storagestats takes no arguments')
<|reserved_special_token_1|>
import os
import stat
from optparse import OptionParser
from bbpgsql.configuration import get_config_from_filename_and_set_up_logging
from bbpgsql.configuration.general import get_data_dir
from subprocess import check_output
import sys
VERSION = ''
class BadArgumentException(Exception):
    """Raised when a command-line argument value is invalid."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(BadArgumentException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class TooManyArgumentsException(Exception):
    """Raised when a command receives more positional arguments than it accepts."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(TooManyArgumentsException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class NotEnoughArgumentsException(Exception):
    """Raised when a command receives fewer positional arguments than required."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(NotEnoughArgumentsException, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class UsedArchivepgsqlAsArchiveWAL(Exception):
    """Raised when archivepgsql is invoked as if it were archivewal
    (i.e. configured as PostgreSQL's archive_command)."""

    def __init__(self, msg):
        # Initialize the base class so .args, repr() and pickling work.
        super(UsedArchivepgsqlAsArchiveWAL, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
def get_version():
    """Return the program version string, e.g. '%prog 1.2.3'.

    VERSION is overridden with a constant string for release builds;
    development checkouts fall back to ``git describe``.
    """
    # check_output() returns bytes on Python 3 -- decode before joining.
    version = VERSION or check_output(['git', 'describe']).decode().strip()
    return ' '.join(['%prog', version])
def create_common_parser(**kwargs):
    """Build the OptionParser shared by all bbpgsql entry points."""
    kwargs['version'] = get_version()
    parser = OptionParser(**kwargs)
    parser.add_option(
        '-c', '--config',
        dest='config_file',
        default='/etc/bbpgsql.ini',
        help='configuration file')
    parser.add_option(
        '--dry-run',
        dest='dry_run',
        action='store_true',
        default=False,
        help='test run - do not actually modify any files')
    return parser
def common_parse_args(args=None):
    """Parse *args* (or sys.argv) with the common parser."""
    parser = create_common_parser()
    parsed_options, positional = parser.parse_args(args)
    return parser, parsed_options, positional
def common_validate_options_and_args(options=None, args=None):
    """Check that the configuration file exists, is readable, and is private.

    Raises an Exception describing the first problem found; returns True
    when the file passes all checks.
    """
    cfg = options.config_file
    if not os.path.exists(cfg):
        raise Exception('File %s does not exist' % cfg)
    if not os.access(cfg, os.R_OK):
        raise Exception('No read access for %s' % cfg)
    # Refuse config files accessible by group or other; the file may
    # contain credentials.
    mode = os.stat(cfg).st_mode
    if mode & (stat.S_IRWXG | stat.S_IRWXO):
        raise Exception('File %s has open group or other permissions' % cfg)
    return True
def non_destructive_minimal_parse_and_validate_args(args=None):
    """Parse and validate arguments without performing any action."""
    argv = args or sys.argv[:]
    _parser, options, positional = common_parse_args(argv)
    common_validate_options_and_args(options, positional)
    return options, positional
def archivewal_parse_args(args=None):
    """Parse command-line arguments for the archivewal entry point."""
    usage = ' '.join([
        os.path.basename(sys.argv[0]),
        '[options]',
        '<path_to_wal_file_to_archive>',
    ])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def is_relative_path(wal_path):
    """Return True when *wal_path* is a relative filesystem path."""
    absolute = os.path.isabs(wal_path)
    return not absolute
def wal_file_exists(config, wal_path):
    """Return True when *wal_path* names an existing file in the data dir."""
    full_path = get_wal_filename(config, wal_path)
    return os.path.isfile(full_path)
def get_wal_filename(config, wal_path):
    """Resolve *wal_path* against the configured PostgreSQL data directory."""
    return os.path.join(get_data_dir(config), wal_path)
def is_valid_file(config, wal_path):
    """A WAL argument is valid when it is relative and exists on disk."""
    if not is_relative_path(wal_path):
        return False
    return wal_file_exists(config, wal_path)
def archivewal_validate_options_and_args(options=None, args=None):
    """Require exactly one argument: a relative path to an existing WAL file."""
    args = args or []
    if not common_validate_options_and_args(options, args):
        return False
    config = get_config_from_filename_and_set_up_logging(options.config_file)
    wal_ok = len(args) == 1 and is_valid_file(config, args[0])
    if not wal_ok:
        raise Exception(
            'A relative path to a WAL file to be archived must be provided!')
    return True
def archivepgsql_parse_args(args=None):
    """Parse command-line arguments for the archivepgsql entry point."""
    usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def archivepgsql_validate_options_and_args(options=None, args=None):
    """Reject all positional arguments; detect archive_command misconfiguration."""
    if not common_validate_options_and_args(options, args):
        return False
    if not args:
        return True
    # A pg_xlog path strongly suggests this was wired up as archive_command.
    if args[0].startswith('pg_xlog'):
        raise UsedArchivepgsqlAsArchiveWAL(
            'archivepgsql was called with a WAL file path as an argument.'
            ' This is probably due to configuring archivepgsql as the'
            ' archive_command in the PGSQL configuration instead of archivewal.'
        )
    raise TooManyArgumentsException(
        'archivepgsql should not be called with any arguments. Are you'
        ' using it as the archive_command instead of archivewal?')
def restorewal_parse_args(args=None):
    """Parse command-line arguments for the restorewal entry point."""
    usage = ' '.join([
        os.path.basename(sys.argv[0]),
        '[options]',
        '<name_of_wal_file_to_restore>',
        '<path_to_write_restored_file>',
    ])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def restorewal_validate_options_and_args(options=None, args=None):
    """Require exactly two arguments: the WAL name and the destination path."""
    args = args or []
    if not common_validate_options_and_args(options, args):
        return False
    if len(args) != 2:
        raise Exception(
            'restorewal must be given the name of the WAL file to retrieve'
            ' and the destination path to restore to.')
    return True
def storagestats_parse_args(args=None):
    """Parse command-line arguments for the storagestats entry point."""
    usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])
    parser = create_common_parser(usage=usage)
    options, positional = parser.parse_args(args)
    return parser, options, positional
def storagestats_validate_options_and_args(options=None, args=None):
    """storagestats accepts no positional arguments."""
    if not common_validate_options_and_args(options, args):
        return False
    if not args:
        return True
    raise TooManyArgumentsException('storagestats takes no arguments')
|
flexible
|
{
"blob_id": "eed79a3895975a0475c0b192bd8a42e80def2e78",
"index": 2502,
"step-1": "<mask token>\n\n\nclass BadArgumentException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass TooManyArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass NotEnoughArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass UsedArchivepgsqlAsArchiveWAL(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\ndef get_version():\n version = VERSION or check_output(['git', 'describe']).strip()\n return ' '.join(['%prog', version])\n\n\ndef create_common_parser(**kwargs):\n kwargs['version'] = get_version()\n parser = OptionParser(**kwargs)\n parser.add_option('-c', '--config', dest='config_file', help=\n 'configuration file', default='/etc/bbpgsql.ini')\n parser.add_option('--dry-run', dest='dry_run', help=\n 'test run - do not actually modify any files', action='store_true',\n default=False)\n return parser\n\n\ndef common_parse_args(args=None):\n parser = create_common_parser()\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef common_validate_options_and_args(options=None, args=None):\n if not os.path.exists(options.config_file):\n raise Exception('File %s does not exist' % options.config_file)\n if not os.access(options.config_file, os.R_OK):\n raise Exception('No read access for %s' % options.config_file)\n config_stats = os.stat(options.config_file)\n if (config_stats.st_mode & stat.S_IRWXG | config_stats.st_mode & stat.\n S_IRWXO):\n raise Exception('File %s has open group or other permissions' %\n options.config_file)\n return True\n\n\ndef non_destructive_minimal_parse_and_validate_args(args=None):\n args = args or sys.argv[:]\n parser, options, args = common_parse_args(args)\n common_validate_options_and_args(options, args)\n return options, args\n\n\n<mask 
token>\n\n\ndef wal_file_exists(config, wal_path):\n return os.path.isfile(get_wal_filename(config, wal_path))\n\n\ndef get_wal_filename(config, wal_path):\n data_dir = get_data_dir(config)\n return os.path.join(data_dir, wal_path)\n\n\ndef is_valid_file(config, wal_path):\n return is_relative_path(wal_path) and wal_file_exists(config, wal_path)\n\n\n<mask token>\n\n\ndef archivepgsql_parse_args(args=None):\n archivepgsql_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=archivepgsql_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef archivepgsql_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n if args[0].startswith('pg_xlog'):\n raise UsedArchivepgsqlAsArchiveWAL(\n 'archivepgsql was called with a WAL file path as an argument. This is probably due to configuring archivepgsql as the archive_command in the PGSQL configuration instead of archivewal.'\n )\n raise TooManyArgumentsException(\n 'archivepgsql should not be called with any arguments. 
Are you using it as the archive_command instead of archivewal?'\n )\n return True\n\n\ndef restorewal_parse_args(args=None):\n restorewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',\n '<name_of_wal_file_to_restore>', '<path_to_write_restored_file>'])\n parser = create_common_parser(usage=restorewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef restorewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n nargs = len(args)\n if nargs != 2:\n raise Exception(\n 'restorewal must be given the name of the WAL file to retrieve and the destination path to restore to.'\n )\n return True\n\n\ndef storagestats_parse_args(args=None):\n storagestats_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=storagestats_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BadArgumentException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass TooManyArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass NotEnoughArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass UsedArchivepgsqlAsArchiveWAL(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\ndef get_version():\n version = VERSION or check_output(['git', 'describe']).strip()\n return ' '.join(['%prog', version])\n\n\ndef create_common_parser(**kwargs):\n kwargs['version'] = get_version()\n parser = OptionParser(**kwargs)\n parser.add_option('-c', '--config', dest='config_file', help=\n 'configuration file', default='/etc/bbpgsql.ini')\n parser.add_option('--dry-run', dest='dry_run', help=\n 'test run - do not actually modify any files', action='store_true',\n default=False)\n return parser\n\n\ndef common_parse_args(args=None):\n parser = create_common_parser()\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef common_validate_options_and_args(options=None, args=None):\n if not os.path.exists(options.config_file):\n raise Exception('File %s does not exist' % options.config_file)\n if not os.access(options.config_file, os.R_OK):\n raise Exception('No read access for %s' % options.config_file)\n config_stats = os.stat(options.config_file)\n if (config_stats.st_mode & stat.S_IRWXG | config_stats.st_mode & stat.\n S_IRWXO):\n raise Exception('File %s has open group or other permissions' %\n options.config_file)\n return True\n\n\ndef non_destructive_minimal_parse_and_validate_args(args=None):\n args = args or sys.argv[:]\n parser, options, args = common_parse_args(args)\n common_validate_options_and_args(options, args)\n return options, args\n\n\n<mask 
token>\n\n\ndef wal_file_exists(config, wal_path):\n return os.path.isfile(get_wal_filename(config, wal_path))\n\n\ndef get_wal_filename(config, wal_path):\n data_dir = get_data_dir(config)\n return os.path.join(data_dir, wal_path)\n\n\ndef is_valid_file(config, wal_path):\n return is_relative_path(wal_path) and wal_file_exists(config, wal_path)\n\n\n<mask token>\n\n\ndef archivepgsql_parse_args(args=None):\n archivepgsql_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=archivepgsql_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef archivepgsql_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n if args[0].startswith('pg_xlog'):\n raise UsedArchivepgsqlAsArchiveWAL(\n 'archivepgsql was called with a WAL file path as an argument. This is probably due to configuring archivepgsql as the archive_command in the PGSQL configuration instead of archivewal.'\n )\n raise TooManyArgumentsException(\n 'archivepgsql should not be called with any arguments. 
Are you using it as the archive_command instead of archivewal?'\n )\n return True\n\n\ndef restorewal_parse_args(args=None):\n restorewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',\n '<name_of_wal_file_to_restore>', '<path_to_write_restored_file>'])\n parser = create_common_parser(usage=restorewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef restorewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n nargs = len(args)\n if nargs != 2:\n raise Exception(\n 'restorewal must be given the name of the WAL file to retrieve and the destination path to restore to.'\n )\n return True\n\n\ndef storagestats_parse_args(args=None):\n storagestats_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=storagestats_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef storagestats_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n raise TooManyArgumentsException('storagestats takes no arguments')\n return True\n",
"step-3": "<mask token>\n\n\nclass BadArgumentException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass TooManyArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass NotEnoughArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass UsedArchivepgsqlAsArchiveWAL(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\ndef get_version():\n version = VERSION or check_output(['git', 'describe']).strip()\n return ' '.join(['%prog', version])\n\n\ndef create_common_parser(**kwargs):\n kwargs['version'] = get_version()\n parser = OptionParser(**kwargs)\n parser.add_option('-c', '--config', dest='config_file', help=\n 'configuration file', default='/etc/bbpgsql.ini')\n parser.add_option('--dry-run', dest='dry_run', help=\n 'test run - do not actually modify any files', action='store_true',\n default=False)\n return parser\n\n\ndef common_parse_args(args=None):\n parser = create_common_parser()\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef common_validate_options_and_args(options=None, args=None):\n if not os.path.exists(options.config_file):\n raise Exception('File %s does not exist' % options.config_file)\n if not os.access(options.config_file, os.R_OK):\n raise Exception('No read access for %s' % options.config_file)\n config_stats = os.stat(options.config_file)\n if (config_stats.st_mode & stat.S_IRWXG | config_stats.st_mode & stat.\n S_IRWXO):\n raise Exception('File %s has open group or other permissions' %\n options.config_file)\n return True\n\n\ndef non_destructive_minimal_parse_and_validate_args(args=None):\n args = args or sys.argv[:]\n parser, options, args = common_parse_args(args)\n common_validate_options_and_args(options, args)\n return options, args\n\n\ndef 
archivewal_parse_args(args=None):\n archivewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',\n '<path_to_wal_file_to_archive>'])\n parser = create_common_parser(usage=archivewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef is_relative_path(wal_path):\n return not os.path.isabs(wal_path)\n\n\ndef wal_file_exists(config, wal_path):\n return os.path.isfile(get_wal_filename(config, wal_path))\n\n\ndef get_wal_filename(config, wal_path):\n data_dir = get_data_dir(config)\n return os.path.join(data_dir, wal_path)\n\n\ndef is_valid_file(config, wal_path):\n return is_relative_path(wal_path) and wal_file_exists(config, wal_path)\n\n\ndef archivewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n config = get_config_from_filename_and_set_up_logging(options.config_file)\n if len(args) != 1 or not is_valid_file(config, args[0]):\n raise Exception(\n 'A relative path to a WAL file to be archived must be provided!')\n return True\n\n\ndef archivepgsql_parse_args(args=None):\n archivepgsql_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=archivepgsql_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef archivepgsql_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n if args[0].startswith('pg_xlog'):\n raise UsedArchivepgsqlAsArchiveWAL(\n 'archivepgsql was called with a WAL file path as an argument. This is probably due to configuring archivepgsql as the archive_command in the PGSQL configuration instead of archivewal.'\n )\n raise TooManyArgumentsException(\n 'archivepgsql should not be called with any arguments. 
Are you using it as the archive_command instead of archivewal?'\n )\n return True\n\n\ndef restorewal_parse_args(args=None):\n restorewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',\n '<name_of_wal_file_to_restore>', '<path_to_write_restored_file>'])\n parser = create_common_parser(usage=restorewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef restorewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n nargs = len(args)\n if nargs != 2:\n raise Exception(\n 'restorewal must be given the name of the WAL file to retrieve and the destination path to restore to.'\n )\n return True\n\n\ndef storagestats_parse_args(args=None):\n storagestats_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=storagestats_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef storagestats_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n raise TooManyArgumentsException('storagestats takes no arguments')\n return True\n",
"step-4": "import os\nimport stat\nfrom optparse import OptionParser\nfrom bbpgsql.configuration import get_config_from_filename_and_set_up_logging\nfrom bbpgsql.configuration.general import get_data_dir\nfrom subprocess import check_output\nimport sys\nVERSION = ''\n\n\nclass BadArgumentException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass TooManyArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass NotEnoughArgumentsException(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass UsedArchivepgsqlAsArchiveWAL(Exception):\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\ndef get_version():\n version = VERSION or check_output(['git', 'describe']).strip()\n return ' '.join(['%prog', version])\n\n\ndef create_common_parser(**kwargs):\n kwargs['version'] = get_version()\n parser = OptionParser(**kwargs)\n parser.add_option('-c', '--config', dest='config_file', help=\n 'configuration file', default='/etc/bbpgsql.ini')\n parser.add_option('--dry-run', dest='dry_run', help=\n 'test run - do not actually modify any files', action='store_true',\n default=False)\n return parser\n\n\ndef common_parse_args(args=None):\n parser = create_common_parser()\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef common_validate_options_and_args(options=None, args=None):\n if not os.path.exists(options.config_file):\n raise Exception('File %s does not exist' % options.config_file)\n if not os.access(options.config_file, os.R_OK):\n raise Exception('No read access for %s' % options.config_file)\n config_stats = os.stat(options.config_file)\n if (config_stats.st_mode & stat.S_IRWXG | config_stats.st_mode & stat.\n S_IRWXO):\n raise Exception('File %s has open group or other permissions' %\n options.config_file)\n return 
True\n\n\ndef non_destructive_minimal_parse_and_validate_args(args=None):\n args = args or sys.argv[:]\n parser, options, args = common_parse_args(args)\n common_validate_options_and_args(options, args)\n return options, args\n\n\ndef archivewal_parse_args(args=None):\n archivewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',\n '<path_to_wal_file_to_archive>'])\n parser = create_common_parser(usage=archivewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef is_relative_path(wal_path):\n return not os.path.isabs(wal_path)\n\n\ndef wal_file_exists(config, wal_path):\n return os.path.isfile(get_wal_filename(config, wal_path))\n\n\ndef get_wal_filename(config, wal_path):\n data_dir = get_data_dir(config)\n return os.path.join(data_dir, wal_path)\n\n\ndef is_valid_file(config, wal_path):\n return is_relative_path(wal_path) and wal_file_exists(config, wal_path)\n\n\ndef archivewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n config = get_config_from_filename_and_set_up_logging(options.config_file)\n if len(args) != 1 or not is_valid_file(config, args[0]):\n raise Exception(\n 'A relative path to a WAL file to be archived must be provided!')\n return True\n\n\ndef archivepgsql_parse_args(args=None):\n archivepgsql_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=archivepgsql_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef archivepgsql_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n if args[0].startswith('pg_xlog'):\n raise UsedArchivepgsqlAsArchiveWAL(\n 'archivepgsql was called with a WAL file path as an argument. 
This is probably due to configuring archivepgsql as the archive_command in the PGSQL configuration instead of archivewal.'\n )\n raise TooManyArgumentsException(\n 'archivepgsql should not be called with any arguments. Are you using it as the archive_command instead of archivewal?'\n )\n return True\n\n\ndef restorewal_parse_args(args=None):\n restorewal_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]',\n '<name_of_wal_file_to_restore>', '<path_to_write_restored_file>'])\n parser = create_common_parser(usage=restorewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef restorewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n nargs = len(args)\n if nargs != 2:\n raise Exception(\n 'restorewal must be given the name of the WAL file to retrieve and the destination path to restore to.'\n )\n return True\n\n\ndef storagestats_parse_args(args=None):\n storagestats_usage = ' '.join([os.path.basename(sys.argv[0]), '[options]'])\n parser = create_common_parser(usage=storagestats_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef storagestats_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n raise TooManyArgumentsException('storagestats takes no arguments')\n return True\n",
"step-5": "import os\nimport stat\nfrom optparse import OptionParser\nfrom bbpgsql.configuration import get_config_from_filename_and_set_up_logging\nfrom bbpgsql.configuration.general import get_data_dir\nfrom subprocess import check_output\nimport sys\n\nVERSION = ''\n\n\nclass BadArgumentException(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass TooManyArgumentsException(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass NotEnoughArgumentsException(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\nclass UsedArchivepgsqlAsArchiveWAL(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n\n\ndef get_version():\n # override \"version\" with a constant string for release\n version = VERSION or check_output(['git', 'describe']).strip()\n return ' '.join(['%prog', version])\n\n\ndef create_common_parser(**kwargs):\n kwargs['version'] = get_version()\n parser = OptionParser(**kwargs)\n\n parser.add_option('-c', '--config', dest='config_file',\n help='configuration file', default='/etc/bbpgsql.ini')\n\n parser.add_option('--dry-run', dest='dry_run',\n help='test run - do not actually modify any files',\n action='store_true',\n default=False)\n\n return parser\n\n\ndef common_parse_args(args=None):\n parser = create_common_parser()\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef common_validate_options_and_args(options=None, args=None):\n if not os.path.exists(options.config_file):\n raise Exception(\"File %s does not exist\" % (options.config_file))\n if not os.access(options.config_file, os.R_OK):\n raise Exception(\"No read access for %s\" % (options.config_file))\n config_stats = os.stat(options.config_file)\n if ((config_stats.st_mode & stat.S_IRWXG) |\n (config_stats.st_mode & stat.S_IRWXO)):\n raise Exception(\"File %s has 
open group or other permissions\" %\n (options.config_file))\n return True\n\n\ndef non_destructive_minimal_parse_and_validate_args(args=None):\n args = args or sys.argv[:]\n parser, options, args = common_parse_args(args)\n common_validate_options_and_args(options, args)\n return options, args\n\n\ndef archivewal_parse_args(args=None):\n archivewal_usage = ' '.join([\n os.path.basename(sys.argv[0]),\n '[options]',\n '<path_to_wal_file_to_archive>'])\n parser = create_common_parser(usage=archivewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef is_relative_path(wal_path):\n return not os.path.isabs(wal_path)\n\n\ndef wal_file_exists(config, wal_path):\n return os.path.isfile(get_wal_filename(config, wal_path))\n\n\ndef get_wal_filename(config, wal_path):\n data_dir = get_data_dir(config)\n return os.path.join(data_dir, wal_path)\n\n\ndef is_valid_file(config, wal_path):\n return is_relative_path(wal_path) and wal_file_exists(config, wal_path)\n\n\ndef archivewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n config = get_config_from_filename_and_set_up_logging(options.config_file)\n if len(args) != 1 or not is_valid_file(config, args[0]):\n raise Exception('A relative path to a WAL file to be archived' \\\n ' must be provided!')\n return True\n\n\ndef archivepgsql_parse_args(args=None):\n archivepgsql_usage = ' '.join([\n os.path.basename(sys.argv[0]),\n '[options]'])\n parser = create_common_parser(usage=archivepgsql_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef archivepgsql_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n if args[0].startswith('pg_xlog'):\n raise UsedArchivepgsqlAsArchiveWAL('archivepgsql was called with' \\\n ' a WAL file path as an argument. 
This is' \\\n ' probably due to configuring archivepgsql' \\\n ' as the archive_command in the PGSQL' \\\n ' configuration instead of archivewal.')\n raise TooManyArgumentsException('archivepgsql should not be called' \\\n ' with any arguments. Are you using it as the' \\\n ' archive_command instead of archivewal?')\n return True\n\n\ndef restorewal_parse_args(args=None):\n restorewal_usage = ' '.join([\n os.path.basename(sys.argv[0]),\n '[options]',\n '<name_of_wal_file_to_restore>',\n '<path_to_write_restored_file>',\n ])\n parser = create_common_parser(usage=restorewal_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef restorewal_validate_options_and_args(options=None, args=None):\n args = args or []\n if not common_validate_options_and_args(options, args):\n return False\n nargs = len(args)\n if nargs != 2:\n raise Exception('restorewal must be given the name of the WAL' \\\n ' file to retrieve and the destination path to' \\\n ' restore to.')\n return True\n\n\ndef storagestats_parse_args(args=None):\n storagestats_usage = ' '.join([\n os.path.basename(sys.argv[0]),\n '[options]'])\n parser = create_common_parser(usage=storagestats_usage)\n options, args = parser.parse_args(args)\n return parser, options, args\n\n\ndef storagestats_validate_options_and_args(options=None, args=None):\n if not common_validate_options_and_args(options, args):\n return False\n if args:\n raise TooManyArgumentsException('storagestats takes no arguments')\n return True\n",
"step-ids": [
25,
26,
29,
31,
32
]
}
|
[
25,
26,
29,
31,
32
] |
<|reserved_special_token_0|>
class Test(unittest.TestCase):
<|reserved_special_token_0|>
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl):
return
with open(pkl, 'rb') as f:
if sys.version_info.major == 3:
d = pickle.load(f, encoding='latin1')
else:
d = pickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.
testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf), 3)
self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath(os.path.dirname(__file__))
cls.ample_share = SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl):
return
with open(pkl, 'rb') as f:
if sys.version_info.major == 3:
d = pickle.load(f, encoding='latin1')
else:
d = pickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.
testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf), 3)
self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath(os.path.dirname(__file__))
cls.ample_share = SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl):
return
with open(pkl, 'rb') as f:
if sys.version_info.major == 3:
d = pickle.load(f, encoding='latin1')
else:
d = pickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.
testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf), 3)
self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pickle
import os
import sys
import unittest
from ample.constants import AMPLE_PKL, SHARE_DIR
from ample.util import mrbump_util
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath(os.path.dirname(__file__))
cls.ample_share = SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl):
return
with open(pkl, 'rb') as f:
if sys.version_info.major == 3:
d = pickle.load(f, encoding='latin1')
else:
d = pickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.
testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf), 3)
self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
"""Test functions for util.mrbump_util"""
import pickle
import os
import sys
import unittest
from ample.constants import AMPLE_PKL, SHARE_DIR
from ample.util import mrbump_util
class Test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thisd = os.path.abspath(os.path.dirname(__file__))
cls.ample_share = SHARE_DIR
cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')
def test_final_summary(self):
pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)
if not os.path.isfile(pkl):
return
with open(pkl, 'rb') as f:
if sys.version_info.major == 3:
d = pickle.load(f, encoding='latin1')
else:
d = pickle.load(f)
summary = mrbump_util.finalSummary(d)
self.assertIsNotNone(summary)
def test_topfiles(self):
topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.testfiles_dir, AMPLE_PKL)).topFiles()
self.assertEqual(len(topf), 3)
self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "f6dd5acc75d1a85a996629e22e81cdef316c1dcd",
"index": 8939,
"step-1": "<mask token>\n\n\nclass Test(unittest.TestCase):\n <mask token>\n\n def test_final_summary(self):\n pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)\n if not os.path.isfile(pkl):\n return\n with open(pkl, 'rb') as f:\n if sys.version_info.major == 3:\n d = pickle.load(f, encoding='latin1')\n else:\n d = pickle.load(f)\n summary = mrbump_util.finalSummary(d)\n self.assertIsNotNone(summary)\n\n def test_topfiles(self):\n topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.\n testfiles_dir, AMPLE_PKL)).topFiles()\n self.assertEqual(len(topf), 3)\n self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.thisd = os.path.abspath(os.path.dirname(__file__))\n cls.ample_share = SHARE_DIR\n cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')\n\n def test_final_summary(self):\n pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)\n if not os.path.isfile(pkl):\n return\n with open(pkl, 'rb') as f:\n if sys.version_info.major == 3:\n d = pickle.load(f, encoding='latin1')\n else:\n d = pickle.load(f)\n summary = mrbump_util.finalSummary(d)\n self.assertIsNotNone(summary)\n\n def test_topfiles(self):\n topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.\n testfiles_dir, AMPLE_PKL)).topFiles()\n self.assertEqual(len(topf), 3)\n self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.thisd = os.path.abspath(os.path.dirname(__file__))\n cls.ample_share = SHARE_DIR\n cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')\n\n def test_final_summary(self):\n pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)\n if not os.path.isfile(pkl):\n return\n with open(pkl, 'rb') as f:\n if sys.version_info.major == 3:\n d = pickle.load(f, encoding='latin1')\n else:\n d = pickle.load(f)\n summary = mrbump_util.finalSummary(d)\n self.assertIsNotNone(summary)\n\n def test_topfiles(self):\n topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.\n testfiles_dir, AMPLE_PKL)).topFiles()\n self.assertEqual(len(topf), 3)\n self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport pickle\nimport os\nimport sys\nimport unittest\nfrom ample.constants import AMPLE_PKL, SHARE_DIR\nfrom ample.util import mrbump_util\n\n\nclass Test(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.thisd = os.path.abspath(os.path.dirname(__file__))\n cls.ample_share = SHARE_DIR\n cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')\n\n def test_final_summary(self):\n pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)\n if not os.path.isfile(pkl):\n return\n with open(pkl, 'rb') as f:\n if sys.version_info.major == 3:\n d = pickle.load(f, encoding='latin1')\n else:\n d = pickle.load(f)\n summary = mrbump_util.finalSummary(d)\n self.assertIsNotNone(summary)\n\n def test_topfiles(self):\n topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.\n testfiles_dir, AMPLE_PKL)).topFiles()\n self.assertEqual(len(topf), 3)\n self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "\"\"\"Test functions for util.mrbump_util\"\"\"\n\nimport pickle\nimport os\nimport sys\nimport unittest\n\nfrom ample.constants import AMPLE_PKL, SHARE_DIR\nfrom ample.util import mrbump_util\n\n\nclass Test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.thisd = os.path.abspath(os.path.dirname(__file__))\n cls.ample_share = SHARE_DIR\n cls.testfiles_dir = os.path.join(cls.ample_share, 'testfiles')\n\n def test_final_summary(self):\n pkl = os.path.join(self.testfiles_dir, AMPLE_PKL)\n if not os.path.isfile(pkl):\n return\n with open(pkl, 'rb') as f:\n if sys.version_info.major == 3:\n d = pickle.load(f, encoding='latin1')\n else:\n d = pickle.load(f)\n summary = mrbump_util.finalSummary(d)\n self.assertIsNotNone(summary)\n\n def test_topfiles(self):\n topf = mrbump_util.ResultsSummary(results_pkl=os.path.join(self.testfiles_dir, AMPLE_PKL)).topFiles()\n self.assertEqual(len(topf), 3)\n self.assertEqual(topf[2]['info'], 'SHELXE trace of MR result')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#implement variable!
import numpy as np
class Variable:
def __init__(self, data):
self.data = data
class Function:
'''
Base class
specific functions are implemented in the inherited class
'''
def __call__(self, input):
x = input.data #data extract
y = self.foward(x)
output = Variable(y) #here! is key point
return output
def foward(self, x):
raise NotImplementedError()
class Square(Function):
def foward(self, x):
return x ** 2
class Exp(Function):
def foward(self, x):
return np.exp(x)
# input/output of a Function.__call__ is unified as a variable instance.
square = Square()
exp = Exp()
# like a composite function
# x -> [Square] -> a -> [Exp] -> b -> [Square] -> y
x = Variable(np.array(0.5))
a = square(x)
b = exp(a)
y = square(b)
print(y.data)
|
normal
|
{
"blob_id": "9efd83524ebb598f30c8fb6c0f9f0c65333578e6",
"index": 6292,
"step-1": "<mask token>\n\n\nclass Function:\n <mask token>\n <mask token>\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, data):\n self.data = data\n\n\nclass Function:\n \"\"\"\n Base class\n specific functions are implemented in the inherited class\n \"\"\"\n\n def __call__(self, input):\n x = input.data\n y = self.foward(x)\n output = Variable(y)\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, data):\n self.data = data\n\n\nclass Function:\n \"\"\"\n Base class\n specific functions are implemented in the inherited class\n \"\"\"\n\n def __call__(self, input):\n x = input.data\n y = self.foward(x)\n output = Variable(y)\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\n<mask token>\nprint(y.data)\n",
"step-4": "<mask token>\n\n\nclass Variable:\n\n def __init__(self, data):\n self.data = data\n\n\nclass Function:\n \"\"\"\n Base class\n specific functions are implemented in the inherited class\n \"\"\"\n\n def __call__(self, input):\n x = input.data\n y = self.foward(x)\n output = Variable(y)\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\n\nclass Square(Function):\n\n def foward(self, x):\n return x ** 2\n\n\nclass Exp(Function):\n\n def foward(self, x):\n return np.exp(x)\n\n\nsquare = Square()\nexp = Exp()\nx = Variable(np.array(0.5))\na = square(x)\nb = exp(a)\ny = square(b)\nprint(y.data)\n",
"step-5": "#implement variable!\nimport numpy as np\n\nclass Variable:\n def __init__(self, data):\n self.data = data\n\nclass Function:\n '''\n Base class\n specific functions are implemented in the inherited class\n '''\n def __call__(self, input): \n x = input.data #data extract\n y = self.foward(x)\n output = Variable(y) #here! is key point\n return output\n\n def foward(self, x):\n raise NotImplementedError()\n\nclass Square(Function):\n def foward(self, x):\n return x ** 2\n\nclass Exp(Function):\n def foward(self, x):\n return np.exp(x)\n\n# input/output of a Function.__call__ is unified as a variable instance.\nsquare = Square()\nexp = Exp()\n\n# like a composite function\n# x -> [Square] -> a -> [Exp] -> b -> [Square] -> y\nx = Variable(np.array(0.5))\na = square(x)\nb = exp(a)\ny = square(b)\nprint(y.data)\n\n\n",
"step-ids": [
6,
10,
11,
12,
14
]
}
|
[
6,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, format=
'[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
level=logging.DEBUG)
client = docker.from_env()
logging.info(str([x.name for x in client.volumes.list()]))
if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:
client.volumes.create('airflow_pg_data')
logging.info(str([x.name for x in client.containers.list()]))
if 'airflow_pg' not in [x.name for x in client.containers.list()]:
pg = client.containers.run(image='postgres', name='airflow_pg',
auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':
'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':
'/airflow/data'}, volumes={'airflow_pg_data': {'bind':
'/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})
<|reserved_special_token_1|>
import docker
import logging
import sys
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, format=
'[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
level=logging.DEBUG)
client = docker.from_env()
logging.info(str([x.name for x in client.volumes.list()]))
if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:
client.volumes.create('airflow_pg_data')
logging.info(str([x.name for x in client.containers.list()]))
if 'airflow_pg' not in [x.name for x in client.containers.list()]:
pg = client.containers.run(image='postgres', name='airflow_pg',
auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':
'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':
'/airflow/data'}, volumes={'airflow_pg_data': {'bind':
'/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})
<|reserved_special_token_1|>
import docker
import logging
import sys
if __name__ == '__main__':
# setting up logger
logging.basicConfig(stream=sys.stdout,
format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
level=logging.DEBUG)
# get the docker client
client = docker.from_env()
# list out docker volumes
logging.info(str([x.name for x in client.volumes.list()]))
# Check if airflow backend volume is created or not
# if the volume is not created then create it
if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:
client.volumes.create('airflow_pg_data')
# kill container if it is already running
logging.info(str([x.name for x in client.containers.list()]))
if 'airflow_pg' not in [x.name for x in client.containers.list()]:
# launch postgres backend
pg = client.containers.run(image='postgres',
name='airflow_pg',
auto_remove=True,
detach=True,
environment={
'POSTGRES_PASSWORD': 'airflow',
'POSTGRES_USER': 'airflow',
'PGDATA': '/airflow/data'
},
volumes={'airflow_pg_data': {'bind': '/airflow/data', 'mode': 'rw'}},
ports={'5432/tcp': 5432}
)
|
flexible
|
{
"blob_id": "a5c9ff1fe250310216e2eaa7a6ff5cc76fc10f94",
"index": 4324,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n client = docker.from_env()\n logging.info(str([x.name for x in client.volumes.list()]))\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n pg = client.containers.run(image='postgres', name='airflow_pg',\n auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':\n 'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':\n '/airflow/data'}, volumes={'airflow_pg_data': {'bind':\n '/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})\n",
"step-3": "import docker\nimport logging\nimport sys\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n client = docker.from_env()\n logging.info(str([x.name for x in client.volumes.list()]))\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n pg = client.containers.run(image='postgres', name='airflow_pg',\n auto_remove=True, detach=True, environment={'POSTGRES_PASSWORD':\n 'airflow', 'POSTGRES_USER': 'airflow', 'PGDATA':\n '/airflow/data'}, volumes={'airflow_pg_data': {'bind':\n '/airflow/data', 'mode': 'rw'}}, ports={'5432/tcp': 5432})\n",
"step-4": "import docker\nimport logging\nimport sys\n\nif __name__ == '__main__':\n\n # setting up logger\n logging.basicConfig(stream=sys.stdout,\n format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level=logging.DEBUG)\n\n # get the docker client\n client = docker.from_env()\n\n # list out docker volumes\n logging.info(str([x.name for x in client.volumes.list()]))\n\n # Check if airflow backend volume is created or not\n # if the volume is not created then create it\n if 'airflow_pg_data' not in [x.name for x in client.volumes.list()]:\n client.volumes.create('airflow_pg_data')\n\n # kill container if it is already running\n logging.info(str([x.name for x in client.containers.list()]))\n if 'airflow_pg' not in [x.name for x in client.containers.list()]:\n\n # launch postgres backend\n pg = client.containers.run(image='postgres',\n name='airflow_pg',\n auto_remove=True,\n detach=True,\n environment={\n 'POSTGRES_PASSWORD': 'airflow',\n 'POSTGRES_USER': 'airflow',\n 'PGDATA': '/airflow/data'\n },\n volumes={'airflow_pg_data': {'bind': '/airflow/data', 'mode': 'rw'}},\n ports={'5432/tcp': 5432}\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.03448275862068966,
0.03508771929824561), 'tuned_ensemble': ({'svm__C': 100000.0,
'rf__n_estimators': 101, 'cart__min_samples_leaf': 7,
'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33,
'cart__max_features': 0.3571428571428572, 'svm__kernel': 'sigmoid',
'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11,
'cart__random_state': 1542, 'nb__priors': None, 'knn__weights':
'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features':
0.439795918367347, 'cart__min_samples_split': 18}, 0.2891566265060241,
0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882,
0.3529411764705882), 'best_param_ensemble': ({}, 0.2891566265060241,
0.2988505747126437), 'rf': ({'min_samples_split': 17,
'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542,
'max_leaf_nodes': 46, 'max_features': 0.9448979591836735},
0.27083333333333337, 0.380952380952381), 'cart': ({'max_depth': 50,
'random_state': 1542, 'max_features': 0.19183673469387758,
'min_samples_split': 13, 'min_samples_leaf': 5}, 0.3119266055045872,
0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'},
0.23529411764705882, 0.23749999999999996)}}
<|reserved_special_token_1|>
{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.034482758620689662, 0.035087719298245612), 'tuned_ensemble': ({'svm__C': 100000.0, 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7, 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33, 'cart__max_features': 0.35714285714285721, 'svm__kernel': 'sigmoid', 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11, 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights': 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': 0.43979591836734699, 'cart__min_samples_split': 18}, 0.28915662650602408, 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, 0.3529411764705882), 'best_param_ensemble': ({}, 0.28915662650602408, 0.2988505747126437), 'rf': ({'min_samples_split': 17, 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542, 'max_leaf_nodes': 46, 'max_features': 0.94489795918367347}, 0.27083333333333337, 0.38095238095238099), 'cart': ({'max_depth': 50, 'random_state': 1542, 'max_features': 0.19183673469387758, 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.31192660550458717, 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, 0.23529411764705882, 0.23749999999999996)}}
|
flexible
|
{
"blob_id": "fa02fb701b59728671a7e87147adaeb33422dcdb",
"index": 1600,
"step-1": "<mask token>\n",
"step-2": "{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.03448275862068966, \n 0.03508771929824561), 'tuned_ensemble': ({'svm__C': 100000.0,\n 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7,\n 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33,\n 'cart__max_features': 0.3571428571428572, 'svm__kernel': 'sigmoid',\n 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11,\n 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights':\n 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': \n 0.439795918367347, 'cart__min_samples_split': 18}, 0.2891566265060241, \n 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, \n 0.3529411764705882), 'best_param_ensemble': ({}, 0.2891566265060241, \n 0.2988505747126437), 'rf': ({'min_samples_split': 17,\n 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542,\n 'max_leaf_nodes': 46, 'max_features': 0.9448979591836735}, \n 0.27083333333333337, 0.380952380952381), 'cart': ({'max_depth': 50,\n 'random_state': 1542, 'max_features': 0.19183673469387758,\n 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.3119266055045872, \n 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, \n 0.23529411764705882, 0.23749999999999996)}}\n",
"step-3": "{'ivy': {'svm': ({'kernel': 'rbf', 'C': 10.0}, 0.034482758620689662, 0.035087719298245612), 'tuned_ensemble': ({'svm__C': 100000.0, 'rf__n_estimators': 101, 'cart__min_samples_leaf': 7, 'knn__n_neighbors': 2, 'rf__random_state': 1542, 'cart__max_depth': 33, 'cart__max_features': 0.35714285714285721, 'svm__kernel': 'sigmoid', 'rf__max_leaf_nodes': 2, 'rf__min_samples_split': 11, 'cart__random_state': 1542, 'nb__priors': None, 'knn__weights': 'uniform', 'rf__min_samples_leaf': 16, 'rf__max_features': 0.43979591836734699, 'cart__min_samples_split': 18}, 0.28915662650602408, 0.34146341463414637), 'nb': ({'priors': None}, 0.3529411764705882, 0.3529411764705882), 'best_param_ensemble': ({}, 0.28915662650602408, 0.2988505747126437), 'rf': ({'min_samples_split': 17, 'min_samples_leaf': 1, 'n_estimators': 61, 'random_state': 1542, 'max_leaf_nodes': 46, 'max_features': 0.94489795918367347}, 0.27083333333333337, 0.38095238095238099), 'cart': ({'max_depth': 50, 'random_state': 1542, 'max_features': 0.19183673469387758, 'min_samples_split': 13, 'min_samples_leaf': 5}, 0.31192660550458717, 0.2105263157894737), 'knn': ({'n_neighbors': 8, 'weights': 'uniform'}, 0.23529411764705882, 0.23749999999999996)}}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class People:
<|reserved_special_token_0|>
def eat(self):
pass
print('%s is eating...' % self.name)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Man(People):
def __init__(self, name, age, money):
super(Man, self).__init__(name, age)
self.money = money
print('%s 一出生就有%s money...' % (name, money))
def piao(self):
print('%s is piaoing...20s...isdone' % self.name)
def sleep(self):
print('man is sleeping')
class Women(People):
pass
def get_birth(self):
print('%s is born a baby....' % self.name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class People:
def __init__(self, name, age):
self.name = name
self.age = age
def eat(self):
pass
print('%s is eating...' % self.name)
<|reserved_special_token_0|>
def talk(self):
print('%s is talking...' % self.name)
class Man(People):
def __init__(self, name, age, money):
super(Man, self).__init__(name, age)
self.money = money
print('%s 一出生就有%s money...' % (name, money))
def piao(self):
print('%s is piaoing...20s...isdone' % self.name)
def sleep(self):
print('man is sleeping')
class Women(People):
pass
def get_birth(self):
print('%s is born a baby....' % self.name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class People:
def __init__(self, name, age):
self.name = name
self.age = age
def eat(self):
pass
print('%s is eating...' % self.name)
def sleep(self):
print('%s is sleeping...' % self.name)
def talk(self):
print('%s is talking...' % self.name)
class Man(People):
def __init__(self, name, age, money):
super(Man, self).__init__(name, age)
self.money = money
print('%s 一出生就有%s money...' % (name, money))
def piao(self):
print('%s is piaoing...20s...isdone' % self.name)
def sleep(self):
print('man is sleeping')
class Women(People):
pass
def get_birth(self):
print('%s is born a baby....' % self.name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class People:
def __init__(self, name, age):
self.name = name
self.age = age
def eat(self):
pass
print('%s is eating...' % self.name)
def sleep(self):
print('%s is sleeping...' % self.name)
def talk(self):
print('%s is talking...' % self.name)
class Man(People):
def __init__(self, name, age, money):
super(Man, self).__init__(name, age)
self.money = money
print('%s 一出生就有%s money...' % (name, money))
def piao(self):
print('%s is piaoing...20s...isdone' % self.name)
def sleep(self):
print('man is sleeping')
class Women(People):
pass
def get_birth(self):
print('%s is born a baby....' % self.name)
<|reserved_special_token_0|>
m1.eat()
m1.sleep()
m1.talk()
m1.piao()
<|reserved_special_token_0|>
w1.get_birth()
<|reserved_special_token_1|>
__author__ = 'Administrator'
class People:
def __init__(self,name,age):
self.name = name
self.age = age
def eat(self):
pass
print("%s is eating..." % self.name)
def sleep(self):
print("%s is sleeping..." % self.name)
def talk(self):
print("%s is talking..." % self.name)
class Man(People):
def __init__(self,name,age,money):
# People.__init__(self,name,age)
super(Man,self).__init__(name,age)
self.money = money
print("%s 一出生就有%s money..." % (name,money))
def piao(self):
print("%s is piaoing...20s...isdone" % self.name)
def sleep(self):
#People.sleep(self)
print("man is sleeping")
class Women(People):
pass
def get_birth(self):
print("%s is born a baby...." % self.name)
m1 = Man("chenronghua",22,10000)
m1.eat()
m1.sleep()
m1.talk()
m1.piao()
w1 = Women("ronghua",26)
w1.get_birth()
|
flexible
|
{
"blob_id": "6fdc9b2091652b05d6c1207d2f78b75c880fadda",
"index": 9084,
"step-1": "<mask token>\n\n\nclass People:\n <mask token>\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n <mask token>\n <mask token>\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n <mask token>\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n\n def sleep(self):\n print('%s is sleeping...' % self.name)\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n\n def sleep(self):\n print('%s is sleeping...' % self.name)\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\nm1.eat()\nm1.sleep()\nm1.talk()\nm1.piao()\n<mask token>\nw1.get_birth()\n",
"step-5": "__author__ = 'Administrator'\n\n\nclass People:\n def __init__(self,name,age):\n self.name = name\n self.age = age\n def eat(self):\n pass\n print(\"%s is eating...\" % self.name)\n\n def sleep(self):\n print(\"%s is sleeping...\" % self.name)\n\n def talk(self):\n print(\"%s is talking...\" % self.name)\n\nclass Man(People):\n def __init__(self,name,age,money):\n # People.__init__(self,name,age)\n super(Man,self).__init__(name,age)\n\n self.money = money\n print(\"%s 一出生就有%s money...\" % (name,money))\n def piao(self):\n print(\"%s is piaoing...20s...isdone\" % self.name)\n\n def sleep(self):\n #People.sleep(self)\n print(\"man is sleeping\")\n\nclass Women(People):\n pass\n def get_birth(self):\n print(\"%s is born a baby....\" % self.name)\n\n\n\nm1 = Man(\"chenronghua\",22,10000)\n\nm1.eat()\n\nm1.sleep()\n\nm1.talk()\n\nm1.piao()\n\n\nw1 = Women(\"ronghua\",26)\n\nw1.get_birth()\n\n\n\n\n\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
import streamlit as st
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
username=st.text_input ("username")
upload=st.file_uploader("uploadfile",type=['csv'])
button=st.button("submit")
if button==True:
df=pd.read_csv(upload)
st.write(df.head())
fig = plt.figure()
my = fig.add_subplot(1,1,1)
my.scatter(df["sepal.length"],df["petal.length"],)
my.set_xlabel("sepal.length")
my.set_ylabel("petal.length")
st.write(fig)
|
normal
|
{
"blob_id": "72f1547ea7de78a5fe4b583523e592fa25c0ee77",
"index": 2467,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n",
"step-3": "<mask token>\nusername = st.text_input('username')\nupload = st.file_uploader('uploadfile', type=['csv'])\nbutton = st.button('submit')\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n",
"step-4": "import streamlit as st\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nusername = st.text_input('username')\nupload = st.file_uploader('uploadfile', type=['csv'])\nbutton = st.button('submit')\nif button == True:\n df = pd.read_csv(upload)\n st.write(df.head())\n fig = plt.figure()\n my = fig.add_subplot(1, 1, 1)\n my.scatter(df['sepal.length'], df['petal.length'])\n my.set_xlabel('sepal.length')\n my.set_ylabel('petal.length')\n st.write(fig)\n",
"step-5": "import streamlit as st\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nusername=st.text_input (\"username\")\r\nupload=st.file_uploader(\"uploadfile\",type=['csv'])\r\nbutton=st.button(\"submit\")\r\nif button==True:\r\n df=pd.read_csv(upload)\r\n st.write(df.head())\r\n fig = plt.figure()\r\n my = fig.add_subplot(1,1,1)\r\n my.scatter(df[\"sepal.length\"],df[\"petal.length\"],)\r\n my.set_xlabel(\"sepal.length\")\r\n my.set_ylabel(\"petal.length\")\r\n st.write(fig)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='zuknuft', version='0.1', author='riotbib', author_email=
'riotbib@github', scripts=['zukunft.py'], install_requires=['bottle'])
<|reserved_special_token_1|>
from distutils.core import setup
setup(name='zuknuft', version='0.1', author='riotbib', author_email=
'riotbib@github', scripts=['zukunft.py'], install_requires=['bottle'])
<|reserved_special_token_1|>
from distutils.core import setup
setup(
name="zuknuft",
version="0.1",
author="riotbib",
author_email="riotbib@github",
scripts=["zukunft.py"],
install_requires=[
'bottle',
],
)
|
flexible
|
{
"blob_id": "638842cda666100ce197437cb354f66de77eb328",
"index": 8065,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='zuknuft', version='0.1', author='riotbib', author_email=\n 'riotbib@github', scripts=['zukunft.py'], install_requires=['bottle'])\n",
"step-3": "from distutils.core import setup\nsetup(name='zuknuft', version='0.1', author='riotbib', author_email=\n 'riotbib@github', scripts=['zukunft.py'], install_requires=['bottle'])\n",
"step-4": "from distutils.core import setup\n\nsetup(\n name=\"zuknuft\",\n version=\"0.1\",\n author=\"riotbib\",\n author_email=\"riotbib@github\",\n scripts=[\"zukunft.py\"],\n install_requires=[\n 'bottle',\n ],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START gae_python37_app]
from flask import Flask
from bs4 import BeautifulSoup
import requests
import datetime
import regex as re
import unicodedata
from pyopenmensa import feed as op
from lxml import etree
class UnexpectedFormatError(AttributeError):
pass
WARNING = 'No Mensa path!'
CATEGORY_KEY = "cat"
MAIN_MEAL_KEY = "mm"
ADDITION_KEY = "a"
PRICE_KEY = "p"
DATE_KEY = "d"
STUDENT_KEY = "student"
EMPLOYEE_KEY = "employee"
MENSAE = ["westerberg", "mschlossg", "mhaste", "mvechta"]
def get_meals(_mensa, date=None):
result = requests.get(f"https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}")
if result.status_code == 200:
content = result.content
else:
raise ConnectionError
b_soup = BeautifulSoup(content, "html.parser")
unparsed_meals = b_soup.find_all(
href=lambda href: href and re.compile(f"mensa={_mensa}#{_mensa}_tag_20\d{{3,5}}_essen").search(href))
_meals = []
for meal in unparsed_meals:
category = meal.parent.previous_sibling.previous_sibling.text
meal_info = meal.find_all(["h3", "p"])
if len(meal_info) != 3:
raise UnexpectedFormatError("More than 3 meal info")
meal_info = [unicodedata.normalize("NFKD", info.text).replace("\xad", "") for info in meal_info]
_main_meal, _additional, price = meal_info
if price == "-":
price = {}
else:
price_search = re.compile("((\d+,\d{2})|-)\D*((\d+,\d{2})|-)").search(price)
if not price_search:
raise UnexpectedFormatError(f"price formation error {price}")
try:
stud_price_str = price_search.group(2)
emp_price_str = price_search.group(4)
price = {STUDENT_KEY: float(stud_price_str.replace(",", ".")) if stud_price_str else None,
EMPLOYEE_KEY: float(emp_price_str.replace(",", ".")) if emp_price_str else None}
except ValueError:
raise UnexpectedFormatError(f"price formation error {price_search.groups()}")
date_search = re.compile("tag_(\d{4})(\d{1,3})").search(meal["href"])
if not date_search:
raise UnexpectedFormatError(f"Date formation error{meal['href']}")
try:
year, day = [int(group) for group in date_search.groups()]
except ValueError:
raise UnexpectedFormatError(f"Date formation error {year}, {day}")
if date:
date_days = (date - datetime.datetime(date.year, 1, 1)).days
if date_days != day or year != date.year:
continue
meal_date = datetime.datetime(year, 1, 1) + datetime.timedelta(day)
_meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,
ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY: meal_date.date()})
return _meals
def get_total_feed(mensa):
canteen = op.LazyBuilder()
meals = get_meals(mensa)
for meal in meals:
main_meal = meal[MAIN_MEAL_KEY]
additional = meal[ADDITION_KEY]
ing_reg = re.compile("\(((\d+|[a-n])(,(\d+|[a-n]))*)\)")
# noinspection PyTypeChecker
ingredients_match = ing_reg.findall(main_meal + " " + additional)
ingredients = list(set(",".join([ingred[0] for ingred in ingredients_match]).split(",")))
ingredients.sort()
ingredients = ",".join(ingredients)
main_meal = ing_reg.sub("", main_meal)
additional = ing_reg.sub("", additional)
notes = [note for note in [additional, ingredients] if len(note) > 0]
prices = {role: price for role, price in meal[PRICE_KEY].items() if price}
canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal,
notes if len(notes) > 0 else None, prices)
return canteen.toXMLFeed()
def validate(xml_data):
# with open("open-mensa-v2.xsd", 'r') as schema_file:
# xml_schema_str = schema_file.read()
#
# xml_schema_doc = etree.parse(StringIO(xml_schema_str))
# xml_schema = etree.XMLSchema(StringIO(xml_schema_doc))
# parse xml
try:
xml_schema_doc = etree.parse("./open-mensa-v2.xsd")
xml_schema = etree.XMLSchema(xml_schema_doc)
# doc = etree.parse(xml_data.encode())
print('XML well formed, syntax ok.')
etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=xml_schema))
# xml_schema.assertValid(doc)
print('XML valid, schema validation ok.')
# check for XML syntax errors
except etree.XMLSyntaxError as err:
raise UnexpectedFormatError(err)
except etree.DocumentInvalid as err:
print('Schema validation error, see error_schema.log')
raise UnexpectedFormatError(err)
# If `entrypoint` is not defined in app.yaml, App Engine will look for an app
# called `app` in `main.py`.
app = Flask(__name__)
@app.route(f'/<mensa>')
def mensa_feed(mensa):
if mensa not in MENSAE:
return WARNING
feed = get_total_feed(mensa)
validate(feed)
return feed
@app.route('/')
@app.route('/index')
def mensa_list():
mensae = "\n ".join(["<list-item>" + mensa + "</list-item>" for mensa in MENSAE])
response = f"""
Status: 404 Not Found
Content-Type: application/xml; charset=utf-8
'<?xml version="1.0" encoding="UTF-8"?>'
<error>
<code>404</code>
<message>Mensa not found</message>
<debug-data>
<list-desc>Valid filenames</list-desc>"
{mensae}
</debug-data>"
</error>"""
return response
if __name__ == '__main__':
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
# [END gae_python37_app]
|
normal
|
{
"blob_id": "9ce406124d36c2baf09cf0d95fceb2ad63948919",
"index": 4801,
"step-1": "<mask token>\n\n\nclass UnexpectedFormatError(AttributeError):\n pass\n\n\n<mask token>\n\n\ndef get_meals(_mensa, date=None):\n result = requests.get(\n f'https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}'\n )\n if result.status_code == 200:\n content = result.content\n else:\n raise ConnectionError\n b_soup = BeautifulSoup(content, 'html.parser')\n unparsed_meals = b_soup.find_all(href=lambda href: href and re.compile(\n f'mensa={_mensa}#{_mensa}_tag_20\\\\d{{3,5}}_essen').search(href))\n _meals = []\n for meal in unparsed_meals:\n category = meal.parent.previous_sibling.previous_sibling.text\n meal_info = meal.find_all(['h3', 'p'])\n if len(meal_info) != 3:\n raise UnexpectedFormatError('More than 3 meal info')\n meal_info = [unicodedata.normalize('NFKD', info.text).replace(\n '\\xad', '') for info in meal_info]\n _main_meal, _additional, price = meal_info\n if price == '-':\n price = {}\n else:\n price_search = re.compile('((\\\\d+,\\\\d{2})|-)\\\\D*((\\\\d+,\\\\d{2})|-)'\n ).search(price)\n if not price_search:\n raise UnexpectedFormatError(f'price formation error {price}')\n try:\n stud_price_str = price_search.group(2)\n emp_price_str = price_search.group(4)\n price = {STUDENT_KEY: float(stud_price_str.replace(',', '.'\n )) if stud_price_str else None, EMPLOYEE_KEY: float(\n emp_price_str.replace(',', '.')) if emp_price_str else None\n }\n except ValueError:\n raise UnexpectedFormatError(\n f'price formation error {price_search.groups()}')\n date_search = re.compile('tag_(\\\\d{4})(\\\\d{1,3})').search(meal['href'])\n if not date_search:\n raise UnexpectedFormatError(f\"Date formation error{meal['href']}\")\n try:\n year, day = [int(group) for group in date_search.groups()]\n except ValueError:\n raise UnexpectedFormatError(f'Date formation error {year}, {day}')\n if date:\n date_days = (date - datetime.datetime(date.year, 1, 1)).days\n if date_days != day or year != date.year:\n continue\n meal_date = 
datetime.datetime(year, 1, 1) + datetime.timedelta(day)\n _meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,\n ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY:\n meal_date.date()})\n return _meals\n\n\ndef get_total_feed(mensa):\n canteen = op.LazyBuilder()\n meals = get_meals(mensa)\n for meal in meals:\n main_meal = meal[MAIN_MEAL_KEY]\n additional = meal[ADDITION_KEY]\n ing_reg = re.compile('\\\\(((\\\\d+|[a-n])(,(\\\\d+|[a-n]))*)\\\\)')\n ingredients_match = ing_reg.findall(main_meal + ' ' + additional)\n ingredients = list(set(','.join([ingred[0] for ingred in\n ingredients_match]).split(',')))\n ingredients.sort()\n ingredients = ','.join(ingredients)\n main_meal = ing_reg.sub('', main_meal)\n additional = ing_reg.sub('', additional)\n notes = [note for note in [additional, ingredients] if len(note) > 0]\n prices = {role: price for role, price in meal[PRICE_KEY].items() if\n price}\n canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal, \n notes if len(notes) > 0 else None, prices)\n return canteen.toXMLFeed()\n\n\ndef validate(xml_data):\n try:\n xml_schema_doc = etree.parse('./open-mensa-v2.xsd')\n xml_schema = etree.XMLSchema(xml_schema_doc)\n print('XML well formed, syntax ok.')\n etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=\n xml_schema))\n print('XML valid, schema validation ok.')\n except etree.XMLSyntaxError as err:\n raise UnexpectedFormatError(err)\n except etree.DocumentInvalid as err:\n print('Schema validation error, see error_schema.log')\n raise UnexpectedFormatError(err)\n\n\n<mask token>\n\n\n@app.route(f'/<mensa>')\ndef mensa_feed(mensa):\n if mensa not in MENSAE:\n return WARNING\n feed = get_total_feed(mensa)\n validate(feed)\n return feed\n\n\n@app.route('/')\n@app.route('/index')\ndef mensa_list():\n mensae = '\\n '.join([('<list-item>' + mensa + '</list-item>') for\n mensa in MENSAE])\n response = f\"\"\"\n Status: 404 Not Found\n Content-Type: application/xml; charset=utf-8\n \n 
'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n <error>\n <code>404</code>\n <message>Mensa not found</message>\n <debug-data>\n <list-desc>Valid filenames</list-desc>\"\n {mensae}\n </debug-data>\"\n </error>\"\"\"\n return response\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UnexpectedFormatError(AttributeError):\n pass\n\n\n<mask token>\n\n\ndef get_meals(_mensa, date=None):\n result = requests.get(\n f'https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}'\n )\n if result.status_code == 200:\n content = result.content\n else:\n raise ConnectionError\n b_soup = BeautifulSoup(content, 'html.parser')\n unparsed_meals = b_soup.find_all(href=lambda href: href and re.compile(\n f'mensa={_mensa}#{_mensa}_tag_20\\\\d{{3,5}}_essen').search(href))\n _meals = []\n for meal in unparsed_meals:\n category = meal.parent.previous_sibling.previous_sibling.text\n meal_info = meal.find_all(['h3', 'p'])\n if len(meal_info) != 3:\n raise UnexpectedFormatError('More than 3 meal info')\n meal_info = [unicodedata.normalize('NFKD', info.text).replace(\n '\\xad', '') for info in meal_info]\n _main_meal, _additional, price = meal_info\n if price == '-':\n price = {}\n else:\n price_search = re.compile('((\\\\d+,\\\\d{2})|-)\\\\D*((\\\\d+,\\\\d{2})|-)'\n ).search(price)\n if not price_search:\n raise UnexpectedFormatError(f'price formation error {price}')\n try:\n stud_price_str = price_search.group(2)\n emp_price_str = price_search.group(4)\n price = {STUDENT_KEY: float(stud_price_str.replace(',', '.'\n )) if stud_price_str else None, EMPLOYEE_KEY: float(\n emp_price_str.replace(',', '.')) if emp_price_str else None\n }\n except ValueError:\n raise UnexpectedFormatError(\n f'price formation error {price_search.groups()}')\n date_search = re.compile('tag_(\\\\d{4})(\\\\d{1,3})').search(meal['href'])\n if not date_search:\n raise UnexpectedFormatError(f\"Date formation error{meal['href']}\")\n try:\n year, day = [int(group) for group in date_search.groups()]\n except ValueError:\n raise UnexpectedFormatError(f'Date formation error {year}, {day}')\n if date:\n date_days = (date - datetime.datetime(date.year, 1, 1)).days\n if date_days != day or year != date.year:\n continue\n meal_date = 
datetime.datetime(year, 1, 1) + datetime.timedelta(day)\n _meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,\n ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY:\n meal_date.date()})\n return _meals\n\n\ndef get_total_feed(mensa):\n canteen = op.LazyBuilder()\n meals = get_meals(mensa)\n for meal in meals:\n main_meal = meal[MAIN_MEAL_KEY]\n additional = meal[ADDITION_KEY]\n ing_reg = re.compile('\\\\(((\\\\d+|[a-n])(,(\\\\d+|[a-n]))*)\\\\)')\n ingredients_match = ing_reg.findall(main_meal + ' ' + additional)\n ingredients = list(set(','.join([ingred[0] for ingred in\n ingredients_match]).split(',')))\n ingredients.sort()\n ingredients = ','.join(ingredients)\n main_meal = ing_reg.sub('', main_meal)\n additional = ing_reg.sub('', additional)\n notes = [note for note in [additional, ingredients] if len(note) > 0]\n prices = {role: price for role, price in meal[PRICE_KEY].items() if\n price}\n canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal, \n notes if len(notes) > 0 else None, prices)\n return canteen.toXMLFeed()\n\n\ndef validate(xml_data):\n try:\n xml_schema_doc = etree.parse('./open-mensa-v2.xsd')\n xml_schema = etree.XMLSchema(xml_schema_doc)\n print('XML well formed, syntax ok.')\n etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=\n xml_schema))\n print('XML valid, schema validation ok.')\n except etree.XMLSyntaxError as err:\n raise UnexpectedFormatError(err)\n except etree.DocumentInvalid as err:\n print('Schema validation error, see error_schema.log')\n raise UnexpectedFormatError(err)\n\n\n<mask token>\n\n\n@app.route(f'/<mensa>')\ndef mensa_feed(mensa):\n if mensa not in MENSAE:\n return WARNING\n feed = get_total_feed(mensa)\n validate(feed)\n return feed\n\n\n@app.route('/')\n@app.route('/index')\ndef mensa_list():\n mensae = '\\n '.join([('<list-item>' + mensa + '</list-item>') for\n mensa in MENSAE])\n response = f\"\"\"\n Status: 404 Not Found\n Content-Type: application/xml; charset=utf-8\n \n 
'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n <error>\n <code>404</code>\n <message>Mensa not found</message>\n <debug-data>\n <list-desc>Valid filenames</list-desc>\"\n {mensae}\n </debug-data>\"\n </error>\"\"\"\n return response\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n",
"step-3": "<mask token>\n\n\nclass UnexpectedFormatError(AttributeError):\n pass\n\n\nWARNING = 'No Mensa path!'\nCATEGORY_KEY = 'cat'\nMAIN_MEAL_KEY = 'mm'\nADDITION_KEY = 'a'\nPRICE_KEY = 'p'\nDATE_KEY = 'd'\nSTUDENT_KEY = 'student'\nEMPLOYEE_KEY = 'employee'\nMENSAE = ['westerberg', 'mschlossg', 'mhaste', 'mvechta']\n\n\ndef get_meals(_mensa, date=None):\n result = requests.get(\n f'https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}'\n )\n if result.status_code == 200:\n content = result.content\n else:\n raise ConnectionError\n b_soup = BeautifulSoup(content, 'html.parser')\n unparsed_meals = b_soup.find_all(href=lambda href: href and re.compile(\n f'mensa={_mensa}#{_mensa}_tag_20\\\\d{{3,5}}_essen').search(href))\n _meals = []\n for meal in unparsed_meals:\n category = meal.parent.previous_sibling.previous_sibling.text\n meal_info = meal.find_all(['h3', 'p'])\n if len(meal_info) != 3:\n raise UnexpectedFormatError('More than 3 meal info')\n meal_info = [unicodedata.normalize('NFKD', info.text).replace(\n '\\xad', '') for info in meal_info]\n _main_meal, _additional, price = meal_info\n if price == '-':\n price = {}\n else:\n price_search = re.compile('((\\\\d+,\\\\d{2})|-)\\\\D*((\\\\d+,\\\\d{2})|-)'\n ).search(price)\n if not price_search:\n raise UnexpectedFormatError(f'price formation error {price}')\n try:\n stud_price_str = price_search.group(2)\n emp_price_str = price_search.group(4)\n price = {STUDENT_KEY: float(stud_price_str.replace(',', '.'\n )) if stud_price_str else None, EMPLOYEE_KEY: float(\n emp_price_str.replace(',', '.')) if emp_price_str else None\n }\n except ValueError:\n raise UnexpectedFormatError(\n f'price formation error {price_search.groups()}')\n date_search = re.compile('tag_(\\\\d{4})(\\\\d{1,3})').search(meal['href'])\n if not date_search:\n raise UnexpectedFormatError(f\"Date formation error{meal['href']}\")\n try:\n year, day = [int(group) for group in date_search.groups()]\n except ValueError:\n 
raise UnexpectedFormatError(f'Date formation error {year}, {day}')\n if date:\n date_days = (date - datetime.datetime(date.year, 1, 1)).days\n if date_days != day or year != date.year:\n continue\n meal_date = datetime.datetime(year, 1, 1) + datetime.timedelta(day)\n _meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,\n ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY:\n meal_date.date()})\n return _meals\n\n\ndef get_total_feed(mensa):\n canteen = op.LazyBuilder()\n meals = get_meals(mensa)\n for meal in meals:\n main_meal = meal[MAIN_MEAL_KEY]\n additional = meal[ADDITION_KEY]\n ing_reg = re.compile('\\\\(((\\\\d+|[a-n])(,(\\\\d+|[a-n]))*)\\\\)')\n ingredients_match = ing_reg.findall(main_meal + ' ' + additional)\n ingredients = list(set(','.join([ingred[0] for ingred in\n ingredients_match]).split(',')))\n ingredients.sort()\n ingredients = ','.join(ingredients)\n main_meal = ing_reg.sub('', main_meal)\n additional = ing_reg.sub('', additional)\n notes = [note for note in [additional, ingredients] if len(note) > 0]\n prices = {role: price for role, price in meal[PRICE_KEY].items() if\n price}\n canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal, \n notes if len(notes) > 0 else None, prices)\n return canteen.toXMLFeed()\n\n\ndef validate(xml_data):\n try:\n xml_schema_doc = etree.parse('./open-mensa-v2.xsd')\n xml_schema = etree.XMLSchema(xml_schema_doc)\n print('XML well formed, syntax ok.')\n etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=\n xml_schema))\n print('XML valid, schema validation ok.')\n except etree.XMLSyntaxError as err:\n raise UnexpectedFormatError(err)\n except etree.DocumentInvalid as err:\n print('Schema validation error, see error_schema.log')\n raise UnexpectedFormatError(err)\n\n\napp = Flask(__name__)\n\n\n@app.route(f'/<mensa>')\ndef mensa_feed(mensa):\n if mensa not in MENSAE:\n return WARNING\n feed = get_total_feed(mensa)\n validate(feed)\n return 
feed\n\n\n@app.route('/')\n@app.route('/index')\ndef mensa_list():\n mensae = '\\n '.join([('<list-item>' + mensa + '</list-item>') for\n mensa in MENSAE])\n response = f\"\"\"\n Status: 404 Not Found\n Content-Type: application/xml; charset=utf-8\n \n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n <error>\n <code>404</code>\n <message>Mensa not found</message>\n <debug-data>\n <list-desc>Valid filenames</list-desc>\"\n {mensae}\n </debug-data>\"\n </error>\"\"\"\n return response\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n",
"step-4": "from flask import Flask\nfrom bs4 import BeautifulSoup\nimport requests\nimport datetime\nimport regex as re\nimport unicodedata\nfrom pyopenmensa import feed as op\nfrom lxml import etree\n\n\nclass UnexpectedFormatError(AttributeError):\n pass\n\n\nWARNING = 'No Mensa path!'\nCATEGORY_KEY = 'cat'\nMAIN_MEAL_KEY = 'mm'\nADDITION_KEY = 'a'\nPRICE_KEY = 'p'\nDATE_KEY = 'd'\nSTUDENT_KEY = 'student'\nEMPLOYEE_KEY = 'employee'\nMENSAE = ['westerberg', 'mschlossg', 'mhaste', 'mvechta']\n\n\ndef get_meals(_mensa, date=None):\n result = requests.get(\n f'https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}'\n )\n if result.status_code == 200:\n content = result.content\n else:\n raise ConnectionError\n b_soup = BeautifulSoup(content, 'html.parser')\n unparsed_meals = b_soup.find_all(href=lambda href: href and re.compile(\n f'mensa={_mensa}#{_mensa}_tag_20\\\\d{{3,5}}_essen').search(href))\n _meals = []\n for meal in unparsed_meals:\n category = meal.parent.previous_sibling.previous_sibling.text\n meal_info = meal.find_all(['h3', 'p'])\n if len(meal_info) != 3:\n raise UnexpectedFormatError('More than 3 meal info')\n meal_info = [unicodedata.normalize('NFKD', info.text).replace(\n '\\xad', '') for info in meal_info]\n _main_meal, _additional, price = meal_info\n if price == '-':\n price = {}\n else:\n price_search = re.compile('((\\\\d+,\\\\d{2})|-)\\\\D*((\\\\d+,\\\\d{2})|-)'\n ).search(price)\n if not price_search:\n raise UnexpectedFormatError(f'price formation error {price}')\n try:\n stud_price_str = price_search.group(2)\n emp_price_str = price_search.group(4)\n price = {STUDENT_KEY: float(stud_price_str.replace(',', '.'\n )) if stud_price_str else None, EMPLOYEE_KEY: float(\n emp_price_str.replace(',', '.')) if emp_price_str else None\n }\n except ValueError:\n raise UnexpectedFormatError(\n f'price formation error {price_search.groups()}')\n date_search = re.compile('tag_(\\\\d{4})(\\\\d{1,3})').search(meal['href'])\n if not 
date_search:\n raise UnexpectedFormatError(f\"Date formation error{meal['href']}\")\n try:\n year, day = [int(group) for group in date_search.groups()]\n except ValueError:\n raise UnexpectedFormatError(f'Date formation error {year}, {day}')\n if date:\n date_days = (date - datetime.datetime(date.year, 1, 1)).days\n if date_days != day or year != date.year:\n continue\n meal_date = datetime.datetime(year, 1, 1) + datetime.timedelta(day)\n _meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,\n ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY:\n meal_date.date()})\n return _meals\n\n\ndef get_total_feed(mensa):\n canteen = op.LazyBuilder()\n meals = get_meals(mensa)\n for meal in meals:\n main_meal = meal[MAIN_MEAL_KEY]\n additional = meal[ADDITION_KEY]\n ing_reg = re.compile('\\\\(((\\\\d+|[a-n])(,(\\\\d+|[a-n]))*)\\\\)')\n ingredients_match = ing_reg.findall(main_meal + ' ' + additional)\n ingredients = list(set(','.join([ingred[0] for ingred in\n ingredients_match]).split(',')))\n ingredients.sort()\n ingredients = ','.join(ingredients)\n main_meal = ing_reg.sub('', main_meal)\n additional = ing_reg.sub('', additional)\n notes = [note for note in [additional, ingredients] if len(note) > 0]\n prices = {role: price for role, price in meal[PRICE_KEY].items() if\n price}\n canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal, \n notes if len(notes) > 0 else None, prices)\n return canteen.toXMLFeed()\n\n\ndef validate(xml_data):\n try:\n xml_schema_doc = etree.parse('./open-mensa-v2.xsd')\n xml_schema = etree.XMLSchema(xml_schema_doc)\n print('XML well formed, syntax ok.')\n etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=\n xml_schema))\n print('XML valid, schema validation ok.')\n except etree.XMLSyntaxError as err:\n raise UnexpectedFormatError(err)\n except etree.DocumentInvalid as err:\n print('Schema validation error, see error_schema.log')\n raise UnexpectedFormatError(err)\n\n\napp = 
Flask(__name__)\n\n\n@app.route(f'/<mensa>')\ndef mensa_feed(mensa):\n if mensa not in MENSAE:\n return WARNING\n feed = get_total_feed(mensa)\n validate(feed)\n return feed\n\n\n@app.route('/')\n@app.route('/index')\ndef mensa_list():\n mensae = '\\n '.join([('<list-item>' + mensa + '</list-item>') for\n mensa in MENSAE])\n response = f\"\"\"\n Status: 404 Not Found\n Content-Type: application/xml; charset=utf-8\n \n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n <error>\n <code>404</code>\n <message>Mensa not found</message>\n <debug-data>\n <list-desc>Valid filenames</list-desc>\"\n {mensae}\n </debug-data>\"\n </error>\"\"\"\n return response\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=8080, debug=True)\n",
"step-5": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# [START gae_python37_app]\nfrom flask import Flask\nfrom bs4 import BeautifulSoup\nimport requests\nimport datetime\nimport regex as re\nimport unicodedata\nfrom pyopenmensa import feed as op\nfrom lxml import etree\n\n\nclass UnexpectedFormatError(AttributeError):\n pass\n\n\nWARNING = 'No Mensa path!'\n\n\nCATEGORY_KEY = \"cat\"\nMAIN_MEAL_KEY = \"mm\"\nADDITION_KEY = \"a\"\nPRICE_KEY = \"p\"\nDATE_KEY = \"d\"\nSTUDENT_KEY = \"student\"\nEMPLOYEE_KEY = \"employee\"\n\nMENSAE = [\"westerberg\", \"mschlossg\", \"mhaste\", \"mvechta\"]\n\n\ndef get_meals(_mensa, date=None):\n result = requests.get(f\"https://osnabrueck.my-mensa.de/essen.php?v=5121119&hyp=1&lang=de&mensa={_mensa}\")\n if result.status_code == 200:\n content = result.content\n else:\n raise ConnectionError\n b_soup = BeautifulSoup(content, \"html.parser\")\n unparsed_meals = b_soup.find_all(\n href=lambda href: href and re.compile(f\"mensa={_mensa}#{_mensa}_tag_20\\d{{3,5}}_essen\").search(href))\n _meals = []\n for meal in unparsed_meals:\n category = meal.parent.previous_sibling.previous_sibling.text\n meal_info = meal.find_all([\"h3\", \"p\"])\n if len(meal_info) != 3:\n raise UnexpectedFormatError(\"More than 3 meal info\")\n meal_info = [unicodedata.normalize(\"NFKD\", info.text).replace(\"\\xad\", \"\") for info in meal_info]\n _main_meal, _additional, price = meal_info\n if price == \"-\":\n price = {}\n 
else:\n price_search = re.compile(\"((\\d+,\\d{2})|-)\\D*((\\d+,\\d{2})|-)\").search(price)\n if not price_search:\n raise UnexpectedFormatError(f\"price formation error {price}\")\n try:\n stud_price_str = price_search.group(2)\n emp_price_str = price_search.group(4)\n price = {STUDENT_KEY: float(stud_price_str.replace(\",\", \".\")) if stud_price_str else None,\n EMPLOYEE_KEY: float(emp_price_str.replace(\",\", \".\")) if emp_price_str else None}\n except ValueError:\n raise UnexpectedFormatError(f\"price formation error {price_search.groups()}\")\n date_search = re.compile(\"tag_(\\d{4})(\\d{1,3})\").search(meal[\"href\"])\n if not date_search:\n raise UnexpectedFormatError(f\"Date formation error{meal['href']}\")\n try:\n year, day = [int(group) for group in date_search.groups()]\n except ValueError:\n raise UnexpectedFormatError(f\"Date formation error {year}, {day}\")\n if date:\n date_days = (date - datetime.datetime(date.year, 1, 1)).days\n if date_days != day or year != date.year:\n continue\n meal_date = datetime.datetime(year, 1, 1) + datetime.timedelta(day)\n _meals.append({CATEGORY_KEY: category, MAIN_MEAL_KEY: _main_meal,\n ADDITION_KEY: _additional, PRICE_KEY: price, DATE_KEY: meal_date.date()})\n return _meals\n\n\ndef get_total_feed(mensa):\n canteen = op.LazyBuilder()\n meals = get_meals(mensa)\n for meal in meals:\n main_meal = meal[MAIN_MEAL_KEY]\n additional = meal[ADDITION_KEY]\n ing_reg = re.compile(\"\\(((\\d+|[a-n])(,(\\d+|[a-n]))*)\\)\")\n # noinspection PyTypeChecker\n ingredients_match = ing_reg.findall(main_meal + \" \" + additional)\n ingredients = list(set(\",\".join([ingred[0] for ingred in ingredients_match]).split(\",\")))\n ingredients.sort()\n ingredients = \",\".join(ingredients)\n main_meal = ing_reg.sub(\"\", main_meal)\n additional = ing_reg.sub(\"\", additional)\n notes = [note for note in [additional, ingredients] if len(note) > 0]\n prices = {role: price for role, price in meal[PRICE_KEY].items() if price}\n 
canteen.addMeal(meal[DATE_KEY], meal[CATEGORY_KEY], main_meal,\n notes if len(notes) > 0 else None, prices)\n return canteen.toXMLFeed()\n\n\ndef validate(xml_data):\n # with open(\"open-mensa-v2.xsd\", 'r') as schema_file:\n # xml_schema_str = schema_file.read()\n #\n # xml_schema_doc = etree.parse(StringIO(xml_schema_str))\n # xml_schema = etree.XMLSchema(StringIO(xml_schema_doc))\n\n # parse xml\n try:\n xml_schema_doc = etree.parse(\"./open-mensa-v2.xsd\")\n xml_schema = etree.XMLSchema(xml_schema_doc)\n # doc = etree.parse(xml_data.encode())\n print('XML well formed, syntax ok.')\n etree.fromstring(xml_data.encode(), parser=etree.XMLParser(schema=xml_schema))\n # xml_schema.assertValid(doc)\n print('XML valid, schema validation ok.')\n # check for XML syntax errors\n except etree.XMLSyntaxError as err:\n raise UnexpectedFormatError(err)\n except etree.DocumentInvalid as err:\n print('Schema validation error, see error_schema.log')\n raise UnexpectedFormatError(err)\n\n\n# If `entrypoint` is not defined in app.yaml, App Engine will look for an app\n# called `app` in `main.py`.\napp = Flask(__name__)\n\n\n@app.route(f'/<mensa>')\ndef mensa_feed(mensa):\n if mensa not in MENSAE:\n return WARNING\n feed = get_total_feed(mensa)\n validate(feed)\n return feed\n\n\n@app.route('/')\n@app.route('/index')\ndef mensa_list():\n mensae = \"\\n \".join([\"<list-item>\" + mensa + \"</list-item>\" for mensa in MENSAE])\n response = f\"\"\"\n Status: 404 Not Found\n Content-Type: application/xml; charset=utf-8\n \n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n <error>\n <code>404</code>\n <message>Mensa not found</message>\n <debug-data>\n <list-desc>Valid filenames</list-desc>\"\n {mensae}\n </debug-data>\"\n </error>\"\"\"\n return response\n\n\nif __name__ == '__main__':\n # This is used when running locally only. When deploying to Google App\n # Engine, a webserver process such as Gunicorn will serve the app. 
This\n # can be configured by adding an `entrypoint` to app.yaml.\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
import numpy as np
#!pip install pygame
import pygame
#from copy import deepcopy
pygame.init()  # initialise all pygame modules — pygame is otherwise unused in this chunk, presumably reserved for a future GUI (TODO confirm)
#-----------
# Modifications (Matthieu, 15/04):
# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.
# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)
# Les indices de la liste correspondant à chaque coupe sont par exemple :
# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)
# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)
# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy
# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)
# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout
# Algo alpha beta
# Pbs :
# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini
# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de %
# sont gagnées par l'ia contre un algo qui joue aléatoirement
# Améliorer la fonction d'évaluation qui est pour l'instant très basique
##-------------
# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),
# contenant initialement n graines. La première constitue le camp du joueur, la seconde, celle de l'ordinateur.
# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.
# A chaque tour, le joueur doit choisir un numéro de coupelle.
# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.
#
# modifs du 17.03 par Léo:
# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé
# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant
# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.
#Notions de classe:
#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes
#Explication de l'algorithme minimax général (page 52) :
#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf
#Code par Léo et Paul
#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)
# -> se pencher sur la fonction "partieFinie" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..
#Pb: structure d'arbre trop compliquée: (*)
#l'arbre est construit à partir d'une liste selon le principe suivant:
#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes
#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue
class terrainDeJeu:
    """Awale (Oware) board and game engine.

    The board is a single flat array of 2*nCoupes pits, numbered
    counter-clockwise:
        [11] [10] [9] [8] [7] [6]   computer's row (player 1)
        [0]  [1]  [2] [3] [4] [5]   human's row   (player 0)
    Player 1 is driven by a minimax / alpha-beta search of depth
    `profondeur`.
    """

    def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):
        """Build a board of 2*nCoupes pits, each seeded with nGrainesParCoupelle seeds.

        profondeur is the maximum search depth used by the AI.
        """
        self.plateau = np.full(2*nCoupes, nGrainesParCoupelle)
        self.nGrainesParCoupelleInit = nGrainesParCoupelle
        self.nCoupes = nCoupes
        self.scores = [0, 0]  # scores[0] = player 0's score, scores[1] = player 1's
        self.tour = 0  # player whose turn it is (0 = human, 1 = computer)
        self.finie = False  # True once the game is over
        self.profondeurMinimax = profondeur  # maximum search depth for minimax/alphabeta
        self.arbreFils = {}  # game-tree children of this node, keyed by pit id

    def clone(self):
        """Copy the board state so a move can be simulated on the copy."""
        clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.nGrainesParCoupelleInit)
        clone.plateau = self.plateau.copy()
        clone.scores = self.scores.copy()
        clone.tour = self.tour
        clone.finie = self.finie
        return clone

    def coupeSuivante(self, idCoupe):
        """Id of the pit following idCoupe on the board (counter-clockwise)."""
        return (idCoupe + 1) % (2*self.nCoupes)

    def coupePrecedente(self, idCoupe):
        """Id of the pit preceding idCoupe on the board (clockwise)."""
        return (idCoupe - 1) % (2*self.nCoupes)

    def joueurCoupe(self, idCoupe):
        """Player (0 or 1) who owns pit idCoupe."""
        return 0 if idCoupe < self.nCoupes else 1

    def coupePrenable(self, idCoupe):
        """True if pit idCoupe can be captured (it holds 2 or 3 seeds)."""
        return (self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3)

    def deplacer(self, joueur, idCoupe):
        """Play pit idCoupe for `joueur`: sow its seeds, capture when allowed,
        then hand the turn to the other player.
        """
        coupeInitiale = idCoupe  # id of the chosen pit
        nGraines = self.plateau[idCoupe]
        self.plateau[idCoupe] = 0
        while (nGraines != 0):  # redistribute the seeds of the chosen pit
            idCoupe = self.coupeSuivante(idCoupe)
            if (idCoupe != coupeInitiale):  # never sow back into the chosen pit
                self.plateau[idCoupe] += 1
                nGraines -= 1
        coupeFinale = idCoupe
        joueurCoupeFinale = self.joueurCoupe(coupeFinale)
        if (joueur != joueurCoupeFinale):
            # check whether capturing would starve the opponent;
            # if not, capture normally
            if (self.nourrirAdversaire(joueur, coupeFinale)):
                while (self.joueurCoupe(idCoupe) == joueurCoupeFinale and self.coupePrenable(idCoupe)):
                    self.scores[joueur] += self.plateau[idCoupe]
                    self.plateau[idCoupe] = 0
                    idCoupe = self.coupePrecedente(idCoupe)
            # if the opponent would be starved:
            # no seed is taken, so there is nothing to do
        self.tour = (self.tour + 1) % 2

    def grainesRestantes(self):
        """Total number of seeds left on the board."""
        return np.sum(self.plateau)

    def grainesRestantesJoueur(self, joueur):
        """Number of seeds left in `joueur`'s row of pits."""
        if joueur == 0:
            return np.sum(self.plateau[0:self.nCoupes])
        else:
            return np.sum(self.plateau[self.nCoupes:len(self.plateau)])

    def nourrirAdversaire(self, joueur, coupeFinale):
        """True if, after `joueur` captures starting from coupeFinale, the
        opponent keeps at least one seed (Awale "do not starve" rule).

        Called after sowing but before capturing, so self.plateau already
        reflects the sown board. The capture chain is simulated exactly as
        `deplacer` performs it: walking backwards from coupeFinale while the
        pits belong to the opponent and are capturable (2 or 3 seeds).

        Bug fix: the previous version counted any non-capturable opponent
        pit as "feeding", including EMPTY pits (0 seeds is not capturable),
        so a move that captured every opponent seed could wrongly be
        declared admissible.
        """
        adversaire = (joueur + 1) % 2
        # simulate the capture chain backwards from the landing pit
        capturees = set()
        idCoupe = coupeFinale
        while (self.joueurCoupe(idCoupe) == adversaire and self.coupePrenable(idCoupe)):
            capturees.add(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
        # the move is admissible iff some opponent pit outside the captured
        # chain still holds at least one seed
        debut = adversaire * self.nCoupes
        return any(self.plateau[i] != 0
                   for i in range(debut, debut + self.nCoupes)
                   if i not in capturees)

    def coupesAdmissiblesNourrir(self, joueur):
        """Pits `joueur` may play so that at least one seed reaches the opponent."""
        coupesAdmissibles = []
        # start from the pit closest to the opponent (counter-clockwise)
        idCoupe = (self.nCoupes*(joueur+1))-1
        distance = 1
        while (self.joueurCoupe(idCoupe) == joueur):
            # if the pit holds at least as many seeds as its distance to the
            # opponent's row, at least one seed lands in the opponent's camp
            if self.plateau[idCoupe] >= distance:
                coupesAdmissibles.append(idCoupe)
            idCoupe = self.coupePrecedente(idCoupe)
            distance += 1
        return coupesAdmissibles

    def coupesAdmissibles(self, joueur):
        """Legal pits `joueur` may play this turn.

        If the opponent has no seed left, only feeding moves are legal; when
        no feeding move exists, `joueur` collects every remaining seed and
        the game ends (self.finie is set as a side effect).
        """
        adversaire = (joueur+1) % 2
        if self.grainesRestantesJoueur(adversaire) == 0:
            coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)
            # if no move can feed the opponent, the game is over:
            # the current player gathers all remaining seeds
            if len(coupesAdmissibles) == 0:
                self.scores[joueur] += self.grainesRestantes()
                self.plateau = np.zeros(2*self.nCoupes, dtype=int)
                self.finie = True
        # otherwise every non-empty pit of `joueur` is playable
        else:
            coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)] > 0]
        return coupesAdmissibles

    def tourDuJoueur(self):
        """Ask the human (player 0) for a pit on stdin, play it, and continue."""
        joueur = 0
        # if the opponent has no seed left, the move must feed him
        coupesAdmissibles = self.coupesAdmissibles(joueur)
        print("C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:")
        nCoupe = int(input())
        while nCoupe < 0 or nCoupe > self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):
            # the pit does not exist or is not a legal move
            print("Coupelle incorrecte. Entrez le numéro de la coupelle à jouer.")
            nCoupe = int(input())
        self.deplacer(joueur, nCoupe)
        self.jouer()

    def tourOrdi(self):
        """Let the AI (player 1) pick its move with alpha-beta and play it."""
        joueur = 1
        self.profondeur = 0
        self.value = self.alphabeta(joueur, -np.inf, np.inf)
        for idCoupe in self.arbreFils.keys():
            print("coupe = ", idCoupe, " : valeur = ", self.arbreFils[idCoupe].value)
        # play the first child whose value matches the root value
        for idCoupe in self.arbreFils.keys():
            if self.value == self.arbreFils[idCoupe].value:
                self.deplacer(joueur, idCoupe)
                break
        self.jouer()

    def partieFinie(self):
        """True if the board is empty or one player already scored more than
        half of the seeds; also updates self.finie.
        """
        limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit
        self.finie = (self.grainesRestantes() == 0 or self.scores[0] > limiteGagne or self.scores[1] > limiteGagne)
        return self.finie

    def afficherPlateau(self):
        """Print the board, computer's row on top."""
        print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1], self.plateau[0:self.nCoupes]]))  # [::-1] reverses the top row for display

    def afficherScores(self):
        """Print both players' scores."""
        print("score J1........."+str(self.scores[0]))
        print("score MinMax....."+str(self.scores[1]))

    def evaluation(self, joueur):
        """Heuristic value of the position for `joueur`: score difference."""
        adversaire = (joueur+1) % 2
        return self.scores[joueur]-self.scores[adversaire]

    def jouer(self):
        """Main loop: display the board and alternate players until the end."""
        if (not self.partieFinie()):
            self.afficherPlateau()
            self.afficherScores()
            if (self.tour == 0):
                self.tourDuJoueur()
            else:
                self.tourOrdi()
            print("\n")
        else:
            self.afficherPlateau()
            self.afficherScores()
            print("Partie Finie !")

    def minimax(self, joueurMaximisant, profondeurArbre=1):
        """Plain minimax (kept for reference; `alphabeta` supersedes it).

        joueurMaximisant: player (0 or 1) whose score difference is
        maximised. Recursively simulates up to self.profondeurMinimax plies
        ahead; self.profondeur must be set to 0 on the root before calling.
        """
        self.arbreFils = {}
        # determine the legal moves;
        # if none exist this call also ends the game
        coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie):  # base case
            self.value = self.evaluation(joueurMaximisant)
            return self.value
        if self.tour == joueurMaximisant:
            fctComparaison = max
            self.value = - np.inf
        else:
            fctComparaison = min
            self.value = np.inf
        # explore every legal move
        for idCoupe in coupesPossibles:
            fils = self.clone()
            fils.profondeur = self.profondeur+1
            fils.deplacer(fils.tour, idCoupe)
            fils.value = fils.minimax(joueurMaximisant)
            # the tree (arbreFils) is only materialised down to
            # profondeurArbre; depth 1 is enough to display the value
            # attached to each candidate move at the root
            if (self.profondeur < profondeurArbre):
                self.arbreFils[idCoupe] = fils
            self.value = fctComparaison(self.value, fils.value)
        return self.value

    def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):
        """Minimax with alpha-beta pruning.

        joueurMaximisant: player (0 or 1) whose score difference is
        maximised. alpha/beta are the usual pruning bounds; self.profondeur
        must be set to 0 on the root before calling.
        """
        self.arbreFils = {}
        # determine the legal moves;
        # if none exist this call also ends the game
        coupesPossibles = self.coupesAdmissibles(self.tour)
        if (self.profondeur == self.profondeurMinimax or self.finie):  # base case
            self.value = self.evaluation(joueurMaximisant)
            return self.value
        if self.tour == joueurMaximisant:
            fctComparaison = max
            self.value = - np.inf
        else:
            fctComparaison = min
            self.value = np.inf
        # explore every legal move
        for idCoupe in coupesPossibles:
            fils = self.clone()
            fils.profondeur = self.profondeur+1
            fils.deplacer(fils.tour, idCoupe)
            fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)
            # the tree (arbreFils) is only materialised down to
            # profondeurArbre; depth 1 is enough to display the value
            # attached to each candidate move at the root
            if (self.profondeur < profondeurArbre):
                self.arbreFils[idCoupe] = fils
            self.value = fctComparaison(self.value, fils.value)
            # alpha / beta cutoffs once the best achievable result is known
            if self.tour == joueurMaximisant:
                if self.value >= beta:
                    return self.value
                alpha = fctComparaison(alpha, self.value)
            else:
                if alpha >= self.value:
                    return self.value
                beta = fctComparaison(beta, self.value)
        return self.value
if __name__ == "__main__":
    # Guard the script entry point so importing this module does not
    # immediately start an interactive game (which blocks on input()).
    # 6 cups per side, 4 seeds per cup, computer searches 8 plies ahead.
    t = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=8)
    t.jouer()
|
normal
|
{
"blob_id": "576d6bec4a91ba6f0597b76a5da5ad3ef6562b19",
"index": 9592,
"step-1": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n <mask token>\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n <mask token>\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n <mask token>\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == 
joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n <mask token>\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = 
self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = 
False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n <mask token>\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' 
+ str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, 
self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n admissible = 
False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n <mask token>\n\n def tourOrdi(self):\n joueur = 1\n self.profondeur = 0\n self.value = self.alphabeta(joueur, -np.inf, np.inf)\n for idCoupe in self.arbreFils.keys():\n print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[\n idCoupe].value)\n for idCoupe in self.arbreFils.keys():\n if self.value == self.arbreFils[idCoupe].value:\n self.deplacer(joueur, idCoupe)\n break\n self.jouer()\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' 
+ str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return 
self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\n<mask token>\n",
"step-4": "<mask token>\npygame.init()\n\n\nclass terrainDeJeu:\n\n def __init__(self, nCoupes, profondeur, nGrainesParCoupelle=4):\n self.plateau = np.full(2 * nCoupes, nGrainesParCoupelle)\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\n self.nCoupes = nCoupes\n self.scores = [0, 0]\n self.tour = 0\n self.finie = False\n self.profondeurMinimax = profondeur\n self.arbreFils = {}\n\n def clone(self):\n clone = terrainDeJeu(self.nCoupes, self.profondeurMinimax, self.\n nGrainesParCoupelleInit)\n clone.plateau = self.plateau.copy()\n clone.scores = self.scores.copy()\n clone.tour = self.tour\n clone.finie = self.finie\n return clone\n\n def coupeSuivante(self, idCoupe):\n return (idCoupe + 1) % (2 * self.nCoupes)\n\n def coupePrecedente(self, idCoupe):\n return (idCoupe - 1) % (2 * self.nCoupes)\n\n def joueurCoupe(self, idCoupe):\n return 0 if idCoupe < self.nCoupes else 1\n\n def coupePrenable(self, idCoupe):\n return self.plateau[idCoupe] == 2 or self.plateau[idCoupe] == 3\n\n def deplacer(self, joueur, idCoupe):\n coupeInitiale = idCoupe\n nGraines = self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n while nGraines != 0:\n idCoupe = self.coupeSuivante(idCoupe)\n if idCoupe != coupeInitiale:\n self.plateau[idCoupe] += 1\n nGraines -= 1\n coupeFinale = idCoupe\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\n if joueur != joueurCoupeFinale:\n if self.nourrirAdversaire(joueur, coupeFinale):\n while self.joueurCoupe(idCoupe\n ) == joueurCoupeFinale and self.coupePrenable(idCoupe):\n self.scores[joueur] += self.plateau[idCoupe]\n self.plateau[idCoupe] = 0\n idCoupe = self.coupePrecedente(idCoupe)\n self.tour = (self.tour + 1) % 2\n\n def grainesRestantes(self):\n return np.sum(self.plateau)\n\n def grainesRestantesJoueur(self, joueur):\n if joueur == 0:\n return np.sum(self.plateau[0:self.nCoupes])\n else:\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\n\n def nourrirAdversaire(self, joueur, coupeFinale):\n adversaire = (joueur + 1) % 2\n 
admissible = False\n idCoupe = self.nCoupes * (adversaire + 1) - 1\n while self.joueurCoupe(idCoupe) == adversaire:\n if idCoupe > coupeFinale and self.plateau[idCoupe] != 0:\n admissible = True\n elif not self.coupePrenable(idCoupe):\n admissible = True\n idCoupe = self.coupePrecedente(idCoupe)\n return admissible\n\n def coupesAdmissiblesNourrir(self, joueur):\n coupesAdmissibles = []\n idCoupe = self.nCoupes * (joueur + 1) - 1\n distance = 1\n while self.joueurCoupe(idCoupe) == joueur:\n if self.plateau[idCoupe] >= distance:\n coupesAdmissibles.append(idCoupe)\n idCoupe = self.coupePrecedente(idCoupe)\n distance += 1\n return coupesAdmissibles\n\n def coupesAdmissibles(self, joueur):\n adversaire = (joueur + 1) % 2\n if self.grainesRestantesJoueur(adversaire) == 0:\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\n if len(coupesAdmissibles) == 0:\n self.scores[joueur] += self.grainesRestantes()\n self.plateau = np.zeros(2 * self.nCoupes, dtype=int)\n self.finie = True\n else:\n coupesAdmissibles = [(k + joueur * self.nCoupes) for k in range\n (self.nCoupes) if self.plateau[k + joueur * self.nCoupes] > 0]\n return coupesAdmissibles\n\n def tourDuJoueur(self):\n joueur = 0\n coupesAdmissibles = self.coupesAdmissibles(joueur)\n print(\n \"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:\"\n )\n nCoupe = int(input())\n while (nCoupe < 0 or nCoupe > self.nCoupes - 1 or not nCoupe in\n coupesAdmissibles):\n print(\n 'Coupelle incorrecte. 
Entrez le numéro de la coupelle à jouer.'\n )\n nCoupe = int(input())\n self.deplacer(joueur, nCoupe)\n self.jouer()\n\n def tourOrdi(self):\n joueur = 1\n self.profondeur = 0\n self.value = self.alphabeta(joueur, -np.inf, np.inf)\n for idCoupe in self.arbreFils.keys():\n print('coupe = ', idCoupe, ' : valeur = ', self.arbreFils[\n idCoupe].value)\n for idCoupe in self.arbreFils.keys():\n if self.value == self.arbreFils[idCoupe].value:\n self.deplacer(joueur, idCoupe)\n break\n self.jouer()\n\n def partieFinie(self):\n limiteGagne = self.nCoupes * self.nGrainesParCoupelleInit\n self.finie = self.grainesRestantes() == 0 or self.scores[0\n ] > limiteGagne or self.scores[1] > limiteGagne\n return self.finie\n\n def afficherPlateau(self):\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],\n self.plateau[0:self.nCoupes]]))\n\n def afficherScores(self):\n print('score J1.........' + str(self.scores[0]))\n print('score MinMax.....' + str(self.scores[1]))\n\n def evaluation(self, joueur):\n adversaire = (joueur + 1) % 2\n return self.scores[joueur] - self.scores[adversaire]\n\n def jouer(self):\n if not self.partieFinie():\n self.afficherPlateau()\n self.afficherScores()\n if self.tour == 0:\n self.tourDuJoueur()\n else:\n self.tourOrdi()\n print('\\n')\n else:\n self.afficherPlateau()\n self.afficherScores()\n print('Partie Finie !')\n\n def minimax(self, joueurMaximisant, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.minimax(joueurMaximisant)\n if self.profondeur < profondeurArbre:\n 
self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n return self.value\n\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1):\n self.arbreFils = {}\n coupesPossibles = self.coupesAdmissibles(self.tour)\n if self.profondeur == self.profondeurMinimax or self.finie:\n self.value = self.evaluation(joueurMaximisant)\n return self.value\n if self.tour == joueurMaximisant:\n fctComparaison = max\n self.value = -np.inf\n else:\n fctComparaison = min\n self.value = np.inf\n for idCoupe in coupesPossibles:\n fils = self.clone()\n fils.profondeur = self.profondeur + 1\n fils.deplacer(fils.tour, idCoupe)\n fils.value = fils.alphabeta(joueurMaximisant, alpha, beta)\n if self.profondeur < profondeurArbre:\n self.arbreFils[idCoupe] = fils\n self.value = fctComparaison(self.value, fils.value)\n if self.tour == joueurMaximisant:\n if self.value >= beta:\n return self.value\n alpha = fctComparaison(alpha, self.value)\n else:\n if alpha >= self.value:\n return self.value\n beta = fctComparaison(beta, self.value)\n return self.value\n\n\nt = terrainDeJeu(nCoupes=6, nGrainesParCoupelle=4, profondeur=8)\nt.jouer()\n",
"step-5": "import numpy as np\r\n#!pip install pygame\r\nimport pygame\r\n#from copy import deepcopy\r\npygame.init()\r\n#-----------\r\n# Modifications (Matthieu, 15/04):\r\n# Modification de la représentation du terrain du jeu. Il est maintenant représenté par une seule liste.\r\n# un seul identifiant par coupe semble plus simple à gérer qu'un couple (joueur,numero)\r\n# Les indices de la liste correspondant à chaque coupe sont par exemple :\r\n# [11] [10] [9] [8] [7] [6] ligne de l'ordi (joueur 1)\r\n# [0] [1] [2] [3] [4] [5] ligne du joueur (joueur 0)\r\n# Modifications de certaines fonctions de vérification des règles pour éviter les deepcopy\r\n# Simplification de la structure de l'arbre (structure de dictionnaire contenant les fils de chaque noeud)\r\n# On ne le construit que pour une profondeur donnée profondeurArbre (1 par défaut), ou même pas du tout\r\n# Algo alpha beta\r\n# Pbs : \r\n# Fonction qui permettrait de détecter les situations ou le jeu peut boucler à l'infini\r\n# Pouvoir tester les performances de l'ia, par exemple sur quelques centaines de parties, combien de % \r\n# sont gagnées par l'ia contre un algo qui joue aléatoirement\r\n# Améliorer la fonction d'évaluation qui est pour l'instant très basique\r\n##-------------\r\n# Le terrain de jeu est un tableau de deux lignes (les deux camps) et de nCoupes colonnes (les coupelles),\r\n# contenant initialement n graines. 
La première constitue le camp du joueur, la seconde, celle de l'ordinateur.\r\n# Dans chaque camp, les coupelles sont numérotées de 1 à nCoupes.\r\n# A chaque tour, le joueur doit choisir un numéro de coupelle.\r\n# Les graines de celle-ci sont alors transférées dans les coupes suivantes etc.\r\n#\r\n# modifs du 17.03 par Léo:\r\n# -suppression de scoreGagnant, qui n'apparait pas dans les règles de base de l'Awalé\r\n# -Pour faciliter les manipulations du code et sa compréhension, on parle maintenant\r\n# du joueur 0 et du joueur 1 (au lieu de 1 et 2) et les coupelles sont numérotées de 0 à nCoupes-1.\r\n#Notions de classe:\r\n#https://openclassrooms.com/fr/courses/235344-apprenez-a-programmer-en-python/232721-apprehendez-les-classes\r\n#Explication de l'algorithme minimax général (page 52) :\r\n#http://stephane.ayache.perso.luminy.univ-amu.fr/zoom/cours/Cours/IA_Jeux/IAEtJeux2.pdf\r\n#Code par Léo et Paul\r\n#Pb: le jeu peut boucler à l'infini à la fin d'une partie (souvent lorsqu'il reste 2 graines disposées symétriquement)\r\n# -> se pencher sur la fonction \"partieFinie\" et peut-être essayer d'intégrer cette fonction dans l'algo récursif minimax..\r\n#Pb: structure d'arbre trop compliquée: (*)\r\n#l'arbre est construit à partir d'une liste selon le principe suivant:\r\n#les nCoupes fils de l'élément d'indice k sont d'indices k*nCoupes + l, avec l variant entre 1 et nCoupes\r\n#On vérifie alors (à l'aide d'un dessin par exemple) qu'il y a une bijection naturelle entre la structure d'arbre et la liste (ou tableau) de taille voulue\r\nclass terrainDeJeu:\r\n # [11] [10] [9] [8] [7] [6]// ligne de l'ordi (joueur 1)\r\n # [0] [1] [2] [3] [4] [5]// ligne du joueur (joueur 0)\r\n def __init__(self,nCoupes,profondeur,nGrainesParCoupelle=4) : #Constructeur\r\n self.plateau = np.full(2*nCoupes,nGrainesParCoupelle)\r\n self.nGrainesParCoupelleInit = nGrainesParCoupelle\r\n self.nCoupes = nCoupes\r\n self.scores = [0,0] # scores[0] = score du joueur 0...\r\n self.tour = 
0\r\n self.finie = False\r\n self.profondeurMinimax = profondeur\r\n self.arbreFils = {}\r\n \r\n \r\n #clone le terrain de jeu pour pouvoir simuler un coup par la suite\r\n def clone(self):\r\n clone = terrainDeJeu(self.nCoupes,self.profondeurMinimax,self.nGrainesParCoupelleInit)\r\n clone.plateau= self.plateau.copy()\r\n clone.scores = self.scores.copy()\r\n clone.tour = self.tour\r\n clone.finie = self.finie\r\n return clone\r\n \r\n #retourne l'id de la coupe suivant idCoupe sur le plateau (suivant = sens trigo)\r\n def coupeSuivante(self,idCoupe):\r\n return (idCoupe + 1)%(2*self.nCoupes)\r\n #retourne l'id de la coupe précédant idCoupe sur le plateau (précédant = sens horaire)\r\n def coupePrecedente(self,idCoupe):\r\n return (idCoupe - 1)%(2*self.nCoupes)\r\n #retourne le joueur (0 ou 1) à qui appartient la coupe idCoupe\r\n def joueurCoupe(self,idCoupe):\r\n return 0 if idCoupe < self.nCoupes else 1\r\n #retourne si idCoupe peut être prise (contient 2 ou 3 graines)\r\n def coupePrenable(self,idCoupe):\r\n return (self.plateau[idCoupe]==2 or self.plateau[idCoupe]==3)\r\n def deplacer(self,joueur,idCoupe):\r\n coupeInitiale = idCoupe #id de la coupelle choisie\r\n nGraines = self.plateau[idCoupe]\r\n self.plateau[idCoupe] = 0\r\n while (nGraines != 0): #On redistribue les graines de la coupelle initiale\r\n idCoupe = self.coupeSuivante(idCoupe)\r\n if (idCoupe != coupeInitiale): #On ne redistribue pas dans la coupelle initiale\r\n self.plateau[idCoupe] += 1\r\n nGraines -= 1\r\n coupeFinale = idCoupe\r\n joueurCoupeFinale = self.joueurCoupe(coupeFinale)\r\n if (joueur != joueurCoupeFinale): \r\n #on vérifie si on va affamer l'adversaire\r\n #si non, on prend les graines normalement\r\n if (self.nourrirAdversaire(joueur,coupeFinale)):\r\n while (self.joueurCoupe(idCoupe)==joueurCoupeFinale and self.coupePrenable(idCoupe)):\r\n self.scores[joueur]+=self.plateau[idCoupe]\r\n self.plateau[idCoupe]=0\r\n idCoupe = self.coupePrecedente(idCoupe)\r\n #si on va 
affamer l'adversaire :\r\n # on ne prend aucune graine donc on ne fait rien\r\n self.tour=(self.tour+1)%2\r\n \r\n #On compte le nombre de graines restantes sur le plateau\r\n def grainesRestantes(self): \r\n return np.sum(self.plateau)\r\n #on compte le nombre de graines restantes sur le plateau pour les coupes de joueur\r\n def grainesRestantesJoueur(self,joueur):\r\n if joueur==0:\r\n return np.sum(self.plateau[0:self.nCoupes])\r\n else:\r\n return np.sum(self.plateau[self.nCoupes:len(self.plateau)])\r\n #détermine si, dans le cas où joueur finit son coup sur la coupe coupeFinale,\r\n #Yson adversaire sera affamé ou pas \r\n #on regarde donc si il restera au moins une graine sur le terrain de l'adversaire\r\n def nourrirAdversaire(self,joueur,coupeFinale): \r\n adversaire = (joueur+1)%2 \r\n #on commence la vérification à la coupe la plus éloignée de adversaire (dans le sens horaire)\r\n admissible = False\r\n idCoupe = (self.nCoupes*(adversaire+1))-1\r\n while (self.joueurCoupe(idCoupe)==adversaire):\r\n #si idCoupe est après coupeFinale et qu'il reste des graines dedans le coup est admissible\r\n if (idCoupe>coupeFinale and self.plateau[idCoupe]!=0):\r\n admissible=True\r\n #si joueur peut pas prendre la coupe idCoupe le coup est admissible\r\n elif (not self.coupePrenable(idCoupe)):\r\n admissible=True\r\n idCoupe=self.coupePrecedente(idCoupe)\r\n #True si le coup est admissible pour la règle \"nourrir\"\r\n return admissible \r\n #coupes admissibles que peut jouer joueur pour nourrir son adversaire\r\n def coupesAdmissiblesNourrir(self,joueur):\r\n coupesAdmissibles = []\r\n #on commence par la coupe la plus proche de l'adversaire (dans le sens trigo)\r\n idCoupe = (self.nCoupes*(joueur+1))-1\r\n distance = 1\r\n while (self.joueurCoupe(idCoupe)==joueur):\r\n #s'il y a plus de graines dans idCoupe que la distance qui la sépare aux coupes de l'adversaire\r\n #le coup est admissible, au moins une graine nourrira l'adversaire\r\n if 
self.plateau[idCoupe]>=distance:\r\n coupesAdmissibles.append(idCoupe)\r\n idCoupe = self.coupePrecedente(idCoupe)\r\n distance +=1\r\n return coupesAdmissibles\r\n def coupesAdmissibles(self,joueur):\r\n adversaire = (joueur+1)%2\r\n if self.grainesRestantesJoueur(adversaire) == 0:\r\n coupesAdmissibles = self.coupesAdmissiblesNourrir(joueur)\r\n #si aucun coup ne peut être joué pour nourrir l'adversaire\r\n if len(coupesAdmissibles)==0:\r\n self.scores[joueur] += self.grainesRestantes()\r\n self.plateau = np.zeros(2*self.nCoupes,dtype=int)\r\n self.finie = True\r\n #partie terminée\r\n \r\n #sinon toutes les coupes non vides sont admissibles\r\n else :\r\n coupesAdmissibles = [(k+joueur*self.nCoupes) for k in range(self.nCoupes) if self.plateau[(k+joueur*self.nCoupes)]>0]\r\n \r\n return coupesAdmissibles\r\n \r\n def tourDuJoueur(self):\r\n joueur = 0\r\n #si l'adversaire n'a plus de graines, il faut obligatoirement le nourrir\r\n coupesAdmissibles = self.coupesAdmissibles(joueur)\r\n print(\"C'est au tour du joueur 1. Entrez le numéro de la coupelle à jouer:\")\r\n nCoupe = int(input())\r\n #print(\"coupesAdmissibles\",coupesAdmissibles)\r\n while nCoupe<0 or nCoupe>self.nCoupes-1 or (not (nCoupe in coupesAdmissibles)):\r\n #cas où la coupelle n'existe pas, ou correspond à un coup non admissible\r\n print(\"Coupelle incorrecte. 
Entrez le numéro de la coupelle à jouer.\")\r\n nCoupe = int(input())\r\n self.deplacer(joueur,nCoupe)\r\n self.jouer()\r\n \r\n def tourOrdi(self):\r\n joueur = 1\r\n self.profondeur = 0\r\n self.value = self.alphabeta(joueur,-np.inf,np.inf)\r\n for idCoupe in self.arbreFils.keys():\r\n print(\"coupe = \",idCoupe,\" : valeur = \",self.arbreFils[idCoupe].value)\r\n for idCoupe in self.arbreFils.keys():\r\n if self.value==self.arbreFils[idCoupe].value:\r\n self.deplacer(joueur,idCoupe)\r\n break\r\n \r\n \r\n self.jouer()\r\n \r\n def partieFinie(self):\r\n #True si le plateau ne contient plus aucune graine\r\n limiteGagne = self.nCoupes*self.nGrainesParCoupelleInit\r\n self.finie = (self.grainesRestantes()==0 or self.scores[0]> limiteGagne or self.scores[1]> limiteGagne)\r\n return self.finie\r\n\r\n def afficherPlateau(self):\r\n print(np.array([self.plateau[self.nCoupes:len(self.plateau)][::-1],self.plateau[0:self.nCoupes]])) # [::-1] permet d'inverse la liste\r\n\r\n def afficherScores(self):\r\n print(\"score J1.........\"+str(self.scores[0]))\r\n print(\"score MinMax.....\"+str(self.scores[1]))\r\n\r\n def evaluation(self,joueur):\r\n adversaire = (joueur+1)%2\r\n return self.scores[joueur]-self.scores[adversaire]\r\n \r\n \r\n #Fonction principale\r\n def jouer(self):\r\n \r\n if (not self.partieFinie()) :\r\n self.afficherPlateau()\r\n self.afficherScores()\r\n if (self.tour==0):\r\n self.tourDuJoueur()\r\n else:\r\n self.tourOrdi()\r\n print(\"\\n\")\r\n else:\r\n self.afficherPlateau()\r\n self.afficherScores()\r\n print(\"Partie Finie !\")\r\n\r\n #plus vraiment utile, le code du minimax est repris dans celui de la fonction alphabeta\r\n def minimax(self, joueurMaximisant, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)\r\n #On simule ici des situations fictives de jeu de manière récursive (l'I.A. 
lit en quelque sorte l'avenir pour n=profondeur tours en avance)\r\n self.arbreFils = {}\r\n \r\n #on détermine les coups possibles\r\n #si aucun coup n'est possible cette fonction arrête aussi la partie\r\n coupesPossibles = self.coupesAdmissibles(self.tour) \r\n \r\n if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base\r\n self.value = self.evaluation(joueurMaximisant)\r\n return self.value\r\n \r\n if self.tour==joueurMaximisant:\r\n fctComparaison = max\r\n self.value = - np.inf\r\n else:\r\n fctComparaison = min\r\n self.value = np.inf\r\n \r\n #on parcourt tous les coups possibles\r\n for idCoupe in coupesPossibles:\r\n fils=self.clone()\r\n fils.profondeur=self.profondeur+1\r\n fils.deplacer(fils.tour,idCoupe)\r\n fils.value = fils.minimax(joueurMaximisant)\r\n \r\n #on ne remplit effectivement l'arbre (attribut arbreFils)\r\n #que pour une profondeur < à profondeurArbre\r\n #on pourrait même ne pas le remplir du tout mais profondeurArbre = 1\r\n #permet d'afficher les valeurs associées à chaque coup...\r\n if (self.profondeur < profondeurArbre):\r\n self.arbreFils[idCoupe]=fils\r\n self.value = fctComparaison(self.value, fils.value)\r\n \r\n return self.value\r\n \r\n def alphabeta(self, joueurMaximisant, alpha, beta, profondeurArbre=1): #joueurMaximisant = joueur pour lequel on veut maximiser le score (0 ou 1)\r\n #On simule ici des situations fictives de jeu de manière récursive (l'I.A. 
lit en quelque sorte l'avenir pour n=profondeur tours en avance)\r\n self.arbreFils = {}\r\n \r\n #on détermine les coups possibles\r\n #si aucun coup n'est possible cette fonction arrête aussi la partie\r\n coupesPossibles = self.coupesAdmissibles(self.tour) \r\n \r\n if (self.profondeur == self.profondeurMinimax or self.finie): #cas de base\r\n self.value = self.evaluation(joueurMaximisant)\r\n return self.value\r\n \r\n if self.tour==joueurMaximisant:\r\n fctComparaison = max\r\n self.value = - np.inf\r\n else:\r\n fctComparaison = min\r\n self.value = np.inf\r\n \r\n #on parcourt tous les coups possibles\r\n for idCoupe in coupesPossibles:\r\n fils=self.clone()\r\n fils.profondeur=self.profondeur+1\r\n fils.deplacer(fils.tour,idCoupe)\r\n fils.value = fils.alphabeta(joueurMaximisant,alpha,beta)\r\n \r\n #on ne remplit effectivement l'arbre (attribut arbreFils)\r\n #que pour une profondeur < à profondeurArbre\r\n #on pourrait même ne pas le remplir du tout mais profondeurArbre = 1\r\n #permet d'afficher les valeurs associées à chaque coup...\r\n if (self.profondeur < profondeurArbre):\r\n self.arbreFils[idCoupe]=fils\r\n \r\n self.value = fctComparaison(self.value, fils.value)\r\n \r\n #coupures alpha et beta si on est sûrs d'avoir le meilleur résultat possible\r\n if self.tour==joueurMaximisant:\r\n if self.value >= beta:\r\n return self.value\r\n alpha = fctComparaison(alpha,self.value)\r\n else:\r\n if alpha >= self.value:\r\n return self.value\r\n beta = fctComparaison(beta,self.value)\r\n \r\n return self.value\r\n \r\n \r\n\r\nt = terrainDeJeu(nCoupes=6,nGrainesParCoupelle=4,profondeur=8)\r\nt.jouer()",
"step-ids": [
14,
20,
21,
24,
26
]
}
|
[
14,
20,
21,
24,
26
] |
import string
import random
import os
from threading import Thread
class Process(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
while True:
prenom = id_generator(random.randint(4, 8))
nom = id_generator(random.randint(4, 8))
password = id_generator(random.randint(4, 8))
mail = id_generator(random.randint(4, 8)) + '.' + id_generator(random.randint(4, 8))
command = "sh attack2.0.sh " + prenom + " " + mail + " " + nom + " " + password
os.system(command)
print "\n" + mail
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
i = 0
while i < 100:
thread_1 = Process()
thread_1.start()
i = i + 1
|
normal
|
{
"blob_id": "b9c058bdb04df93beb379d05939b00f4db423cd3",
"index": 452,
"step-1": "import string\nimport random\nimport os\nfrom threading import Thread\n\nclass Process(Thread):\n def __init__(self):\n Thread.__init__(self)\n\n def run(self):\n while True:\n prenom = id_generator(random.randint(4, 8))\n nom = id_generator(random.randint(4, 8))\n password = id_generator(random.randint(4, 8))\n mail = id_generator(random.randint(4, 8)) + '.' + id_generator(random.randint(4, 8))\n command = \"sh attack2.0.sh \" + prenom + \" \" + mail + \" \" + nom + \" \" + password\n os.system(command)\n print \"\\n\" + mail\n\ndef id_generator(size=6, chars=string.ascii_lowercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\ni = 0\nwhile i < 100:\n thread_1 = Process()\n thread_1.start()\n i = i + 1\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def who_win_line(line):
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1
], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],
rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[
3][3]]]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def who_win_line(line):
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1
], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],
rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[
3][3]]]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
<|reserved_special_token_0|>
if __name__ == '__main__':
filename_prefix = sys.argv[1]
filename_in = filename_prefix + '.in'
filename_out = filename_prefix + '.out'
file_in = open(filename_in, 'r')
lines = file_in.readlines()
testcnt = int(lines[0])
idx = 1
file_out = open(filename_out, 'w')
for test in range(testcnt):
res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])
file_out.write('Case #{0}: {1}\n'.format(test + 1, res))
idx += TTTSIZE + 1
<|reserved_special_token_1|>
TTTSIZE = 4
def who_win_line(line):
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1
], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],
rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[
3][3]]]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
<|reserved_special_token_0|>
if __name__ == '__main__':
filename_prefix = sys.argv[1]
filename_in = filename_prefix + '.in'
filename_out = filename_prefix + '.out'
file_in = open(filename_in, 'r')
lines = file_in.readlines()
testcnt = int(lines[0])
idx = 1
file_out = open(filename_out, 'w')
for test in range(testcnt):
res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])
file_out.write('Case #{0}: {1}\n'.format(test + 1, res))
idx += TTTSIZE + 1
<|reserved_special_token_1|>
TTTSIZE = 4
def who_win_line(line):
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1
], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],
rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[
3][3]]]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
import sys
if __name__ == '__main__':
filename_prefix = sys.argv[1]
filename_in = filename_prefix + '.in'
filename_out = filename_prefix + '.out'
file_in = open(filename_in, 'r')
lines = file_in.readlines()
testcnt = int(lines[0])
idx = 1
file_out = open(filename_out, 'w')
for test in range(testcnt):
res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])
file_out.write('Case #{0}: {1}\n'.format(test + 1, res))
idx += TTTSIZE + 1
<|reserved_special_token_1|>
TTTSIZE = 4
def who_win_line(line):
elements = set(line)
if '.' in elements:
return '.'
elements.discard('T')
if len(elements) >= 2:
return 'D'
else:
return elements.pop()
def who_win_tic_tac_toe(original_rows):
#print('%s' % repr(original_rows))
board_full = True
rows = [row[0:TTTSIZE] for row in original_rows]
#print('%s' % repr(rows))
columns = [ [rows[0][0], rows[1][0], rows[2][0], rows[3][0]],
[rows[0][1], rows[1][1], rows[2][1], rows[3][1]],
[rows[0][2], rows[1][2], rows[2][2], rows[3][2]],
[rows[0][3], rows[1][3], rows[2][3], rows[3][3]] ]
diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]
diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]
lines = rows
lines.extend(columns)
lines.append(diagonal1)
lines.append(diagonal2)
for line in lines:
winner = who_win_line(line)
if winner == 'X':
return 'X won'
elif winner == 'O':
return 'O won'
elif winner == '.':
board_full = False
if board_full:
return 'Draw'
else:
return 'Game has not completed'
import sys
#import pdb
if __name__ == '__main__':
filename_prefix = sys.argv[1]
filename_in = filename_prefix + ".in"
filename_out = filename_prefix + ".out"
file_in = open(filename_in, 'r')
lines = file_in.readlines()
testcnt = int(lines[0])
idx = 1
file_out = open(filename_out, 'w')
#pdb.set_trace()
for test in range(testcnt):
res = who_win_tic_tac_toe(lines[idx : idx + TTTSIZE])
file_out.write("Case #{0}: {1}\n".format(test + 1, res))
idx += TTTSIZE + 1
|
flexible
|
{
"blob_id": "2e041e33b5c34c2bddc72b36ff641817f1e21db2",
"index": 3735,
"step-1": "<mask token>\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-3": "TTTSIZE = 4\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\n<mask token>\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-4": "TTTSIZE = 4\n\n\ndef who_win_line(line):\n elements = set(line)\n if '.' in elements:\n return '.'\n elements.discard('T')\n if len(elements) >= 2:\n return 'D'\n else:\n return elements.pop()\n\n\ndef who_win_tic_tac_toe(original_rows):\n board_full = True\n rows = [row[0:TTTSIZE] for row in original_rows]\n columns = [[rows[0][0], rows[1][0], rows[2][0], rows[3][0]], [rows[0][1\n ], rows[1][1], rows[2][1], rows[3][1]], [rows[0][2], rows[1][2],\n rows[2][2], rows[3][2]], [rows[0][3], rows[1][3], rows[2][3], rows[\n 3][3]]]\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\n lines = rows\n lines.extend(columns)\n lines.append(diagonal1)\n lines.append(diagonal2)\n for line in lines:\n winner = who_win_line(line)\n if winner == 'X':\n return 'X won'\n elif winner == 'O':\n return 'O won'\n elif winner == '.':\n board_full = False\n if board_full:\n return 'Draw'\n else:\n return 'Game has not completed'\n\n\nimport sys\nif __name__ == '__main__':\n filename_prefix = sys.argv[1]\n filename_in = filename_prefix + '.in'\n filename_out = filename_prefix + '.out'\n file_in = open(filename_in, 'r')\n lines = file_in.readlines()\n testcnt = int(lines[0])\n idx = 1\n file_out = open(filename_out, 'w')\n for test in range(testcnt):\n res = who_win_tic_tac_toe(lines[idx:idx + TTTSIZE])\n file_out.write('Case #{0}: {1}\\n'.format(test + 1, res))\n idx += TTTSIZE + 1\n",
"step-5": "TTTSIZE = 4\r\n\r\ndef who_win_line(line):\r\n elements = set(line)\r\n if '.' in elements:\r\n return '.'\r\n elements.discard('T')\r\n if len(elements) >= 2:\r\n return 'D'\r\n else:\r\n return elements.pop()\r\n\r\ndef who_win_tic_tac_toe(original_rows):\r\n #print('%s' % repr(original_rows))\r\n board_full = True\r\n rows = [row[0:TTTSIZE] for row in original_rows]\r\n #print('%s' % repr(rows))\r\n columns = [ [rows[0][0], rows[1][0], rows[2][0], rows[3][0]],\r\n [rows[0][1], rows[1][1], rows[2][1], rows[3][1]],\r\n [rows[0][2], rows[1][2], rows[2][2], rows[3][2]],\r\n [rows[0][3], rows[1][3], rows[2][3], rows[3][3]] ]\r\n diagonal1 = [rows[0][0], rows[1][1], rows[2][2], rows[3][3]]\r\n diagonal2 = [rows[0][3], rows[1][2], rows[2][1], rows[3][0]]\r\n\r\n lines = rows\r\n lines.extend(columns)\r\n lines.append(diagonal1)\r\n lines.append(diagonal2)\r\n\r\n for line in lines:\r\n winner = who_win_line(line)\r\n if winner == 'X':\r\n return 'X won'\r\n elif winner == 'O':\r\n return 'O won'\r\n elif winner == '.':\r\n board_full = False\r\n if board_full:\r\n return 'Draw'\r\n else:\r\n return 'Game has not completed'\r\n\r\n\r\nimport sys\r\n#import pdb\r\n\r\nif __name__ == '__main__':\r\n filename_prefix = sys.argv[1]\r\n filename_in = filename_prefix + \".in\"\r\n filename_out = filename_prefix + \".out\"\r\n\r\n file_in = open(filename_in, 'r')\r\n lines = file_in.readlines()\r\n\r\n testcnt = int(lines[0])\r\n idx = 1\r\n\r\n file_out = open(filename_out, 'w')\r\n\r\n #pdb.set_trace()\r\n for test in range(testcnt):\r\n res = who_win_tic_tac_toe(lines[idx : idx + TTTSIZE])\r\n file_out.write(\"Case #{0}: {1}\\n\".format(test + 1, res))\r\n idx += TTTSIZE + 1\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def dataframe_to_numpy(dataframe):
numpy_array = dataframe.to_numpy()
return numpy_array
<|reserved_special_token_0|>
def data_slice(data, num_of_data):
data = data[:, 1:num_of_data + 1]
return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_excel(data_path, data_name, episode_Num):
data_name = data_name + str(episode_Num) + '.xlsx'
dataframe = pd.read_excel(data_path + data_name, index_col=0)
return dataframe
def dataframe_to_numpy(dataframe):
numpy_array = dataframe.to_numpy()
return numpy_array
<|reserved_special_token_0|>
def transform(data, data_path, data_name, episode_Num):
data = load_excel(data_path, data_name, episode_Num)
data = dataframe_to_numpy(data)
data = numpy_to_tensor(data)
return data
def data_slice(data, num_of_data):
data = data[:, 1:num_of_data + 1]
return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def load_excel(data_path, data_name, episode_Num):
data_name = data_name + str(episode_Num) + '.xlsx'
dataframe = pd.read_excel(data_path + data_name, index_col=0)
return dataframe
def dataframe_to_numpy(dataframe):
numpy_array = dataframe.to_numpy()
return numpy_array
def numpy_to_tensor(numpy_array):
tensor = torch.from_numpy(numpy_array)
return tensor
def transform(data, data_path, data_name, episode_Num):
data = load_excel(data_path, data_name, episode_Num)
data = dataframe_to_numpy(data)
data = numpy_to_tensor(data)
return data
def data_slice(data, num_of_data):
data = data[:, 1:num_of_data + 1]
return data
<|reserved_special_token_1|>
import pandas as pd
from pandas import Series, DataFrame
def load_excel(data_path, data_name, episode_Num):
data_name = data_name + str(episode_Num) + '.xlsx'
dataframe = pd.read_excel(data_path + data_name, index_col=0)
return dataframe
def dataframe_to_numpy(dataframe):
numpy_array = dataframe.to_numpy()
return numpy_array
def numpy_to_tensor(numpy_array):
tensor = torch.from_numpy(numpy_array)
return tensor
def transform(data, data_path, data_name, episode_Num):
data = load_excel(data_path, data_name, episode_Num)
data = dataframe_to_numpy(data)
data = numpy_to_tensor(data)
return data
def data_slice(data, num_of_data):
data = data[:, 1:num_of_data + 1]
return data
|
flexible
|
{
"blob_id": "b63dc8b9aa2f0593a4a7eb52a722a9c4da6c9e08",
"index": 7804,
"step-1": "<mask token>\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\n<mask token>\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-2": "<mask token>\n\n\ndef load_excel(data_path, data_name, episode_Num):\n data_name = data_name + str(episode_Num) + '.xlsx'\n dataframe = pd.read_excel(data_path + data_name, index_col=0)\n return dataframe\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\n<mask token>\n\n\ndef transform(data, data_path, data_name, episode_Num):\n data = load_excel(data_path, data_name, episode_Num)\n data = dataframe_to_numpy(data)\n data = numpy_to_tensor(data)\n return data\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-3": "<mask token>\n\n\ndef load_excel(data_path, data_name, episode_Num):\n data_name = data_name + str(episode_Num) + '.xlsx'\n dataframe = pd.read_excel(data_path + data_name, index_col=0)\n return dataframe\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\ndef numpy_to_tensor(numpy_array):\n tensor = torch.from_numpy(numpy_array)\n return tensor\n\n\ndef transform(data, data_path, data_name, episode_Num):\n data = load_excel(data_path, data_name, episode_Num)\n data = dataframe_to_numpy(data)\n data = numpy_to_tensor(data)\n return data\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-4": "import pandas as pd\nfrom pandas import Series, DataFrame\n\n\ndef load_excel(data_path, data_name, episode_Num):\n data_name = data_name + str(episode_Num) + '.xlsx'\n dataframe = pd.read_excel(data_path + data_name, index_col=0)\n return dataframe\n\n\ndef dataframe_to_numpy(dataframe):\n numpy_array = dataframe.to_numpy()\n return numpy_array\n\n\ndef numpy_to_tensor(numpy_array):\n tensor = torch.from_numpy(numpy_array)\n return tensor\n\n\ndef transform(data, data_path, data_name, episode_Num):\n data = load_excel(data_path, data_name, episode_Num)\n data = dataframe_to_numpy(data)\n data = numpy_to_tensor(data)\n return data\n\n\ndef data_slice(data, num_of_data):\n data = data[:, 1:num_of_data + 1]\n return data\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def HP(Di, beta):
"""
Function that calculates shannon entropy
"""
P = np.exp(-Di * beta)
sumP = np.sum(P)
Pi = P / sumP
Hi = -np.sum(Pi * np.log2(Pi))
return Hi, Pi
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
def HP(Di, beta):
"""
Function that calculates shannon entropy
"""
P = np.exp(-Di * beta)
sumP = np.sum(P)
Pi = P / sumP
Hi = -np.sum(Pi * np.log2(Pi))
return Hi, Pi
<|reserved_special_token_1|>
#!/usr/bin/env python3
"""Shannon entropy and P affinities"""
import numpy as np
def HP(Di, beta):
"""
Function that calculates shannon entropy
"""
P = np.exp(-Di * beta)
sumP = np.sum(P)
Pi = P / sumP
Hi = -np.sum(Pi * np.log2(Pi))
return (Hi, Pi)
|
flexible
|
{
"blob_id": "0b05b027e3c3147aa2b9c35a0bdc33633ba6e658",
"index": 7129,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef HP(Di, beta):\n \"\"\"\n Function that calculates shannon entropy\n \"\"\"\n P = np.exp(-Di * beta)\n sumP = np.sum(P)\n Pi = P / sumP\n Hi = -np.sum(Pi * np.log2(Pi))\n return Hi, Pi\n",
"step-3": "<mask token>\nimport numpy as np\n\n\ndef HP(Di, beta):\n \"\"\"\n Function that calculates shannon entropy\n \"\"\"\n P = np.exp(-Di * beta)\n sumP = np.sum(P)\n Pi = P / sumP\n Hi = -np.sum(Pi * np.log2(Pi))\n return Hi, Pi\n",
"step-4": "#!/usr/bin/env python3\n\"\"\"Shannon entropy and P affinities\"\"\"\n\nimport numpy as np\n\n\ndef HP(Di, beta):\n \"\"\"\n Function that calculates shannon entropy\n \"\"\"\n P = np.exp(-Di * beta)\n sumP = np.sum(P)\n Pi = P / sumP\n Hi = -np.sum(Pi * np.log2(Pi))\n return (Hi, Pi)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@given('I want to send an integer')
def step_impl(context):
pass
<|reserved_special_token_0|>
@given('I want to send two integers with one channel')
def step_impl(context):
pass
@given('I want to send two floats with one channel')
def step_impl(context):
pass
<|reserved_special_token_0|>
@given('I want to send {reps} floats with 1 channel')
def step_impl(context, reps):
rep = reps
@when(
'I define "{host}" and the port {port} and the number is {flt_one}, and the second is {flt_two}'
)
def step_impl(context, host, port, flt_one, flt_two):
client = OSCClient()
client.connect((host, int(port)))
if rep > 2:
for i in range(rep):
if flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage('/test', flt_one))
elif flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage(['/test', flt_one]))
<|reserved_special_token_0|>
@then('I hear a sound that lasts for {son_length} seconds')
def step_impl(context, son_length):
pass
def listen_handler(path, tags, args, source):
"""
Handler for the listener
"""
response = args[0]
assert (response, 'Negative numbers not processed')
@then('I want to receive "{message}"')
def step_impl(context, message):
listen = OSCServer(('127.0.0.1', 4589))
listen.timeout = 0
listen.addMsgHandler('/err', listen_handler)
listen.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@given('I want to send an integer')
def step_impl(context):
pass
@given('I want to send a float')
def step_impl(context):
pass
@given('I want to send two integers with one channel')
def step_impl(context):
pass
@given('I want to send two floats with one channel')
def step_impl(context):
pass
<|reserved_special_token_0|>
@given('I want to send {reps} floats with 1 channel')
def step_impl(context, reps):
rep = reps
@when(
'I define "{host}" and the port {port} and the number is {flt_one}, and the second is {flt_two}'
)
def step_impl(context, host, port, flt_one, flt_two):
client = OSCClient()
client.connect((host, int(port)))
if rep > 2:
for i in range(rep):
if flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage('/test', flt_one))
elif flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage(['/test', flt_one]))
<|reserved_special_token_0|>
@then('I hear two sounds')
def step_impl(context):
pass
@then('I hear a sound that lasts for {son_length} seconds')
def step_impl(context, son_length):
pass
def listen_handler(path, tags, args, source):
"""
Handler for the listener
"""
response = args[0]
assert (response, 'Negative numbers not processed')
@then('I want to receive "{message}"')
def step_impl(context, message):
listen = OSCServer(('127.0.0.1', 4589))
listen.timeout = 0
listen.addMsgHandler('/err', listen_handler)
listen.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@given('I want to send an integer')
def step_impl(context):
pass
@given('I want to send a float')
def step_impl(context):
pass
@given('I want to send two integers with one channel')
def step_impl(context):
pass
@given('I want to send two floats with one channel')
def step_impl(context):
pass
<|reserved_special_token_0|>
@given('I want to send {reps} floats with 1 channel')
def step_impl(context, reps):
rep = reps
@when(
'I define "{host}" and the port {port} and the number is {flt_one}, and the second is {flt_two}'
)
def step_impl(context, host, port, flt_one, flt_two):
client = OSCClient()
client.connect((host, int(port)))
if rep > 2:
for i in range(rep):
if flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage('/test', flt_one))
elif flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage(['/test', flt_one]))
@when(
'I define "{host}" and the port "{port}" and the integer is {int_one}, and the second is {int_sec}'
)
def step_impl(context, host, port, int_one, int_sec):
client = OSCClient()
client.connect((host, port))
if int_sec > 0:
client.send(OSCMessage('/test', int_one, int_sec))
else:
client.send(OSCMessage('/test', int_one))
@then('I hear a sound')
def step_impl(context):
pass
@then('I hear two sounds')
def step_impl(context):
pass
@then('I hear a sound that lasts for {son_length} seconds')
def step_impl(context, son_length):
pass
def listen_handler(path, tags, args, source):
"""
Handler for the listener
"""
response = args[0]
assert (response, 'Negative numbers not processed')
@then('I want to receive "{message}"')
def step_impl(context, message):
listen = OSCServer(('127.0.0.1', 4589))
listen.timeout = 0
listen.addMsgHandler('/err', listen_handler)
listen.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@given('I want to send an integer')
def step_impl(context):
pass
@given('I want to send a float')
def step_impl(context):
pass
@given('I want to send two integers with one channel')
def step_impl(context):
pass
@given('I want to send two floats with one channel')
def step_impl(context):
pass
rep = 0
@given('I want to send {reps} floats with 1 channel')
def step_impl(context, reps):
rep = reps
@when(
'I define "{host}" and the port {port} and the number is {flt_one}, and the second is {flt_two}'
)
def step_impl(context, host, port, flt_one, flt_two):
client = OSCClient()
client.connect((host, int(port)))
if rep > 2:
for i in range(rep):
if flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage('/test', flt_one))
elif flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage(['/test', flt_one]))
@when(
'I define "{host}" and the port "{port}" and the integer is {int_one}, and the second is {int_sec}'
)
def step_impl(context, host, port, int_one, int_sec):
client = OSCClient()
client.connect((host, port))
if int_sec > 0:
client.send(OSCMessage('/test', int_one, int_sec))
else:
client.send(OSCMessage('/test', int_one))
@then('I hear a sound')
def step_impl(context):
pass
@then('I hear two sounds')
def step_impl(context):
pass
@then('I hear a sound that lasts for {son_length} seconds')
def step_impl(context, son_length):
pass
def listen_handler(path, tags, args, source):
"""
Handler for the listener
"""
response = args[0]
assert (response, 'Negative numbers not processed')
@then('I want to receive "{message}"')
def step_impl(context, message):
listen = OSCServer(('127.0.0.1', 4589))
listen.timeout = 0
listen.addMsgHandler('/err', listen_handler)
listen.close()
<|reserved_special_token_1|>
'''
Functional tests for the Write Stream
'''
from behave import given, when, then
from OSC import OSCClient, OSCMessage, OSCServer
@given('I want to send an integer')
def step_impl (context):
pass
@given('I want to send a float')
def step_impl (context):
pass
@given('I want to send two integers with one channel')
def step_impl (context):
pass
@given('I want to send two floats with one channel')
def step_impl (context):
pass
rep=0
@given('I want to send {reps} floats with 1 channel')
def step_impl (context, reps):
rep = reps
@when('I define "{host}" and the port {port} and the number is {flt_one}, and the second is {flt_two}')
def step_impl(context, host, port, flt_one,flt_two):
client = OSCClient()
client.connect((host, int(port)))
if rep > 2:
for i in range(rep):
if flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one, flt_two]))
else:
client.send(OSCMessage('/test', flt_one))
else:
if flt_two > 0.0:
client.send(OSCMessage(['/test', flt_one,flt_two]))
else:
client.send(OSCMessage(['/test', flt_one]))
@when('I define "{host}" and the port "{port}" and the integer is {int_one}, and the second is {int_sec}')
def step_impl(context, host, port, int_one, int_sec):
client = OSCClient()
client.connect((host, port))
if int_sec > 0:
client.send(OSCMessage('/test', int_one, int_sec))
else:
client.send(OSCMessage('/test', int_one))
@then('I hear a sound')
def step_impl(context):
pass
@then('I hear two sounds')
def step_impl(context):
pass
@then('I hear a sound that lasts for {son_length} seconds')
def step_impl(context,son_length):
pass
def listen_handler(path, tags, args, source):
'''
Handler for the listener
'''
response = args[0]
assert(response, "Negative numbers not processed")
@then('I want to receive "{message}"')
def step_impl(context, message):
listen = OSCServer(("127.0.0.1", 4589))
listen.timeout = 0 #infinite timeout
listen.addMsgHandler("/err", listen_handler)
listen.close()
|
flexible
|
{
"blob_id": "3770e59c5bd6837a0fb812f80c6549024e06a9e4",
"index": 5957,
"step-1": "<mask token>\n\n\n@given('I want to send an integer')\ndef step_impl(context):\n pass\n\n\n<mask token>\n\n\n@given('I want to send two integers with one channel')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two floats with one channel')\ndef step_impl(context):\n pass\n\n\n<mask token>\n\n\n@given('I want to send {reps} floats with 1 channel')\ndef step_impl(context, reps):\n rep = reps\n\n\n@when(\n 'I define \"{host}\" and the port {port} and the number is {flt_one}, and the second is {flt_two}'\n )\ndef step_impl(context, host, port, flt_one, flt_two):\n client = OSCClient()\n client.connect((host, int(port)))\n if rep > 2:\n for i in range(rep):\n if flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage('/test', flt_one))\n elif flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage(['/test', flt_one]))\n\n\n<mask token>\n\n\n@then('I hear a sound that lasts for {son_length} seconds')\ndef step_impl(context, son_length):\n pass\n\n\ndef listen_handler(path, tags, args, source):\n \"\"\"\n Handler for the listener\n \"\"\"\n response = args[0]\n assert (response, 'Negative numbers not processed')\n\n\n@then('I want to receive \"{message}\"')\ndef step_impl(context, message):\n listen = OSCServer(('127.0.0.1', 4589))\n listen.timeout = 0\n listen.addMsgHandler('/err', listen_handler)\n listen.close()\n",
"step-2": "<mask token>\n\n\n@given('I want to send an integer')\ndef step_impl(context):\n pass\n\n\n@given('I want to send a float')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two integers with one channel')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two floats with one channel')\ndef step_impl(context):\n pass\n\n\n<mask token>\n\n\n@given('I want to send {reps} floats with 1 channel')\ndef step_impl(context, reps):\n rep = reps\n\n\n@when(\n 'I define \"{host}\" and the port {port} and the number is {flt_one}, and the second is {flt_two}'\n )\ndef step_impl(context, host, port, flt_one, flt_two):\n client = OSCClient()\n client.connect((host, int(port)))\n if rep > 2:\n for i in range(rep):\n if flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage('/test', flt_one))\n elif flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage(['/test', flt_one]))\n\n\n<mask token>\n\n\n@then('I hear two sounds')\ndef step_impl(context):\n pass\n\n\n@then('I hear a sound that lasts for {son_length} seconds')\ndef step_impl(context, son_length):\n pass\n\n\ndef listen_handler(path, tags, args, source):\n \"\"\"\n Handler for the listener\n \"\"\"\n response = args[0]\n assert (response, 'Negative numbers not processed')\n\n\n@then('I want to receive \"{message}\"')\ndef step_impl(context, message):\n listen = OSCServer(('127.0.0.1', 4589))\n listen.timeout = 0\n listen.addMsgHandler('/err', listen_handler)\n listen.close()\n",
"step-3": "<mask token>\n\n\n@given('I want to send an integer')\ndef step_impl(context):\n pass\n\n\n@given('I want to send a float')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two integers with one channel')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two floats with one channel')\ndef step_impl(context):\n pass\n\n\n<mask token>\n\n\n@given('I want to send {reps} floats with 1 channel')\ndef step_impl(context, reps):\n rep = reps\n\n\n@when(\n 'I define \"{host}\" and the port {port} and the number is {flt_one}, and the second is {flt_two}'\n )\ndef step_impl(context, host, port, flt_one, flt_two):\n client = OSCClient()\n client.connect((host, int(port)))\n if rep > 2:\n for i in range(rep):\n if flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage('/test', flt_one))\n elif flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage(['/test', flt_one]))\n\n\n@when(\n 'I define \"{host}\" and the port \"{port}\" and the integer is {int_one}, and the second is {int_sec}'\n )\ndef step_impl(context, host, port, int_one, int_sec):\n client = OSCClient()\n client.connect((host, port))\n if int_sec > 0:\n client.send(OSCMessage('/test', int_one, int_sec))\n else:\n client.send(OSCMessage('/test', int_one))\n\n\n@then('I hear a sound')\ndef step_impl(context):\n pass\n\n\n@then('I hear two sounds')\ndef step_impl(context):\n pass\n\n\n@then('I hear a sound that lasts for {son_length} seconds')\ndef step_impl(context, son_length):\n pass\n\n\ndef listen_handler(path, tags, args, source):\n \"\"\"\n Handler for the listener\n \"\"\"\n response = args[0]\n assert (response, 'Negative numbers not processed')\n\n\n@then('I want to receive \"{message}\"')\ndef step_impl(context, message):\n listen = OSCServer(('127.0.0.1', 4589))\n listen.timeout = 0\n listen.addMsgHandler('/err', listen_handler)\n listen.close()\n",
"step-4": "<mask token>\n\n\n@given('I want to send an integer')\ndef step_impl(context):\n pass\n\n\n@given('I want to send a float')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two integers with one channel')\ndef step_impl(context):\n pass\n\n\n@given('I want to send two floats with one channel')\ndef step_impl(context):\n pass\n\n\nrep = 0\n\n\n@given('I want to send {reps} floats with 1 channel')\ndef step_impl(context, reps):\n rep = reps\n\n\n@when(\n 'I define \"{host}\" and the port {port} and the number is {flt_one}, and the second is {flt_two}'\n )\ndef step_impl(context, host, port, flt_one, flt_two):\n client = OSCClient()\n client.connect((host, int(port)))\n if rep > 2:\n for i in range(rep):\n if flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage('/test', flt_one))\n elif flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage(['/test', flt_one]))\n\n\n@when(\n 'I define \"{host}\" and the port \"{port}\" and the integer is {int_one}, and the second is {int_sec}'\n )\ndef step_impl(context, host, port, int_one, int_sec):\n client = OSCClient()\n client.connect((host, port))\n if int_sec > 0:\n client.send(OSCMessage('/test', int_one, int_sec))\n else:\n client.send(OSCMessage('/test', int_one))\n\n\n@then('I hear a sound')\ndef step_impl(context):\n pass\n\n\n@then('I hear two sounds')\ndef step_impl(context):\n pass\n\n\n@then('I hear a sound that lasts for {son_length} seconds')\ndef step_impl(context, son_length):\n pass\n\n\ndef listen_handler(path, tags, args, source):\n \"\"\"\n Handler for the listener\n \"\"\"\n response = args[0]\n assert (response, 'Negative numbers not processed')\n\n\n@then('I want to receive \"{message}\"')\ndef step_impl(context, message):\n listen = OSCServer(('127.0.0.1', 4589))\n listen.timeout = 0\n listen.addMsgHandler('/err', listen_handler)\n listen.close()\n",
"step-5": "'''\n Functional tests for the Write Stream\n'''\nfrom behave import given, when, then\nfrom OSC import OSCClient, OSCMessage, OSCServer\n\n@given('I want to send an integer')\ndef step_impl (context):\n pass\n\n@given('I want to send a float')\ndef step_impl (context):\n pass\n\n@given('I want to send two integers with one channel')\ndef step_impl (context):\n pass\n\n@given('I want to send two floats with one channel')\ndef step_impl (context):\n pass\n\n\n\nrep=0\n@given('I want to send {reps} floats with 1 channel')\ndef step_impl (context, reps):\n rep = reps\n\n@when('I define \"{host}\" and the port {port} and the number is {flt_one}, and the second is {flt_two}')\ndef step_impl(context, host, port, flt_one,flt_two):\n client = OSCClient()\n client.connect((host, int(port)))\n\n if rep > 2:\n for i in range(rep):\n if flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one, flt_two]))\n else:\n client.send(OSCMessage('/test', flt_one))\n else:\n if flt_two > 0.0:\n client.send(OSCMessage(['/test', flt_one,flt_two]))\n else:\n client.send(OSCMessage(['/test', flt_one]))\n\n@when('I define \"{host}\" and the port \"{port}\" and the integer is {int_one}, and the second is {int_sec}')\ndef step_impl(context, host, port, int_one, int_sec):\n client = OSCClient()\n client.connect((host, port))\n if int_sec > 0:\n client.send(OSCMessage('/test', int_one, int_sec))\n else:\n client.send(OSCMessage('/test', int_one))\n\n@then('I hear a sound')\ndef step_impl(context):\n pass\n\n@then('I hear two sounds')\ndef step_impl(context):\n pass\n\n@then('I hear a sound that lasts for {son_length} seconds')\ndef step_impl(context,son_length):\n pass\n\ndef listen_handler(path, tags, args, source):\n '''\n Handler for the listener\n '''\n response = args[0]\n assert(response, \"Negative numbers not processed\")\n\n@then('I want to receive \"{message}\"')\ndef step_impl(context, message):\n listen = OSCServer((\"127.0.0.1\", 4589))\n listen.timeout = 0 #infinite 
timeout\n listen.addMsgHandler(\"/err\", listen_handler)\n listen.close()\n \n",
"step-ids": [
8,
10,
12,
13,
15
]
}
|
[
8,
10,
12,
13,
15
] |
"""
챕터: day4
주제: 반복문(for문)
문제: 1에서 100까지 합을 구하여 출력하시오.
작성자: 한현수
작성일: 2018.9.20.
"""
result = 0
for i in range(101):
result += i
print(result)
|
normal
|
{
"blob_id": "d2754099adebdb4bd2b028fdf9015571ad773754",
"index": 9313,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(101):\n result += i\nprint(result)\n",
"step-3": "<mask token>\nresult = 0\nfor i in range(101):\n result += i\nprint(result)\n",
"step-4": "\"\"\"\n챕터: day4\n주제: 반복문(for문)\n문제: 1에서 100까지 합을 구하여 출력하시오.\n작성자: 한현수\n작성일: 2018.9.20.\n\"\"\"\nresult = 0\nfor i in range(101):\n result += i\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class DateTimeEncoder(json.JSONEncoder):
def default(self, z):
if isinstance(z, datetime.datetime):
return str(z)
else:
return super().default(z)
<|reserved_special_token_0|>
def FindWorkload(waclient, workloadName):
try:
response = waclient.list_workloads(WorkloadNamePrefix=workloadName)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
workloadId = response['WorkloadSummaries'][0]['WorkloadId']
workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
return workloadId, workloadArn
def DeleteWorkload(waclient, workloadId):
try:
response = waclient.delete_workload(WorkloadId=workloadId)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
<|reserved_special_token_0|>
def listLens(waclient):
try:
response = waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
lenses = jmespath.search('LensSummaries[*].LensAlias', response)
return lenses
def getCurrentLensVersion(waclient, lensAlias):
try:
response = waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'
lenses = jmespath.search(searchString, response)
return lenses[0]
def findAllQuestionId(waclient, workloadId, lensAlias):
answers = []
for pillar in PILLAR_PARSE_MAP:
logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))
try:
response = waclient.list_answers(WorkloadId=workloadId,
LensAlias=lensAlias, PillarId=pillar)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
answers.extend(response['AnswerSummaries'])
while 'NextToken' in response:
try:
response = waclient.list_answers(WorkloadId=workloadId,
LensAlias=lensAlias, PillarId=pillar, NextToken=
response['NextToken'])
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
answers.extend(response['AnswerSummaries'])
return answers
def getQuestionDetails(waclient, workloadId, lensAlias, questionId):
try:
response = waclient.get_answer(WorkloadId=workloadId, LensAlias=
lensAlias, QuestionId=questionId)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
qDescription = jmespath.search('Answer.QuestionDescription', response)
qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response
)
qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response
)
qNotes = jmespath.search('Answer.Notes', response)
return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,
selectedChoices, notes):
try:
response = waclient.update_answer(WorkloadId=workloadId, LensAlias=
lensAlias, QuestionId=questionId, SelectedChoices=
selectedChoices, Notes=notes)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
jmesquery = 'Answer.SelectedChoices'
answers = jmespath.search(jmesquery, response)
return answers
def getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,
PillarId, ImprovementPlanUrl, ChoiceList):
response = {}
htmlString = ''
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode('utf8')
htmlSplit = htmlStr.split('\n')
ipHTMLList = {}
for line in htmlSplit:
for uq in ChoiceList:
if uq in line:
parsed = BeautifulSoup(line, features='html.parser')
ipHTMLList.update({uq: str(parsed.a['href'])})
return ipHTMLList
def getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):
logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (
ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))
stepRaw = ImprovementPlanUrl.rsplit('#')[1]
if len(stepRaw) <= 5:
stepNumber = stepRaw[-1]
else:
stepNumber = stepRaw[-2]
firstItem = 'step' + stepNumber
secondItem = 'step' + str(int(stepNumber) + 1)
logger.debug('Going from %s to %s' % (firstItem, secondItem))
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode('utf8')
htmlSplit = htmlStr.split('\n')
foundit = 0
ipString = ''
questionIdText = ''
for i in htmlSplit:
if PILLAR_PARSE_MAP[PillarId] in i:
bsparse = BeautifulSoup(i, features='html.parser')
questionIdText = str(bsparse.text).split(':')[0].strip()
if secondItem in i or '</div>' in i:
foundit = 0
if firstItem in i:
foundit = 1
ipString += i
elif foundit:
ipString += i
prettyHTML = BeautifulSoup(ipString, features='html.parser')
for a in prettyHTML.findAll('a', 'glossref'):
a.replaceWithChildren()
return prettyHTML, questionIdText
def lensTabCreation(WACLIENT, workloadId, lens, workbook,
allQuestionsForLens, workloadName='', AWSAccountId='',
workloadDescription=''):
bold = workbook.add_format({'bold': True})
bold_border = workbook.add_format({'border': 1, 'border_color': 'black',
'text_wrap': True})
bold_border_bold = workbook.add_format({'border': 1, 'border_color':
'black', 'text_wrap': True, 'font_size': 20, 'bold': True})
heading = workbook.add_format({'font_size': 24, 'bold': True})
lineA = workbook.add_format({'border': 1, 'border_color': 'black',
'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})
lineB = workbook.add_format({'border': 1, 'border_color': 'black',
'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})
lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':
'top', 'text_wrap': True})
lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':
'top', 'text_wrap': True})
lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',
'text_wrap': False, 'indent': 100})
lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',
'text_wrap': False, 'indent': 100})
sub_heading = workbook.add_format()
sub_heading.set_font_size(20)
sub_heading.set_bold(True)
small_font = workbook.add_format()
small_font.set_font_size(9)
logger.debug("Getting lens version for '" + lens + "'")
versionString = getCurrentLensVersion(WACLIENT, lens)
logger.debug('Adding worksheet using version ' + versionString)
lensName = lens[0:18]
worksheet = workbook.add_worksheet(lensName + ' v' + versionString)
worksheet.set_landscape()
worksheet.set_paper(1)
worksheet.set_column('A:A', 11)
worksheet.set_column('B:B', 32)
worksheet.set_column('C:C', 56)
worksheet.set_column('D:D', 29)
worksheet.set_column('E:E', 57)
worksheet.set_column('F:F', 18)
worksheet.set_column('G:G', 70)
worksheet.merge_range('A1:G1', 'Workload Overview', heading)
worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
if WORKLOADID:
worksheet.write('C3', workloadName, bold_border)
accountIdParsed = AWSAccountId.split(':')[4]
worksheet.write('C4', accountIdParsed, bold_border)
worksheet.write('C5', workloadDescription, bold_border)
else:
worksheet.write('C3', '', bold_border)
worksheet.write('C4', '', bold_border)
worksheet.write('C5', '', bold_border)
worksheet.write('D3', 'Enter the name of system', small_font)
worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
worksheet.write('D5',
'Briefly describe system architecture and workload, flow etc.',
small_font)
worksheet.write('A8', 'Pillar', sub_heading)
worksheet.write('B8', 'Question', sub_heading)
worksheet.write('C8', 'Explanation', sub_heading)
worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
worksheet.write('E8', 'Detail', sub_heading)
worksheet.write('F8', 'Response', sub_heading)
worksheet.write('G8', 'Notes (optional)', sub_heading)
worksheet.freeze_panes(8, 0)
worksheet.autofilter('A8:B8')
worksheet.repeat_rows(1, 8)
worksheet.fit_to_pages(1, 99)
cellPosition = 8
myCell = lineA
myCellhidden = lineAhidden
myCellnoborder = lineAnoborder
for pillar in PILLAR_PARSE_MAP:
qNum = 1
jmesquery = "[?PillarId=='" + pillar + "']"
allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
for answers in allQuestionsForPillar:
questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum
) + ' - ' + answers['QuestionTitle']
(qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
) = (getQuestionDetails(WACLIENT, workloadId, lens, answers
['QuestionId']))
qDescription = qDescription.replace('\n ', '').replace(' '
, '').replace('\t', '').replace('\n', '')
qDescription = qDescription.rstrip()
qDescription = qDescription.strip()
logger.debug("Working on '" + questionTitle + "'")
logger.debug('It has answers of: ' + json.dumps(answers[
'SelectedChoices']))
cellID = cellPosition + 1
if qImprovementPlanUrl:
jmesquery = "[?QuestionId=='" + answers['QuestionId'
] + "'].Choices[].ChoiceId"
choiceList = jmespath.search(jmesquery, allQuestionsForLens)
ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,
answers['QuestionId'], answers['PillarId'],
qImprovementPlanUrl, choiceList)
else:
ipList = []
startingCellID = cellID
firstTimePillar = True
for choices in answers['Choices']:
cell = 'A' + str(cellID)
if firstTimePillar:
worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
myCellnoborder)
cell = 'B' + str(cellID)
worksheet.write(cell, questionTitle, myCellnoborder)
firstTimePillar = False
else:
worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
myCellhidden)
cell = 'B' + str(cellID)
worksheet.write(cell, questionTitle, myCellhidden)
cell = 'D' + str(cellID)
Title = choices['Title'].replace(' ', '').replace('\t', ''
).replace('\n', '')
if any(choices['ChoiceId'] in d for d in ipList):
worksheet.write_url(cell, ipList[choices['ChoiceId']],
myCell, string=Title)
htmlString = ''
htmlString = htmlString.replace('\n ', '').replace(
' ', '').replace('\t', '').strip().rstrip()
worksheet.write_comment(cell, htmlString, {'author':
'Improvement Plan'})
else:
worksheet.write(cell, Title, myCell)
cell = 'E' + str(cellID)
Description = choices['Description'].replace(
'\n ', '')
Description = Description.replace('\n ', '')
Description = Description.replace(' ', '').replace('\t', ''
).replace('\n', '')
Description = Description.rstrip()
Description = Description.strip()
worksheet.write(cell, Description, myCell)
cell = 'F' + str(cellID)
responseText = ''
if choices['ChoiceId'] in answers['SelectedChoices']:
responseText = 'SELECTED'
else:
responseText = ''
worksheet.write(cell, responseText, myCell)
cellID += 1
cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)
worksheet.merge_range(cellMerge, qDescription, myCell)
cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)
if WORKLOADID:
worksheet.merge_range(cellMerge, qNotes, myCell)
else:
worksheet.merge_range(cellMerge, '', myCell)
cellID -= 1
qNum += 1
cellPosition = cellID
if myCell == lineA:
myCell = lineB
myCellhidden = lineBhidden
myCellnoborder = lineBnoborder
else:
myCell = lineA
myCellhidden = lineAhidden
myCellnoborder = lineAnoborder
def main():
boto3_min_version = '1.16.38'
if packaging.version.parse(boto3.__version__) < packaging.version.parse(
boto3_min_version):
logger.error(
'Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)'
% (boto3.__version__, boto3_min_version))
exit()
logger.info('Script version %s' % __version__)
logger.info('Starting Boto %s Session' % boto3.__version__)
SESSION1 = boto3.session.Session(profile_name=PROFILE)
WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=
REGION_NAME)
if WORKLOADID:
logger.info('User specified workload id of %s' % WORKLOADID)
workloadJson = GetWorkload(WACLIENT, WORKLOADID)
LENSES = workloadJson['Lenses']
logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))
WORKLOADNAME = workloadJson['WorkloadName']
DESCRIPTION = workloadJson['Description']
REVIEWOWNER = workloadJson['ReviewOwner']
ENVIRONMENT = workloadJson['Environment']
AWSREGIONS = workloadJson['AwsRegions']
workloadId = WORKLOADID
workloadARN = workloadJson['WorkloadArn']
else:
logger.info('No workload ID specified, we will create a TEMP workload')
LENSES = listLens(WACLIENT)
logger.info('Lenses available: ' + json.dumps(LENSES))
WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
REVIEWOWNER = 'WA Python Script'
ENVIRONMENT = 'PRODUCTION'
AWSREGIONS = [REGION_NAME]
logger.info('Creating a new workload to gather questions and answers')
workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,
DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',
'[]')
logger.info("Creating xlsx file '" + FILENAME + "'")
workbook = xlsxwriter.Workbook(FILENAME)
workbook.set_size(2800, 1600)
LENSES.sort(reverse=True)
for lens in LENSES:
allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
if WORKLOADID:
logger.debug('Not answering questions for existing workload')
lensTabCreation(WACLIENT, workloadId, lens, workbook,
allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
else:
jmesquery = (
'[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}'
)
allQuestionIds = jmespath.search(jmesquery, allQuestions)
for question in allQuestionIds:
logger.debug('Answering question %s in the %s lens' % (
question['QuestionId'], lens))
updateAnswersForQuestion(WACLIENT, workloadId, lens,
question['QuestionId'], question['Choices'],
'TEMP WORKLOAD - Added by export script')
lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
logger.info('Closing Workbook File')
workbook.close()
if not WORKLOADID:
if not KEEPTEMP:
logger.info('Removing TEMP Workload')
DeleteWorkload(WACLIENT, workloadId)
logger.info('Done')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DateTimeEncoder(json.JSONEncoder):
def default(self, z):
if isinstance(z, datetime.datetime):
return str(z)
else:
return super().default(z)
def CreateNewWorkload(waclient, workloadName, description, reviewOwner,
environment, awsRegions, lenses, tags, pillarPriorities, notes='',
nonAwsRegions=[], architecturalDesign='', industryType='', industry='',
accountIds=[]):
try:
response = waclient.create_workload(WorkloadName=workloadName,
Description=description, ReviewOwner=reviewOwner, Environment=
environment, AwsRegions=awsRegions, Lenses=lenses,
NonAwsRegions=nonAwsRegions, ArchitecturalDesign=
architecturalDesign, IndustryType=industryType, Industry=
industry, Notes=notes, AccountIds=accountIds)
except waclient.exceptions.ConflictException as e:
workloadId, workloadARN = FindWorkload(waclient, workloadName)
logger.error(
'ERROR - The workload name %s already exists as workloadId %s' %
(workloadName, workloadId))
return workloadId, workloadARN
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
workloadId = response['WorkloadId']
workloadARN = response['WorkloadArn']
return workloadId, workloadARN
def FindWorkload(waclient, workloadName):
try:
response = waclient.list_workloads(WorkloadNamePrefix=workloadName)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
workloadId = response['WorkloadSummaries'][0]['WorkloadId']
workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
return workloadId, workloadArn
def DeleteWorkload(waclient, workloadId):
try:
response = waclient.delete_workload(WorkloadId=workloadId)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
def GetWorkload(waclient, workloadId):
try:
response = waclient.get_workload(WorkloadId=workloadId)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
exit()
workload = response['Workload']
return workload
def listLens(waclient):
try:
response = waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
lenses = jmespath.search('LensSummaries[*].LensAlias', response)
return lenses
def getCurrentLensVersion(waclient, lensAlias):
try:
response = waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'
lenses = jmespath.search(searchString, response)
return lenses[0]
def findAllQuestionId(waclient, workloadId, lensAlias):
answers = []
for pillar in PILLAR_PARSE_MAP:
logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))
try:
response = waclient.list_answers(WorkloadId=workloadId,
LensAlias=lensAlias, PillarId=pillar)
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
answers.extend(response['AnswerSummaries'])
while 'NextToken' in response:
try:
response = waclient.list_answers(WorkloadId=workloadId,
LensAlias=lensAlias, PillarId=pillar, NextToken=
response['NextToken'])
except botocore.exceptions.ParamValidationError as e:
logger.error('ERROR - Parameter validation error: %s' % e)
except botocore.exceptions.ClientError as e:
logger.error('ERROR - Unexpected error: %s' % e)
answers.extend(response['AnswerSummaries'])
return answers
def getQuestionDetails(waclient, workloadId, lensAlias, questionId):
    """Fetch one answer and return a 4-tuple of (question description,
    improvement-plan URL, helpful-resource URL, notes).

    Exits with a logged error on API failure (previously fell through
    to an unbound `response`).
    """
    try:
        response = waclient.get_answer(WorkloadId=workloadId, LensAlias=
            lensAlias, QuestionId=questionId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        exit()
    qDescription = jmespath.search('Answer.QuestionDescription', response)
    qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response)
    qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response)
    qNotes = jmespath.search('Answer.Notes', response)
    return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,
    selectedChoices, notes):
    """Record `selectedChoices` (plus notes) for a question and return
    the SelectedChoices list the service stored.

    Exits with a logged error on API failure (previously fell through
    to an unbound `response`).
    """
    try:
        response = waclient.update_answer(WorkloadId=workloadId, LensAlias=
            lensAlias, QuestionId=questionId, SelectedChoices=
            selectedChoices, Notes=notes)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        exit()
    return jmespath.search('Answer.SelectedChoices', response)
def getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,
    PillarId, ImprovementPlanUrl, ChoiceList):
    """Scrape the improvement-plan page and map each choice id found on
    it to the href of its improvement-plan anchor.

    waclient/workloadId/lensAlias/QuestionId/PillarId are unused but
    kept so existing call sites keep working.  Lines that mention a
    choice id but carry no anchor are now skipped instead of crashing
    on `parsed.a['href']` with a None tag.
    """
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlStr = urlresponse.read().decode('utf8')
    ipHTMLList = {}
    for line in htmlStr.split('\n'):
        for uq in ChoiceList:
            if uq in line:
                parsed = BeautifulSoup(line, features='html.parser')
                # Only record lines that actually contain a hyperlink.
                if parsed.a is not None and parsed.a.has_attr('href'):
                    ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):
    """Scrape the improvement-plan page for the single step named by the
    URL's '#...stepN' fragment.

    Returns (BeautifulSoup fragment for that step's HTML, question id
    text).  The parsing is anchored on the raw markup of the AWS docs
    pages and is therefore sensitive to changes in their layout.
    """
    logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (
        ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))
    # Pull the step number out of the URL fragment after '#'.
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # NOTE(review): assumes the step digit sits at a fixed position in
    # the fragment (last char for short fragments, second-to-last for
    # longer ones) -- confirm against current doc URLs.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    firstItem = 'step' + stepNumber
    secondItem = 'step' + str(int(stepNumber) + 1)
    logger.debug('Going from %s to %s' % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode('utf8')
    htmlSplit = htmlStr.split('\n')
    foundit = 0
    ipString = ''
    questionIdText = ''
    # Accumulate raw HTML lines between this step's anchor and the next
    # step's anchor (or a closing </div>).
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i, features='html.parser')
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if secondItem in i or '</div>' in i:
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString += i
        elif foundit:
            ipString += i
    prettyHTML = BeautifulSoup(ipString, features='html.parser')
    # Unwrap glossary links but keep their visible text.
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(WACLIENT, workloadId, lens, workbook,
    allQuestionsForLens, workloadName='', AWSAccountId='',
    workloadDescription=''):
    """Render one worksheet ('<lens> v<version>') into `workbook` with
    every question, choice, response and note for `lens`.

    When the module-level WORKLOADID is truthy the overview cells are
    filled from workloadName / AWSAccountId / workloadDescription;
    otherwise they are left blank for the reader to fill in.
    """
    # Cell formats; the A/B variants alternate row colouring per question.
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({'border': 1, 'border_color': 'black',
        'text_wrap': True})
    bold_border_bold = workbook.add_format({'border': 1, 'border_color':
        'black', 'text_wrap': True, 'font_size': 20, 'bold': True})
    heading = workbook.add_format({'font_size': 24, 'bold': True})
    lineA = workbook.add_format({'border': 1, 'border_color': 'black',
        'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})
    lineB = workbook.add_format({'border': 1, 'border_color': 'black',
        'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})
    lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
        'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':
        'top', 'text_wrap': True})
    lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
        'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':
        'top', 'text_wrap': True})
    lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
        'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',
        'text_wrap': False, 'indent': 100})
    lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
        'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',
        'text_wrap': False, 'indent': 100})
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # One tab per lens, named '<lens> v<version>' (lens name truncated
    # to 18 chars to respect Excel's 31-char sheet-name limit).
    logger.debug("Getting lens version for '" + lens + "'")
    versionString = getCurrentLensVersion(WACLIENT, lens)
    logger.debug('Adding worksheet using version ' + versionString)
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet(lensName + ' v' + versionString)
    worksheet.set_landscape()
    worksheet.set_paper(1)
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Overview header block (rows 1-5).
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
    worksheet.write('D3', 'Enter the name of system', small_font)
    # NOTE(review): '12-degit' is a typo in the template text ('12-digit').
    worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
    worksheet.write('D5',
        'Briefly describe system architecture and workload, flow etc.',
        small_font)
    # Column headings for the question table (row 8).
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    worksheet.freeze_panes(8, 0)
    worksheet.autofilter('A8:B8')
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    cellPosition = 8
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    # Walk pillars in PILLAR_PARSE_MAP order; question numbering restarts
    # per pillar (OPS1, OPS2, ..., SEC1, ...).
    for pillar in PILLAR_PARSE_MAP:
        qNum = 1
        jmesquery = "[?PillarId=='" + pillar + "']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        for answers in allQuestionsForPillar:
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum
                ) + ' - ' + answers['QuestionTitle']
            (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
                ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers
                ['QuestionId']))
            qDescription = qDescription.replace('\n            ', '').replace('    '
                , '').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '" + questionTitle + "'")
            logger.debug('It has answers of: ' + json.dumps(answers[
                'SelectedChoices']))
            cellID = cellPosition + 1
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='" + answers['QuestionId'
                    ] + "'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,
                    answers['QuestionId'], answers['PillarId'],
                    qImprovementPlanUrl, choiceList)
            else:
                ipList = []
            startingCellID = cellID
            firstTimePillar = True
            # One spreadsheet row per choice; only the question's first
            # row shows the pillar/question text (the rest are "hidden"
            # by a huge indent so the autofilter still matches them).
            for choices in answers['Choices']:
                cell = 'A' + str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
                        myCellnoborder)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar = False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
                        myCellhidden)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                cell = 'D' + str(cellID)
                Title = choices['Title'].replace('    ', '').replace('\t', ''
                    ).replace('\n', '')
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']],
                        myCell, string=Title)
                    # NOTE(review): htmlString is reset to '' immediately
                    # before write_comment, so the Improvement Plan cell
                    # comment is always empty -- looks like lost code.
                    htmlString = ''
                    htmlString = htmlString.replace('\n            ', '').replace(
                        '    ', '').replace('\t', '').strip().rstrip()
                    worksheet.write_comment(cell, htmlString, {'author':
                        'Improvement Plan'})
                else:
                    worksheet.write(cell, Title, myCell)
                cell = 'E' + str(cellID)
                Description = choices['Description'].replace(
                    '\n            ', '')
                Description = Description.replace('\n                ', '')
                Description = Description.replace('    ', '').replace('\t', ''
                    ).replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description, myCell)
                cell = 'F' + str(cellID)
                responseText = ''
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = 'SELECTED'
                else:
                    responseText = ''
                worksheet.write(cell, responseText, myCell)
                cellID += 1
            # Merge the explanation (C) and notes (G) cells across every
            # choice row belonging to this question.
            cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)
            worksheet.merge_range(cellMerge, qDescription, myCell)
            cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, '', myCell)
            cellID -= 1
            qNum += 1
            cellPosition = cellID
            # Alternate the A/B colour scheme for the next question.
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export all WA lens questions/answers to an XLSX file.

    With -w the existing workload's answers are exported; without it a
    TEMP workload is created, every question auto-answered, exported,
    and (unless -k) deleted again.
    """
    # update_answer with zero choices requires boto3 >= 1.16.38.
    boto3_min_version = '1.16.38'
    if packaging.version.parse(boto3.__version__) < packaging.version.parse(
        boto3_min_version):
        logger.error(
            'Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)'
             % (boto3.__version__, boto3_min_version))
        exit()
    logger.info('Script version %s' % __version__)
    logger.info('Starting Boto %s Session' % boto3.__version__)
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=
        REGION_NAME)
    if WORKLOADID:
        # Existing workload: read its metadata and lens list as-is.
        logger.info('User specified workload id of %s' % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # No workload: create a throwaway one covering every lens.
        logger.info('No workload ID specified, we will create a TEMP workload')
        LENSES = listLens(WACLIENT)
        logger.info('Lenses available: ' + json.dumps(LENSES))
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        logger.info('Creating a new workload to gather questions and answers')
        # NOTE(review): tags and pillarPriorities are passed as the
        # literal string '[]', not empty lists -- confirm intent.
        workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,
            DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',
            '[]')
    logger.info("Creating xlsx file '" + FILENAME + "'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    LENSES.sort(reverse=True)
    # One worksheet per lens; for TEMP workloads every question is first
    # auto-answered so all choices show up as selected.
    for lens in LENSES:
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            logger.debug('Not answering questions for existing workload')
            lensTabCreation(WACLIENT, workloadId, lens, workbook,
                allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
        else:
            jmesquery = (
                '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}'
                )
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            for question in allQuestionIds:
                logger.debug('Answering question %s in the %s lens' % (
                    question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT, workloadId, lens,
                    question['QuestionId'], question['Choices'],
                    'TEMP WORKLOAD - Added by export script')
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
    logger.info('Closing Workbook File')
    workbook.close()
    # Clean up the throwaway workload unless -k was given.
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info('Removing TEMP Workload')
            DeleteWorkload(WACLIENT, workloadId)
    logger.info('Done')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.DEBUG, format=
'%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'
, datefmt='%Y-%m-%d %H:%M:%S')
<|reserved_special_token_0|>
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
<|reserved_special_token_0|>
PARSER.add_argument('-p', '--profile', required=False, default='default',
help='AWS CLI Profile Name')
PARSER.add_argument('-r', '--region', required=False, default='us-east-1',
help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w', '--workloadid', required=False, default='', help=
'Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k', '--keeptempworkload', action='store_true', help=
'If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f', '--fileName', required=True, default=
'./demo.xlsx', help='FileName to export XLSX')
PARSER.add_argument('-v', '--debug', action='store_true', help=
'print debug messages to stderr')
<|reserved_special_token_0|>
if ARGUMENTS.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
<|reserved_special_token_0|>
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime.datetime values via str()."""

    def default(self, z):
        # Guard clause: anything that is not a datetime is deferred to
        # the base encoder, which raises TypeError for unserializable
        # objects -- identical behavior to the original if/else form.
        if not isinstance(z, datetime.datetime):
            return super().default(z)
        return str(z)
def CreateNewWorkload(waclient, workloadName, description, reviewOwner,
    environment, awsRegions, lenses, tags, pillarPriorities, notes='',
    nonAwsRegions=None, architecturalDesign='', industryType='', industry='',
    accountIds=None):
    """Create a Well-Architected workload and return (workloadId,
    workloadARN).

    If the name already exists, the existing workload's ids are looked
    up and returned instead.  `tags` and `pillarPriorities` are accepted
    for call-site compatibility but are not sent to the API (matching
    the original behavior).  Other API failures log and exit instead of
    falling through to an unbound `response`.
    """
    # Avoid mutable default arguments; None is the backward-compatible
    # sentinel for the previous [] defaults.
    nonAwsRegions = [] if nonAwsRegions is None else nonAwsRegions
    accountIds = [] if accountIds is None else accountIds
    try:
        response = waclient.create_workload(WorkloadName=workloadName,
            Description=description, ReviewOwner=reviewOwner, Environment=
            environment, AwsRegions=awsRegions, Lenses=lenses,
            NonAwsRegions=nonAwsRegions, ArchitecturalDesign=
            architecturalDesign, IndustryType=industryType, Industry=
            industry, Notes=notes, AccountIds=accountIds)
    except waclient.exceptions.ConflictException:
        # Name collision: resolve and return the existing workload.
        workloadId, workloadARN = FindWorkload(waclient, workloadName)
        logger.error(
            'ERROR - The workload name %s already exists as workloadId %s' %
            (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        exit()
    return response['WorkloadId'], response['WorkloadArn']
def FindWorkload(waclient, workloadName):
    """Return (workloadId, workloadArn) of the first workload whose name
    starts with `workloadName`.

    Logs and exits on API failure (previously left `response` unbound)
    or when no workload matches (previously raised IndexError).
    """
    try:
        response = waclient.list_workloads(WorkloadNamePrefix=workloadName)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        exit()
    summaries = response['WorkloadSummaries']
    if not summaries:
        logger.error('ERROR - Could not find workload named %s' % workloadName)
        exit()
    return summaries[0]['WorkloadId'], summaries[0]['WorkloadArn']
def DeleteWorkload(waclient, workloadId):
    """Delete the given workload; API errors are logged, never raised."""
    try:
        waclient.delete_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as err:
        logger.error('ERROR - Parameter validation error: %s' % err)
    except botocore.exceptions.ClientError as err:
        logger.error('ERROR - Unexpected error: %s' % err)
def GetWorkload(waclient, workloadId):
    """Return the Workload document for `workloadId`.

    Exits with a logged error on any API failure; the original only
    exited after ClientError, so a ParamValidationError fell through to
    an unbound `response`.
    """
    try:
        response = waclient.get_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        exit()
    workload = response['Workload']
    return workload
def listLens(waclient):
    """Return the list of lens aliases available in the WA service."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
    # NOTE(review): if an exception was caught above, `response` is
    # unbound here and the next line raises NameError.
    lenses = jmespath.search('LensSummaries[*].LensAlias', response)
    return lenses
def getCurrentLensVersion(waclient, lensAlias):
    """Return the LensVersion string for the given lens alias."""
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
    searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'
    lenses = jmespath.search(searchString, response)
    # NOTE(review): raises IndexError if the alias is not found.
    return lenses[0]
def findAllQuestionId(waclient, workloadId, lensAlias):
    """Return every AnswerSummary for `lensAlias` across all pillars,
    following NextToken pagination."""
    answers = []
    for pillar in PILLAR_PARSE_MAP:
        logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))
        try:
            response = waclient.list_answers(WorkloadId=workloadId,
                LensAlias=lensAlias, PillarId=pillar)
        except botocore.exceptions.ParamValidationError as e:
            logger.error('ERROR - Parameter validation error: %s' % e)
        except botocore.exceptions.ClientError as e:
            logger.error('ERROR - Unexpected error: %s' % e)
        answers.extend(response['AnswerSummaries'])
        # Keep fetching pages while the service hands back a NextToken.
        while 'NextToken' in response:
            try:
                response = waclient.list_answers(WorkloadId=workloadId,
                    LensAlias=lensAlias, PillarId=pillar, NextToken=
                    response['NextToken'])
            except botocore.exceptions.ParamValidationError as e:
                logger.error('ERROR - Parameter validation error: %s' % e)
            except botocore.exceptions.ClientError as e:
                logger.error('ERROR - Unexpected error: %s' % e)
            answers.extend(response['AnswerSummaries'])
    return answers
def getQuestionDetails(waclient, workloadId, lensAlias, questionId):
    """Fetch one answer and return (description, improvement-plan URL,
    helpful-resource URL, notes) for the question."""
    try:
        response = waclient.get_answer(WorkloadId=workloadId, LensAlias=
            lensAlias, QuestionId=questionId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
    qDescription = jmespath.search('Answer.QuestionDescription', response)
    qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response
        )
    qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response
        )
    qNotes = jmespath.search('Answer.Notes', response)
    return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,
    selectedChoices, notes):
    """Record `selectedChoices` (plus notes) for a question and return
    the SelectedChoices list the service stored."""
    try:
        response = waclient.update_answer(WorkloadId=workloadId, LensAlias=
            lensAlias, QuestionId=questionId, SelectedChoices=
            selectedChoices, Notes=notes)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
    jmesquery = 'Answer.SelectedChoices'
    answers = jmespath.search(jmesquery, response)
    return answers
def getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,
    PillarId, ImprovementPlanUrl, ChoiceList):
    """Scrape the improvement-plan page and map each choice id found on
    it to the href of its improvement-plan anchor.

    waclient/workloadId/lensAlias/QuestionId/PillarId are unused here.
    """
    response = {}
    htmlString = ''
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode('utf8')
    htmlSplit = htmlStr.split('\n')
    ipHTMLList = {}
    for line in htmlSplit:
        for uq in ChoiceList:
            if uq in line:
                parsed = BeautifulSoup(line, features='html.parser')
                # NOTE(review): crashes if the matched line has no <a>
                # tag (parsed.a is None) -- confirm pages always link.
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):
    """Scrape the improvement-plan page for the single step named by the
    URL's '#...stepN' fragment; return (HTML fragment, question id text).
    """
    logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (
        ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    firstItem = 'step' + stepNumber
    secondItem = 'step' + str(int(stepNumber) + 1)
    logger.debug('Going from %s to %s' % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode('utf8')
    htmlSplit = htmlStr.split('\n')
    foundit = 0
    ipString = ''
    questionIdText = ''
    # Collect raw HTML lines between this step's anchor and the next
    # step's anchor (or a closing </div>).
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i, features='html.parser')
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if secondItem in i or '</div>' in i:
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString += i
        elif foundit:
            ipString += i
    prettyHTML = BeautifulSoup(ipString, features='html.parser')
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(WACLIENT, workloadId, lens, workbook,
    allQuestionsForLens, workloadName='', AWSAccountId='',
    workloadDescription=''):
    """Render one worksheet ('<lens> v<version>') into `workbook` with
    every question, choice, response and note for `lens`."""
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({'border': 1, 'border_color': 'black',
        'text_wrap': True})
    bold_border_bold = workbook.add_format({'border': 1, 'border_color':
        'black', 'text_wrap': True, 'font_size': 20, 'bold': True})
    heading = workbook.add_format({'font_size': 24, 'bold': True})
    lineA = workbook.add_format({'border': 1, 'border_color': 'black',
        'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})
    lineB = workbook.add_format({'border': 1, 'border_color': 'black',
        'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})
    lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
        'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':
        'top', 'text_wrap': True})
    lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
        'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':
        'top', 'text_wrap': True})
    lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
        'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',
        'text_wrap': False, 'indent': 100})
    lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
        'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',
        'text_wrap': False, 'indent': 100})
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    logger.debug("Getting lens version for '" + lens + "'")
    versionString = getCurrentLensVersion(WACLIENT, lens)
    logger.debug('Adding worksheet using version ' + versionString)
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet(lensName + ' v' + versionString)
    worksheet.set_landscape()
    worksheet.set_paper(1)
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Overview header block (rows 1-5).
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
    worksheet.write('D3', 'Enter the name of system', small_font)
    worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
    worksheet.write('D5',
        'Briefly describe system architecture and workload, flow etc.',
        small_font)
    # Column headings for the question table (row 8).
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    worksheet.freeze_panes(8, 0)
    worksheet.autofilter('A8:B8')
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    cellPosition = 8
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    # One row per choice, grouped per question per pillar; colours
    # alternate per question.
    for pillar in PILLAR_PARSE_MAP:
        qNum = 1
        jmesquery = "[?PillarId=='" + pillar + "']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        for answers in allQuestionsForPillar:
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum
                ) + ' - ' + answers['QuestionTitle']
            (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
                ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers
                ['QuestionId']))
            qDescription = qDescription.replace('\n            ', '').replace('    '
                , '').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '" + questionTitle + "'")
            logger.debug('It has answers of: ' + json.dumps(answers[
                'SelectedChoices']))
            cellID = cellPosition + 1
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='" + answers['QuestionId'
                    ] + "'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,
                    answers['QuestionId'], answers['PillarId'],
                    qImprovementPlanUrl, choiceList)
            else:
                ipList = []
            startingCellID = cellID
            firstTimePillar = True
            for choices in answers['Choices']:
                cell = 'A' + str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
                        myCellnoborder)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar = False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
                        myCellhidden)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                cell = 'D' + str(cellID)
                Title = choices['Title'].replace('    ', '').replace('\t', ''
                    ).replace('\n', '')
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']],
                        myCell, string=Title)
                    htmlString = ''
                    htmlString = htmlString.replace('\n            ', '').replace(
                        '    ', '').replace('\t', '').strip().rstrip()
                    worksheet.write_comment(cell, htmlString, {'author':
                        'Improvement Plan'})
                else:
                    worksheet.write(cell, Title, myCell)
                cell = 'E' + str(cellID)
                Description = choices['Description'].replace(
                    '\n            ', '')
                Description = Description.replace('\n                ', '')
                Description = Description.replace('    ', '').replace('\t', ''
                    ).replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description, myCell)
                cell = 'F' + str(cellID)
                responseText = ''
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = 'SELECTED'
                else:
                    responseText = ''
                worksheet.write(cell, responseText, myCell)
                cellID += 1
            cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)
            worksheet.merge_range(cellMerge, qDescription, myCell)
            cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, '', myCell)
            cellID -= 1
            qNum += 1
            cellPosition = cellID
            if myCell == lineA:
                myCell = lineB
                myCellhidden = lineBhidden
                myCellnoborder = lineBnoborder
            else:
                myCell = lineA
                myCellhidden = lineAhidden
                myCellnoborder = lineAnoborder
def main():
    """Entry point: export all WA lens questions/answers to an XLSX file.

    With -w the existing workload's answers are exported; without it a
    TEMP workload is created, auto-answered, exported, and (unless -k)
    deleted again.
    """
    boto3_min_version = '1.16.38'
    if packaging.version.parse(boto3.__version__) < packaging.version.parse(
        boto3_min_version):
        logger.error(
            'Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)'
             % (boto3.__version__, boto3_min_version))
        exit()
    logger.info('Script version %s' % __version__)
    logger.info('Starting Boto %s Session' % boto3.__version__)
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=
        REGION_NAME)
    if WORKLOADID:
        logger.info('User specified workload id of %s' % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        logger.info('No workload ID specified, we will create a TEMP workload')
        LENSES = listLens(WACLIENT)
        logger.info('Lenses available: ' + json.dumps(LENSES))
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        logger.info('Creating a new workload to gather questions and answers')
        workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,
            DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',
            '[]')
    logger.info("Creating xlsx file '" + FILENAME + "'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    LENSES.sort(reverse=True)
    for lens in LENSES:
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            logger.debug('Not answering questions for existing workload')
            lensTabCreation(WACLIENT, workloadId, lens, workbook,
                allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
        else:
            jmesquery = (
                '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}'
                )
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            for question in allQuestionIds:
                logger.debug('Answering question %s in the %s lens' % (
                    question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT, workloadId, lens,
                    question['QuestionId'], question['Choices'],
                    'TEMP WORKLOAD - Added by export script')
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
    logger.info('Closing Workbook File')
    workbook.close()
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info('Removing TEMP Workload')
            DeleteWorkload(WACLIENT, workloadId)
    logger.info('Done')
# Run the exporter only when executed as a script (not on import).
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
__author__ = 'Eric Pullen'
__email__ = 'eppullen@amazon.com'
__copyright__ = (
'Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.')
__credits__ = ['Eric Pullen']
__version__ = '0.1'
REGION_NAME = 'us-east-1'
blankjson = {}
response = ''
# Root logger at DEBUG; individual handlers are tuned below and via -v.
logging.basicConfig(level=logging.DEBUG, format=
    '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'
    , datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger()
# Silence the very chatty AWS/HTTP libraries.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
# Command-line interface; note the arguments are parsed at import time.
PARSER = argparse.ArgumentParser(formatter_class=argparse.
    RawDescriptionHelpFormatter, description=
    """This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
"""
    )
PARSER.add_argument('-p', '--profile', required=False, default='default',
    help='AWS CLI Profile Name')
PARSER.add_argument('-r', '--region', required=False, default='us-east-1',
    help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w', '--workloadid', required=False, default='', help=
    'Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k', '--keeptempworkload', action='store_true', help=
    'If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f', '--fileName', required=True, default=
    './demo.xlsx', help='FileName to export XLSX')
PARSER.add_argument('-v', '--debug', action='store_true', help=
    'print debug messages to stderr')
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# PillarId -> short prefix used in question numbering (OPS1, SEC2, ...)
PILLAR_PARSE_MAP = {'operationalExcellence': 'OPS', 'security': 'SEC',
    'reliability': 'REL', 'performance': 'PERF', 'costOptimization': 'COST'}
# PillarId -> display name used in the spreadsheet's Pillar column.
PILLAR_PROPER_NAME_MAP = {'operationalExcellence': 'Operational Excellence',
    'security': 'Security', 'reliability': 'Reliability', 'performance':
    'Performance Efficiency', 'costOptimization': 'Cost Optimization'}
class DateTimeEncoder(json.JSONEncoder):
    """json.JSONEncoder that renders datetime objects via str()."""

    def default(self, obj):
        # json cannot serialize datetimes natively; fall back to the base
        # class (which raises TypeError) for every other unknown type.
        if not isinstance(obj, datetime.datetime):
            return super().default(obj)
        return str(obj)
def CreateNewWorkload(waclient, workloadName, description, reviewOwner,
    environment, awsRegions, lenses, tags, pillarPriorities, notes='',
    nonAwsRegions=None, architecturalDesign='', industryType='', industry='',
    accountIds=None):
    """Create a Well-Architected workload and return (workloadId, workloadARN).

    If a workload with the same name already exists, the existing workload's
    id/ARN are returned instead of creating a duplicate.  ``tags`` and
    ``pillarPriorities`` are accepted for call compatibility but are not
    forwarded to the API (matching the original behaviour).  Any other API
    failure is logged and re-raised.
    """
    # Mutable default arguments ([]) are shared between calls; use None
    # sentinels and build fresh lists per call instead.
    if nonAwsRegions is None:
        nonAwsRegions = []
    if accountIds is None:
        accountIds = []
    try:
        response = waclient.create_workload(WorkloadName=workloadName,
            Description=description, ReviewOwner=reviewOwner, Environment=
            environment, AwsRegions=awsRegions, Lenses=lenses,
            NonAwsRegions=nonAwsRegions, ArchitecturalDesign=
            architecturalDesign, IndustryType=industryType, Industry=
            industry, Notes=notes, AccountIds=accountIds)
    except waclient.exceptions.ConflictException:
        # Name collision: look up and reuse the existing workload.
        workloadId, workloadARN = FindWorkload(waclient, workloadName)
        logger.error(
            'ERROR - The workload name %s already exists as workloadId %s' %
            (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        raise  # previously fell through to an unbound 'response' (NameError)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        raise
    return response['WorkloadId'], response['WorkloadArn']
def FindWorkload(waclient, workloadName):
    """Return (workloadId, workloadArn) of the first workload matching workloadName.

    The API call is a prefix search; the first summary returned is taken.
    API failures are logged and re-raised.  If no workload matches, the
    indexing below raises IndexError (unchanged behaviour).
    """
    try:
        response = waclient.list_workloads(WorkloadNamePrefix=workloadName)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        raise  # previously fell through to an unbound 'response' (NameError)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        raise
    workloadId = response['WorkloadSummaries'][0]['WorkloadId']
    workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
    return workloadId, workloadArn
def DeleteWorkload(waclient, workloadId):
    """Delete the given workload; API errors are logged and swallowed.

    Returns None either way (best-effort cleanup used at the end of a run).
    """
    try:
        # The API response carries nothing we need, so it is discarded.
        waclient.delete_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
def GetWorkload(waclient, workloadId):
    """Fetch and return the full workload record for workloadId.

    On API failure the error is logged and the script exits, matching the
    original ClientError handling.  (The validation-error path previously
    fell through to an unbound 'response' and crashed with NameError.)
    """
    try:
        response = waclient.get_workload(WorkloadId=workloadId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        exit()
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        exit()
    return response['Workload']
def listLens(waclient):
    """Return the list of lens aliases available to this account/region.

    API failures are logged and re-raised.
    """
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        raise  # previously fell through to an unbound 'response' (NameError)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        raise
    # Trivial projection; a plain comprehension replaces the jmespath query.
    return [summary['LensAlias'] for summary in response['LensSummaries']]
def getCurrentLensVersion(waclient, lensAlias):
    """Return the current version string of the lens with alias lensAlias.

    Raises IndexError if the alias is unknown (same as the original
    jmespath-based lookup).  API failures are logged and re-raised.
    """
    try:
        response = waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        raise  # previously fell through to an unbound 'response' (NameError)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        raise
    versions = [summary['LensVersion'] for summary in response[
        'LensSummaries'] if summary['LensAlias'] == lensAlias]
    return versions[0]
def findAllQuestionId(waclient, workloadId, lensAlias):
    """Return every AnswerSummary for the lens, across all pillars.

    The answers are listed pillar by pillar (working around a lens bug noted
    in the original source) and pagination (NextToken) is followed within
    each pillar.  API failures are logged and re-raised.
    """
    answers = []
    for pillar in PILLAR_PARSE_MAP:
        logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))
        kwargs = {'WorkloadId': workloadId, 'LensAlias': lensAlias,
            'PillarId': pillar}
        # One request loop replaces the duplicated first-page/next-page
        # calls; it also avoids reading an unbound 'response' if the API
        # call raises on the first iteration.
        while True:
            try:
                response = waclient.list_answers(**kwargs)
            except botocore.exceptions.ParamValidationError as e:
                logger.error('ERROR - Parameter validation error: %s' % e)
                raise
            except botocore.exceptions.ClientError as e:
                logger.error('ERROR - Unexpected error: %s' % e)
                raise
            answers.extend(response['AnswerSummaries'])
            if 'NextToken' not in response:
                break
            kwargs['NextToken'] = response['NextToken']
    return answers
def getQuestionDetails(waclient, workloadId, lensAlias, questionId):
    """Return (description, improvementPlanUrl, helpfulResourceUrl, notes).

    Each element is None when the corresponding field is absent, matching
    the behaviour of the original jmespath queries.  API failures are
    logged and re-raised.
    """
    try:
        response = waclient.get_answer(WorkloadId=workloadId, LensAlias=
            lensAlias, QuestionId=questionId)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        raise  # previously fell through to an unbound 'response' (NameError)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        raise
    # Plain dict lookups (returning None when absent) replace the jmespath
    # queries, which behave the same for these simple dotted paths.
    answer = response.get('Answer', {})
    return (answer.get('QuestionDescription'), answer.get(
        'ImprovementPlanUrl'), answer.get('HelpfulResourceUrl'), answer.get(
        'Notes'))
def updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,
    selectedChoices, notes):
    """Set the selected choices and notes for one question.

    Returns the SelectedChoices list stored by the service (None if the
    response lacks it, matching the original jmespath lookup).  API
    failures are logged and re-raised.
    """
    try:
        response = waclient.update_answer(WorkloadId=workloadId, LensAlias=
            lensAlias, QuestionId=questionId, SelectedChoices=
            selectedChoices, Notes=notes)
    except botocore.exceptions.ParamValidationError as e:
        logger.error('ERROR - Parameter validation error: %s' % e)
        raise  # previously fell through to an unbound 'response' (NameError)
    except botocore.exceptions.ClientError as e:
        logger.error('ERROR - Unexpected error: %s' % e)
        raise
    # Simple dotted path; jmespath not required.
    return response.get('Answer', {}).get('SelectedChoices')
def getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,
    PillarId, ImprovementPlanUrl, ChoiceList):
    """Scrape the improvement-plan page and map choice ids to their links.

    Fetches ImprovementPlanUrl and, for every line of the page mentioning a
    choice id from ChoiceList, records the first anchor's href.  Returns a
    dict {choiceId: href}.  The waclient/workloadId/lensAlias/QuestionId/
    PillarId parameters are kept for interface compatibility; only the URL
    and ChoiceList are used.
    """
    # Context manager closes the HTTP response (the original leaked the
    # socket); unused 'response'/'htmlString' locals removed.
    with urllib.request.urlopen(ImprovementPlanUrl) as urlresponse:
        htmlStr = urlresponse.read().decode('utf8')
    ipHTMLList = {}
    for line in htmlStr.split('\n'):
        for uq in ChoiceList:
            if uq in line:
                parsed = BeautifulSoup(line, features='html.parser')
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):
    """Scrape one improvement-plan step from the public WA docs page.

    Returns (prettyHTML, questionIdText): the BeautifulSoup fragment for the
    step anchored by ImprovementPlanUrl's '#stepN' fragment, and the question
    id text (e.g. 'OPS 1') found on the page.
    """
    logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (
        ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing; this works as long as
    # there are fewer than 99 steps on the page.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    # Anchors that bracket the step: capture from 'stepN' up to 'stepN+1'.
    firstItem = 'step' + stepNumber
    secondItem = 'step' + str(int(stepNumber) + 1)
    logger.debug('Going from %s to %s' % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode('utf8')
    htmlSplit = htmlStr.split('\n')
    foundit = 0
    ipString = ''
    questionIdText = ''
    for i in htmlSplit:
        # Lines containing the pillar's short code carry the question label.
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i, features='html.parser')
            questionIdText = str(bsparse.text).split(':')[0].strip()
        # Stop accumulating at the next step anchor or a closing div.
        if secondItem in i or '</div>' in i:
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString += i
        elif foundit:
            ipString += i
    prettyHTML = BeautifulSoup(ipString, features='html.parser')
    # Remove the "local glossary links" since they point to relative paths.
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(WACLIENT, workloadId, lens, workbook,
    allQuestionsForLens, workloadName='', AWSAccountId='',
    workloadDescription=''):
    """Render one worksheet (tab) per lens into the given xlsxwriter workbook.

    Writes a workload-overview header, then one row per best-practice choice
    grouped by pillar and question, alternating row colours per question.
    workloadName/AWSAccountId/workloadDescription are only written when the
    module-level WORKLOADID is set (existing-workload mode).
    """
    # Cell formats used throughout the sheet.
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({'border': 1, 'border_color': 'black',
        'text_wrap': True})
    bold_border_bold = workbook.add_format({'border': 1, 'border_color':
        'black', 'text_wrap': True, 'font_size': 20, 'bold': True})
    heading = workbook.add_format({'font_size': 24, 'bold': True})
    lineA = workbook.add_format({'border': 1, 'border_color': 'black',
        'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})
    lineB = workbook.add_format({'border': 1, 'border_color': 'black',
        'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})
    lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
        'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':
        'top', 'text_wrap': True})
    lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,
        'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':
        'top', 'text_wrap': True})
    lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
        'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',
        'text_wrap': False, 'indent': 100})
    lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,
        'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',
        'text_wrap': False, 'indent': 100})
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Tab name is the (truncated) lens alias plus its current version.
    logger.debug("Getting lens version for '" + lens + "'")
    versionString = getCurrentLensVersion(WACLIENT, lens)
    logger.debug('Adding worksheet using version ' + versionString)
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet(lensName + ' v' + versionString)
    # Page setup: landscape, letter paper, fixed column widths.
    worksheet.set_landscape()
    worksheet.set_paper(1)
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Workload overview header block at the top of the sheet.
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # Existing workload: fill in its details; otherwise leave blanks with
    # small-font hints so the user can fill them in by hand.
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
        worksheet.write('D5',
            'Briefly describe system architecture and workload, flow etc.',
            small_font)
    # Column subheadings (row 8), then freeze/autofilter/print setup.
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    worksheet.freeze_panes(8, 0)
    worksheet.autofilter('A8:B8')
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Row cursor; formats alternate between lineA/lineB per question.
    cellPosition = 8
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # Per-pillar question counter (OPS1, OPS2, ...).
        qNum = 1
        jmesquery = "[?PillarId=='" + pillar + "']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        for answers in allQuestionsForPillar:
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum
                ) + ' - ' + answers['QuestionTitle']
            (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
                ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers
                ['QuestionId']))
            # Strip the extra whitespace the API embeds in descriptions.
            qDescription = qDescription.replace('\n ', '').replace('    '
                , '').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '" + questionTitle + "'")
            logger.debug('It has answers of: ' + json.dumps(answers[
                'SelectedChoices']))
            cellID = cellPosition + 1
            # When the question has an improvement plan URL, scrape it for
            # per-choice links to embed in the Choice column.
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='" + answers['QuestionId'
                    ] + "'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,
                    answers['QuestionId'], answers['PillarId'],
                    qImprovementPlanUrl, choiceList)
            else:
                ipList = []
            startingCellID = cellID
            # Pillar/question text is written into every row so autofilter
            # works, but only the first row of a question shows it visibly.
            firstTimePillar = True
            for choices in answers['Choices']:
                cell = 'A' + str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
                        myCellnoborder)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar = False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],
                        myCellhidden)
                    cell = 'B' + str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Choice (best practice) cell, as a link when one was scraped.
                cell = 'D' + str(cellID)
                Title = choices['Title'].replace('    ', '').replace('\t', ''
                    ).replace('\n', '')
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']],
                        myCell, string=Title)
                    htmlString = ''
                    htmlString = htmlString.replace('\n ', '').replace(
                        '    ', '').replace('\t', '').strip().rstrip()
                    worksheet.write_comment(cell, htmlString, {'author':
                        'Improvement Plan'})
                else:
                    worksheet.write(cell, Title, myCell)
                # Detail cell, with the API's embedded whitespace removed.
                cell = 'E' + str(cellID)
                Description = choices['Description'].replace(
                    '\n                        ', '')
                Description = Description.replace('\n                ', '')
                Description = Description.replace('    ', '').replace('\t', ''
                    ).replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description, myCell)
                # Response cell: SELECTED when the choice is checked.
                cell = 'F' + str(cellID)
                responseText = ''
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = 'SELECTED'
                else:
                    responseText = ''
                worksheet.write(cell, responseText, myCell)
                cellID += 1
            # Explanation and Notes span all rows of the question.
            cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)
            worksheet.merge_range(cellMerge, qDescription, myCell)
            cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, '', myCell)
            cellID -= 1
            qNum += 1
        cellPosition = cellID
        # Alternate the row colour scheme between questions.
        if myCell == lineA:
            myCell = lineB
            myCellhidden = lineBhidden
            myCellnoborder = lineBnoborder
        else:
            myCell = lineA
            myCellhidden = lineAhidden
            myCellnoborder = lineAnoborder
def main():
    """Entry point: build the XLSX export.

    With -w/--workloadid, exports the existing workload's answers; without
    it, creates a TEMP workload, auto-answers every question, exports, and
    (unless -k) deletes the TEMP workload.
    """
    # The wellarchitected client requires a reasonably recent boto3.
    boto3_min_version = '1.16.38'
    if packaging.version.parse(boto3.__version__) < packaging.version.parse(
        boto3_min_version):
        logger.error(
            'Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)'
             % (boto3.__version__, boto3_min_version))
        exit()
    logger.info('Script version %s' % __version__)
    logger.info('Starting Boto %s Session' % boto3.__version__)
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=
        REGION_NAME)
    if WORKLOADID:
        # Existing workload: read its lenses and metadata for the header.
        logger.info('User specified workload id of %s' % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # No workload given: create a throwaway TEMP workload covering every
        # available lens so each question can be auto-answered.
        logger.info('No workload ID specified, we will create a TEMP workload')
        LENSES = listLens(WACLIENT)
        logger.info('Lenses available: ' + json.dumps(LENSES))
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        logger.info('Creating a new workload to gather questions and answers')
        workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,
            DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',
            '[]')
    logger.info("Creating xlsx file '" + FILENAME + "'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    LENSES.sort(reverse=True)
    for lens in LENSES:
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            logger.debug('Not answering questions for existing workload')
            lensTabCreation(WACLIENT, workloadId, lens, workbook,
                allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
        else:
            # TEMP workload: select every choice for every question so the
            # export captures all best practices and improvement links.
            jmesquery = (
                '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}'
                )
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            for question in allQuestionIds:
                logger.debug('Answering question %s in the %s lens' % (
                    question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT, workloadId, lens,
                    question['QuestionId'], question['Choices'],
                    'TEMP WORKLOAD - Added by export script')
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
    logger.info('Closing Workbook File')
    workbook.close()
    # Clean up the TEMP workload unless -k/--keeptempworkload was given.
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info('Removing TEMP Workload')
            DeleteWorkload(WACLIENT, workloadId)
    logger.info('Done')
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
# ---------------------------------------------------------------------------
# NOTE(review): everything below this point duplicates the definitions above
# (a second copy of the same script); at import time these later definitions
# shadow the earlier ones.
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
# This is a tool to export the WA framework answers to a XLSX file
#
# This code is only for use in Well-Architected labs
# *** NOT FOR PRODUCTION USE ***
#
# Licensed under the Apache 2.0 and MITnoAttr License.
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
# https://aws.amazon.com/apache2.0/
import botocore
import boto3
import json
import datetime
import logging
import jmespath
import xlsxwriter
import argparse
from pkg_resources import packaging
import urllib.request
from bs4 import BeautifulSoup, NavigableString, Tag
# Module metadata.
__author__ = "Eric Pullen"
__email__ = "eppullen@amazon.com"
__copyright__ = "Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved."
__credits__ = ["Eric Pullen"]
__version__ = "0.1"
# Default region listed here
REGION_NAME = "us-east-1"
blankjson = {}
response = ""
# Setup Logging
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    )
logger = logging.getLogger()
# Quiet the chatty AWS/HTTP libraries so only this script's messages show.
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
logging.getLogger('urllib3').setLevel(logging.CRITICAL)
PARSER = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
This utility has two options to run:
------------------------------------
1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.
2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.
'''
)
PARSER.add_argument('-p','--profile', required=False, default="default", help='AWS CLI Profile Name')
PARSER.add_argument('-r','--region', required=False, default="us-east-1", help='From Region Name. Example: us-east-1')
PARSER.add_argument('-w','--workloadid', required=False, default="", help='Workload Id to use instead of creating a TEMP workload')
PARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')
PARSER.add_argument('-f','--fileName', required=True, default="./demo.xlsx", help='FileName to export XLSX')
PARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')
ARGUMENTS = PARSER.parse_args()
PROFILE = ARGUMENTS.profile
FILENAME = ARGUMENTS.fileName
REGION_NAME = ARGUMENTS.region
WORKLOADID = ARGUMENTS.workloadid
KEEPTEMP = ARGUMENTS.keeptempworkload
if ARGUMENTS.debug:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# To map our short hand names in the console to the API defined pillars
# Example: print(PILLAR_PARSE_MAP['performance'])
PILLAR_PARSE_MAP = {
    "operationalExcellence": "OPS",
    "security": "SEC",
    "reliability": "REL",
    "performance": "PERF",
    "costOptimization": "COST"
}
# Human-readable pillar names used for the spreadsheet's Pillar column.
PILLAR_PROPER_NAME_MAP = {
    "operationalExcellence": "Operational Excellence",
    "security": "Security",
    "reliability": "Reliability",
    "performance": "Performance Efficiency",
    "costOptimization": "Cost Optimization"
}
# Helper class to convert a datetime item to JSON.
class DateTimeEncoder(json.JSONEncoder):
    """json.JSONEncoder that serializes datetime objects via str()."""
    def default(self, z):
        if isinstance(z, datetime.datetime):
            return (str(z))
        else:
            return super().default(z)
def CreateNewWorkload(
    waclient,
    workloadName,
    description,
    reviewOwner,
    environment,
    awsRegions,
    lenses,
    tags,
    pillarPriorities,
    notes="",
    nonAwsRegions=[],  # NOTE(review): mutable default argument, shared across calls
    architecturalDesign='',
    industryType='',
    industry='',
    accountIds=[]  # NOTE(review): mutable default argument, shared across calls
    ):
    """Create a new WA workload and return (workloadId, workloadARN).

    On a name conflict, the existing workload's id/ARN are returned instead.
    tags and pillarPriorities are accepted but not forwarded to the API.
    """
    # Create your workload
    try:
        response=waclient.create_workload(
            WorkloadName=workloadName,
            Description=description,
            ReviewOwner=reviewOwner,
            Environment=environment,
            AwsRegions=awsRegions,
            Lenses=lenses,
            NonAwsRegions=nonAwsRegions,
            ArchitecturalDesign=architecturalDesign,
            IndustryType=industryType,
            Industry=industry,
            Notes=notes,
            AccountIds=accountIds
            )
    except waclient.exceptions.ConflictException as e:
        workloadId,workloadARN = FindWorkload(waclient,workloadName)
        logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
        return workloadId, workloadARN
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # NOTE(review): if either botocore exception branch above ran, 'response'
    # is unbound here and the next line raises NameError.
    workloadId = response['WorkloadId']
    workloadARN = response['WorkloadArn']
    return workloadId, workloadARN
def FindWorkload(
    waclient,
    workloadName
    ):
    """Return (workloadId, workloadArn) of the first workload whose name
    matches the given prefix."""
    # Finding your WorkloadId
    try:
        response=waclient.list_workloads(
            WorkloadNamePrefix=workloadName
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # NOTE(review): if an exception was caught above, 'response' is unbound
    # below (NameError); an empty result list raises IndexError.
    # print("Full JSON:",json.dumps(response['WorkloadSummaries'], cls=DateTimeEncoder))
    workloadId = response['WorkloadSummaries'][0]['WorkloadId']
    workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
    # print("WorkloadId",workloadId)
    return workloadId, workloadArn
def DeleteWorkload(
    waclient,
    workloadId
    ):
    """Delete the given workload; API errors are logged and swallowed."""
    # Delete the WorkloadId
    try:
        response=waclient.delete_workload(
            WorkloadId=workloadId
            )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
def GetWorkload(
    waclient,
    workloadId
    ):
    """Fetch and return the full workload record; exits the script on
    ClientError."""
    # Get the WorkloadId
    try:
        response=waclient.get_workload(
            WorkloadId=workloadId
        )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
        exit()
    # NOTE(review): the ParamValidationError branch does not exit, so
    # 'response' is unbound below in that case (NameError).
    # print("Full JSON:",json.dumps(response['Workload'], cls=DateTimeEncoder))
    workload = response['Workload']
    # print("WorkloadId",workloadId)
    return workload
def listLens(
    waclient
    ):
    """Return the list of lens aliases available to this account/region."""
    # List all lenses currently available
    try:
        response=waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # NOTE(review): 'response' is unbound here if an exception was caught.
    # print(json.dumps(response))
    lenses = jmespath.search("LensSummaries[*].LensAlias", response)
    return lenses
def getCurrentLensVersion(
    waclient,
    lensAlias
    ):
    """Return the current version string of the lens with the given alias;
    raises IndexError if the alias is unknown."""
    # List all lenses currently available
    try:
        response=waclient.list_lenses()
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # NOTE(review): 'response' is unbound here if an exception was caught.
    # print(json.dumps(response))
    searchString = "LensSummaries[?LensAlias==`"+lensAlias+"`].LensVersion"
    lenses = jmespath.search(searchString, response)
    return lenses[0]
def findAllQuestionId(
    waclient,
    workloadId,
    lensAlias
    ):
    """Return every AnswerSummary for the lens across all pillars,
    following NextToken pagination within each pillar."""
    answers = []
    # Due to a bug in some lenses, I have to iterate over each pillar in order to
    # retrieve the correct results.
    for pillar in PILLAR_PARSE_MAP:
        logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        # Find a questionID using the questionTitle
        try:
            response=waclient.list_answers(
                WorkloadId=workloadId,
                LensAlias=lensAlias,
                PillarId=pillar
            )
        except botocore.exceptions.ParamValidationError as e:
            logger.error("ERROR - Parameter validation error: %s" % e)
        except botocore.exceptions.ClientError as e:
            logger.error("ERROR - Unexpected error: %s" % e)
        # NOTE(review): 'response' is unbound here if the call above raised.
        answers.extend(response["AnswerSummaries"])
        while "NextToken" in response:
            try:
                response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
            except botocore.exceptions.ParamValidationError as e:
                logger.error("ERROR - Parameter validation error: %s" % e)
            except botocore.exceptions.ClientError as e:
                logger.error("ERROR - Unexpected error: %s" % e)
            answers.extend(response["AnswerSummaries"])
    return answers
def getQuestionDetails(
    waclient,
    workloadId,
    lensAlias,
    questionId
    ):
    """Return (description, improvementPlanUrl, helpfulResourceUrl, notes)
    for one question; each element is None when absent."""
    # Find a answer for a questionId
    try:
        response=waclient.get_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId
            )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # NOTE(review): 'response' is unbound here if an exception was caught.
    qDescription = jmespath.search("Answer.QuestionDescription", response)
    qImprovementPlanUrl = jmespath.search("Answer.ImprovementPlanUrl", response)
    qHelpfulResourceUrl = jmespath.search("Answer.HelpfulResourceUrl", response)
    qNotes = jmespath.search("Answer.Notes", response)
    return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
    waclient,
    workloadId,
    lensAlias,
    questionId,
    selectedChoices,
    notes
    ):
    """Set the selected choices and notes for one question; return the
    SelectedChoices list stored by the service."""
    # Update a answer to a question
    try:
        response=waclient.update_answer(
            WorkloadId=workloadId,
            LensAlias=lensAlias,
            QuestionId=questionId,
            SelectedChoices=selectedChoices,
            Notes=notes
            )
    except botocore.exceptions.ParamValidationError as e:
        logger.error("ERROR - Parameter validation error: %s" % e)
    except botocore.exceptions.ClientError as e:
        logger.error("ERROR - Unexpected error: %s" % e)
    # NOTE(review): 'response' is unbound here if an exception was caught.
    # print(json.dumps(response))
    jmesquery = "Answer.SelectedChoices"
    answers = jmespath.search(jmesquery, response)
    return answers
def getImprovementPlanItems(
    waclient,
    workloadId,
    lensAlias,
    QuestionId,
    PillarId,
    ImprovementPlanUrl,
    ChoiceList
    ):
    """Scrape ImprovementPlanUrl and return {choiceId: href} for every
    choice id found on the page.  Only the URL and ChoiceList parameters
    are actually used; the rest are kept for call compatibility."""
    # This will parse the IP Items to gather the links we need
    response = {}
    htmlString = ""
    # unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
    # NOTE(review): the HTTP response is never closed (socket leak).
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    ipHTMLList = {}
    for line in htmlSplit:
        for uq in ChoiceList:
            if uq in line:
                parsed = BeautifulSoup(line,features="html.parser")
                ipHTMLList.update({uq: str(parsed.a['href'])})
    return ipHTMLList
def getImprovementPlanHTMLDescription(
    ImprovementPlanUrl,
    PillarId
    ):
    """Scrape one improvement-plan step from the public WA docs page.

    Returns (prettyHTML, questionIdText): the BeautifulSoup fragment between
    the '#stepN' anchor named in the URL and the next step, plus the question
    id text found on the page.
    """
    logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
    stepRaw = ImprovementPlanUrl.rsplit('#')[1]
    # Grab the number of the step we are referencing
    # This will work as long as their are less than 99 steps.
    if len(stepRaw) <= 5:
        stepNumber = stepRaw[-1]
    else:
        stepNumber = stepRaw[-2]
    #Generate the string for the step number
    firstItem = "step"+stepNumber
    secondItem = ("step"+str((int(stepNumber)+1)))
    logger.debug ("Going from %s to %s" % (firstItem, secondItem))
    urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
    htmlBytes = urlresponse.read()
    htmlStr = htmlBytes.decode("utf8")
    htmlSplit = htmlStr.split('\n')
    foundit = 0
    ipString = ""
    questionIdText = ""
    for i in htmlSplit:
        if PILLAR_PARSE_MAP[PillarId] in i:
            bsparse = BeautifulSoup(i,features="html.parser")
            questionIdText = str(bsparse.text).split(':')[0].strip()
        if (secondItem in i) or ("</div>" in i):
            foundit = 0
        if firstItem in i:
            foundit = 1
            ipString+=i
        elif foundit:
            ipString+=i
    prettyHTML = BeautifulSoup(ipString,features="html.parser")
    # Need to remove all of the "local glossary links" since they point to relative paths
    for a in prettyHTML.findAll('a', 'glossref'):
        a.replaceWithChildren()
    return prettyHTML, questionIdText
def lensTabCreation(
    WACLIENT,
    workloadId,
    lens,
    workbook,
    allQuestionsForLens,
    workloadName="",
    AWSAccountId="",
    workloadDescription=""
    ):
    """Render one worksheet (tab) for the given lens into the workbook.

    Writes a workload-overview header, then one row per best-practice
    choice grouped by pillar and question, alternating row colours per
    question.  workloadName/AWSAccountId/workloadDescription are only
    written when the module-level WORKLOADID is set.
    """
    # Setup some formatting for the workbook
    bold = workbook.add_format({'bold': True})
    bold_border = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True
    })
    bold_border_bold = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'text_wrap': True,
        'font_size': 20,
        'bold': True
    })
    heading = workbook.add_format({
        'font_size': 24,
        'bold': True
    })
    lineA = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineB = workbook.add_format({
        'border': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': True
    })
    lineBnoborder = workbook.add_format({
        'border': 0,
        'top': 1,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': True
    })
    lineAhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E0EBF6',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    lineBhidden = workbook.add_format({
        'border': 0,
        'left': 1,
        'right': 1,
        'border_color': 'black',
        'bg_color': '#E4EFDC',
        'align': 'top',
        'text_wrap': False,
        'indent': 100
    })
    sub_heading = workbook.add_format()
    sub_heading.set_font_size(20)
    sub_heading.set_bold(True)
    small_font = workbook.add_format()
    small_font.set_font_size(9)
    # Get the current version of Lens
    logger.debug("Getting lens version for '"+lens+"'")
    versionString = getCurrentLensVersion(WACLIENT,lens)
    logger.debug("Adding worksheet using version "+versionString)
    lensName = lens[0:18]
    worksheet = workbook.add_worksheet((lensName+' v'+versionString))
    # Print in landscape
    worksheet.set_landscape()
    # Set to 8.5x11 paper size
    worksheet.set_paper(1)
    # Set the column widths
    worksheet.set_column('A:A', 11)
    worksheet.set_column('B:B', 32)
    worksheet.set_column('C:C', 56)
    worksheet.set_column('D:D', 29)
    worksheet.set_column('E:E', 57)
    worksheet.set_column('F:F', 18)
    worksheet.set_column('G:G', 70)
    # Top of sheet
    worksheet.merge_range('A1:G1', 'Workload Overview', heading)
    worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)
    worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)
    worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)
    # If we are using an existing workload, then display the Name, ID, and Description at the top
    # or else just make it blank
    if WORKLOADID:
        worksheet.write('C3', workloadName, bold_border)
        accountIdParsed = AWSAccountId.split(':')[4]
        worksheet.write('C4', accountIdParsed, bold_border)
        worksheet.write('C5', workloadDescription, bold_border)
    else:
        worksheet.write('C3', '', bold_border)
        worksheet.write('C4', '', bold_border)
        worksheet.write('C5', '', bold_border)
        worksheet.write('D3', 'Enter the name of system', small_font)
        worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)
        worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)
    # Subheadings for columns
    worksheet.write('A8', 'Pillar', sub_heading)
    worksheet.write('B8', 'Question', sub_heading)
    worksheet.write('C8', 'Explanation', sub_heading)
    worksheet.write('D8', 'Choice (Best Practice)', sub_heading)
    worksheet.write('E8', 'Detail', sub_heading)
    worksheet.write('F8', 'Response', sub_heading)
    worksheet.write('G8', 'Notes (optional)', sub_heading)
    # Freeze the top of the sheet
    worksheet.freeze_panes(8,0)
    # AutoFilter on the first two columns
    worksheet.autofilter('A8:B8')
    # Make it easier to print
    worksheet.repeat_rows(1, 8)
    worksheet.fit_to_pages(1, 99)
    # Starting point for pillar questions
    cellPosition = 8
    # Starting cell look with lineA. Will switch back and forth
    myCell = lineA
    myCellhidden = lineAhidden
    myCellnoborder = lineAnoborder
    for pillar in PILLAR_PARSE_MAP:
        # This is the question number for each pillar (ex: OPS1, OPS2, etc)
        qNum = 1
        # The query will return all questions for a lens and pillar
        jmesquery = "[?PillarId=='"+pillar+"']"
        allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)
        # For each of the possible answers, parse them and put into the Worksheet
        for answers in allQuestionsForPillar:
            # List all best practices
            questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+" - "+answers['QuestionTitle']
            qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])
            # Some of the questions have extra whitespaces and I need to remove those to fit into the cell
            qDescription = qDescription.replace('\n ','').replace('    ','').replace('\t', '').replace('\n', '')
            qDescription = qDescription.rstrip()
            qDescription = qDescription.strip()
            logger.debug("Working on '"+questionTitle+"'")
            logger.debug("It has answers of: "+json.dumps(answers['SelectedChoices']))
            cellID = cellPosition + 1
            # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content
            if qImprovementPlanUrl:
                jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
                choiceList = jmespath.search(jmesquery, allQuestionsForLens)
                ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)
            else:
                ipList = []
            startingCellID=cellID
            # If its the first time through this particular pillar question:
            # I want to only write the name once, but I need to fill in
            # each cell with the same data so the autosort works properly
            # (else it will only show the first best practice)
            firstTimePillar=True
            for choices in answers['Choices']:
                # Write the pillar name and question in every cell for autosort, but only show the first one
                cell = 'A'+str(cellID)
                if firstTimePillar:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellnoborder)
                    firstTimePillar=False
                else:
                    worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
                    cell = 'B'+str(cellID)
                    worksheet.write(cell, questionTitle, myCellhidden)
                # Start writing each of the BP's, details, etc
                cell = 'D'+str(cellID)
                Title = choices['Title'].replace('    ','').replace('\t', '').replace('\n', '')
                if any(choices['ChoiceId'] in d for d in ipList):
                    worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
                    #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
                    #htmlString = ipItemHTML.text
                    htmlString = ""
                    htmlString = htmlString.replace('\n ','').replace('    ','').replace('\t', '').strip().rstrip()
                    # print(htmlString)
                    worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
                else:
                    worksheet.write(cell,Title,myCell)
                # Add all Details for each best practice/choice
                cell = 'E'+str(cellID)
                # Remove all of the extra spaces in the description field
                Description = choices['Description'].replace('\n                        ','')
                Description = Description.replace('\n                ','')
                Description = Description.replace('    ','').replace('\t', '').replace('\n', '')
                Description = Description.rstrip()
                Description = Description.strip()
                worksheet.write(cell, Description ,myCell)
                # If this is an existing workload, we will show SELECTED if the have it checked
                # I would love to use a XLSX checkbox, but this library doesn't support it
                cell = 'F'+str(cellID)
                responseText = ""
                if choices['ChoiceId'] in answers['SelectedChoices']:
                    responseText = "SELECTED"
                else:
                    responseText = ""
                worksheet.write(cell, responseText ,myCell)
                cellID+=1
            # We are out of the choice/detail/response loop, so know how many rows were consumed
            # and we can create the explanation and notes field to span all of them
            # Explanantion field
            cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
            worksheet.merge_range(cellMerge, qDescription,myCell)
            # Notes field
            cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
            if WORKLOADID:
                worksheet.merge_range(cellMerge, qNotes, myCell)
            else:
                worksheet.merge_range(cellMerge, "", myCell)
            cellID-=1
            # Increase the question number
            qNum += 1
        # Reset the starting cellPosition to the last cellID
        cellPosition = cellID
        # Reset the cell formatting to alternate between the two colors
        if myCell == lineA:
            myCell = lineB
            myCellhidden = lineBhidden
            myCellnoborder = lineBnoborder
        else:
            myCell = lineA
            myCellhidden = lineAhidden
            myCellnoborder = lineAnoborder
def main():
    """Entry point: export Well-Architected review data to an xlsx workbook.

    Behavior depends on the module-level WORKLOADID setting:
      * If set, the existing workload's lenses, answers and notes are
        exported as-is.
      * If empty, a TEMP workload is created so every question in every
        available lens can be enumerated; it is deleted afterwards unless
        KEEPTEMP is set.
    """
    boto3_min_version = "1.16.38"
    # The 'wellarchitected' service APIs only exist in boto3 >= 1.16.38.
    if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
        # Fixed typo in the original message: "ugprade" -> "upgrade".
        logger.error("Your Boto3 version (%s) is less than %s. You must upgrade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
        exit()
    logger.info("Script version %s" % __version__)
    logger.info("Starting Boto %s Session" % boto3.__version__)
    # Create a new boto3 session using the configured credentials profile.
    SESSION1 = boto3.session.Session(profile_name=PROFILE)
    # Initiate the well-architected client in the configured region.
    WACLIENT = SESSION1.client(
        service_name='wellarchitected',
        region_name=REGION_NAME,
    )
    if WORKLOADID:
        # Existing workload: pull all of its properties from the service.
        logger.info("User specified workload id of %s" % WORKLOADID)
        workloadJson = GetWorkload(WACLIENT, WORKLOADID)
        LENSES = workloadJson['Lenses']
        logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
        WORKLOADNAME = workloadJson['WorkloadName']
        DESCRIPTION = workloadJson['Description']
        REVIEWOWNER = workloadJson['ReviewOwner']
        ENVIRONMENT = workloadJson['Environment']
        AWSREGIONS = workloadJson['AwsRegions']
        workloadId = WORKLOADID
        workloadARN = workloadJson['WorkloadArn']
    else:
        # No workload given: a TEMP workload must be created purely to
        # enumerate the questions across all currently available lenses.
        logger.info("No workload ID specified, we will create a TEMP workload")
        LENSES = listLens(WACLIENT)
        logger.info("Lenses available: " + json.dumps(LENSES))
        # Fixed properties for the throwaway workload.
        WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
        DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
        REVIEWOWNER = 'WA Python Script'
        ENVIRONMENT = 'PRODUCTION'
        AWSREGIONS = [REGION_NAME]
        logger.info("Creating a new workload to gather questions and answers")
        workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME, DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, "[]", "[]")
    # Create the xlsx file; each lens becomes its own worksheet tab.
    logger.info("Creating xlsx file '" + FILENAME + "'")
    workbook = xlsxwriter.Workbook(FILENAME)
    workbook.set_size(2800, 1600)
    # Reverse sort so the base 'wellarchitected' framework tab comes first.
    # NOTE(review): this hack breaks if a lens alias ever sorts after it
    # (e.g. one starting with WB*, X, Y or Z).
    LENSES.sort(reverse=True)
    for lens in LENSES:
        # Grab all questions for this particular lens.
        allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)
        if WORKLOADID:
            # Existing workload: answers are already present, just render.
            logger.debug("Not answering questions for existing workload")
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)
        else:
            # TEMP workload: select every choice on every question so the
            # improvement-plan URLs become available for the export.
            jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
            allQuestionIds = jmespath.search(jmesquery, allQuestions)
            for question in allQuestionIds:
                logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
                updateAnswersForQuestion(WACLIENT, workloadId, lens, question['QuestionId'], question['Choices'], 'TEMP WORKLOAD - Added by export script')
            lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)
    logger.info("Closing Workbook File")
    workbook.close()
    # The TEMP workload is throwaway unless the user asked to keep it.
    if not WORKLOADID:
        if not KEEPTEMP:
            logger.info("Removing TEMP Workload")
            DeleteWorkload(WACLIENT, workloadId)
    logger.info("Done")
# Standard script entry guard: run main() only when this file is executed
# directly, not when it is imported as a module.
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "c5e003d625d7798eaf4ef5bca28f6311edccb316",
"index": 7235,
"step-1": "<mask token>\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\n<mask token>\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\n<mask token>\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n 
logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - 
Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n 
allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n 
worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = 
(getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = 
Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. 
You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: 
Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\ndef CreateNewWorkload(waclient, workloadName, description, reviewOwner,\n environment, awsRegions, lenses, tags, pillarPriorities, notes='',\n nonAwsRegions=[], architecturalDesign='', industryType='', industry='',\n accountIds=[]):\n try:\n response = waclient.create_workload(WorkloadName=workloadName,\n Description=description, ReviewOwner=reviewOwner, Environment=\n environment, AwsRegions=awsRegions, Lenses=lenses,\n NonAwsRegions=nonAwsRegions, ArchitecturalDesign=\n architecturalDesign, IndustryType=industryType, Industry=\n industry, Notes=notes, AccountIds=accountIds)\n except waclient.exceptions.ConflictException as e:\n workloadId, workloadARN = FindWorkload(waclient, workloadName)\n logger.error(\n 'ERROR - The workload name %s already exists as workloadId %s' %\n (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except 
botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\ndef GetWorkload(waclient, workloadId):\n try:\n response = waclient.get_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n exit()\n workload = response['Workload']\n return workload\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n 
try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = 
urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 
'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n 
accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + 
answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n 
if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, 
AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'\n , datefmt='%Y-%m-%d %H:%M:%S')\n<mask token>\nlogging.getLogger('boto3').setLevel(logging.CRITICAL)\nlogging.getLogger('botocore').setLevel(logging.CRITICAL)\nlogging.getLogger('s3transfer').setLevel(logging.CRITICAL)\nlogging.getLogger('urllib3').setLevel(logging.CRITICAL)\n<mask token>\nPARSER.add_argument('-p', '--profile', required=False, default='default',\n help='AWS CLI Profile Name')\nPARSER.add_argument('-r', '--region', required=False, default='us-east-1',\n help='From Region Name. Example: us-east-1')\nPARSER.add_argument('-w', '--workloadid', required=False, default='', help=\n 'Workload Id to use instead of creating a TEMP workload')\nPARSER.add_argument('-k', '--keeptempworkload', action='store_true', help=\n 'If you want to keep the TEMP workload created at the end of the export')\nPARSER.add_argument('-f', '--fileName', required=True, default=\n './demo.xlsx', help='FileName to export XLSX')\nPARSER.add_argument('-v', '--debug', action='store_true', help=\n 'print debug messages to stderr')\n<mask token>\nif ARGUMENTS.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n<mask token>\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\ndef CreateNewWorkload(waclient, workloadName, description, reviewOwner,\n environment, awsRegions, lenses, tags, pillarPriorities, notes='',\n nonAwsRegions=[], architecturalDesign='', industryType='', industry='',\n accountIds=[]):\n try:\n response = waclient.create_workload(WorkloadName=workloadName,\n Description=description, ReviewOwner=reviewOwner, Environment=\n environment, AwsRegions=awsRegions, Lenses=lenses,\n NonAwsRegions=nonAwsRegions, ArchitecturalDesign=\n architecturalDesign, IndustryType=industryType, 
Industry=\n industry, Notes=notes, AccountIds=accountIds)\n except waclient.exceptions.ConflictException as e:\n workloadId, workloadARN = FindWorkload(waclient, workloadName)\n logger.error(\n 'ERROR - The workload name %s already exists as workloadId %s' %\n (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\ndef GetWorkload(waclient, workloadId):\n try:\n response = waclient.get_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n exit()\n workload = response['Workload']\n return workload\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n 
logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n 
except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going 
from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 
'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', 
sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, 
questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. 
You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: 
Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import botocore\nimport boto3\nimport json\nimport datetime\nimport logging\nimport jmespath\nimport xlsxwriter\nimport argparse\nfrom pkg_resources import packaging\nimport urllib.request\nfrom bs4 import BeautifulSoup, NavigableString, Tag\n__author__ = 'Eric Pullen'\n__email__ = 'eppullen@amazon.com'\n__copyright__ = (\n 'Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.')\n__credits__ = ['Eric Pullen']\n__version__ = '0.1'\nREGION_NAME = 'us-east-1'\nblankjson = {}\nresponse = ''\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s'\n , datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger()\nlogging.getLogger('boto3').setLevel(logging.CRITICAL)\nlogging.getLogger('botocore').setLevel(logging.CRITICAL)\nlogging.getLogger('s3transfer').setLevel(logging.CRITICAL)\nlogging.getLogger('urllib3').setLevel(logging.CRITICAL)\nPARSER = argparse.ArgumentParser(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n \"\"\"This utility has two options to run:\n------------------------------------\n1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.\n2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.\n \"\"\"\n )\nPARSER.add_argument('-p', '--profile', required=False, default='default',\n help='AWS CLI Profile Name')\nPARSER.add_argument('-r', '--region', required=False, default='us-east-1',\n help='From Region Name. 
Example: us-east-1')\nPARSER.add_argument('-w', '--workloadid', required=False, default='', help=\n 'Workload Id to use instead of creating a TEMP workload')\nPARSER.add_argument('-k', '--keeptempworkload', action='store_true', help=\n 'If you want to keep the TEMP workload created at the end of the export')\nPARSER.add_argument('-f', '--fileName', required=True, default=\n './demo.xlsx', help='FileName to export XLSX')\nPARSER.add_argument('-v', '--debug', action='store_true', help=\n 'print debug messages to stderr')\nARGUMENTS = PARSER.parse_args()\nPROFILE = ARGUMENTS.profile\nFILENAME = ARGUMENTS.fileName\nREGION_NAME = ARGUMENTS.region\nWORKLOADID = ARGUMENTS.workloadid\nKEEPTEMP = ARGUMENTS.keeptempworkload\nif ARGUMENTS.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\nPILLAR_PARSE_MAP = {'operationalExcellence': 'OPS', 'security': 'SEC',\n 'reliability': 'REL', 'performance': 'PERF', 'costOptimization': 'COST'}\nPILLAR_PROPER_NAME_MAP = {'operationalExcellence': 'Operational Excellence',\n 'security': 'Security', 'reliability': 'Reliability', 'performance':\n 'Performance Efficiency', 'costOptimization': 'Cost Optimization'}\n\n\nclass DateTimeEncoder(json.JSONEncoder):\n\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return str(z)\n else:\n return super().default(z)\n\n\ndef CreateNewWorkload(waclient, workloadName, description, reviewOwner,\n environment, awsRegions, lenses, tags, pillarPriorities, notes='',\n nonAwsRegions=[], architecturalDesign='', industryType='', industry='',\n accountIds=[]):\n try:\n response = waclient.create_workload(WorkloadName=workloadName,\n Description=description, ReviewOwner=reviewOwner, Environment=\n environment, AwsRegions=awsRegions, Lenses=lenses,\n NonAwsRegions=nonAwsRegions, ArchitecturalDesign=\n architecturalDesign, IndustryType=industryType, Industry=\n industry, Notes=notes, AccountIds=accountIds)\n except waclient.exceptions.ConflictException as e:\n workloadId, 
workloadARN = FindWorkload(waclient, workloadName)\n logger.error(\n 'ERROR - The workload name %s already exists as workloadId %s' %\n (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\n\ndef FindWorkload(waclient, workloadName):\n try:\n response = waclient.list_workloads(WorkloadNamePrefix=workloadName)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n return workloadId, workloadArn\n\n\ndef DeleteWorkload(waclient, workloadId):\n try:\n response = waclient.delete_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n\n\ndef GetWorkload(waclient, workloadId):\n try:\n response = waclient.get_workload(WorkloadId=workloadId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n exit()\n workload = response['Workload']\n return workload\n\n\ndef listLens(waclient):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - 
Unexpected error: %s' % e)\n lenses = jmespath.search('LensSummaries[*].LensAlias', response)\n return lenses\n\n\ndef getCurrentLensVersion(waclient, lensAlias):\n try:\n response = waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n searchString = 'LensSummaries[?LensAlias==`' + lensAlias + '`].LensVersion'\n lenses = jmespath.search(searchString, response)\n return lenses[0]\n\n\ndef findAllQuestionId(waclient, workloadId, lensAlias):\n answers = []\n for pillar in PILLAR_PARSE_MAP:\n logger.debug('Grabbing answers for %s %s' % (lensAlias, pillar))\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n while 'NextToken' in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,\n LensAlias=lensAlias, PillarId=pillar, NextToken=\n response['NextToken'])\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n answers.extend(response['AnswerSummaries'])\n return answers\n\n\ndef getQuestionDetails(waclient, workloadId, lensAlias, questionId):\n try:\n response = waclient.get_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n qDescription = 
jmespath.search('Answer.QuestionDescription', response)\n qImprovementPlanUrl = jmespath.search('Answer.ImprovementPlanUrl', response\n )\n qHelpfulResourceUrl = jmespath.search('Answer.HelpfulResourceUrl', response\n )\n qNotes = jmespath.search('Answer.Notes', response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(waclient, workloadId, lensAlias, questionId,\n selectedChoices, notes):\n try:\n response = waclient.update_answer(WorkloadId=workloadId, LensAlias=\n lensAlias, QuestionId=questionId, SelectedChoices=\n selectedChoices, Notes=notes)\n except botocore.exceptions.ParamValidationError as e:\n logger.error('ERROR - Parameter validation error: %s' % e)\n except botocore.exceptions.ClientError as e:\n logger.error('ERROR - Unexpected error: %s' % e)\n jmesquery = 'Answer.SelectedChoices'\n answers = jmespath.search(jmesquery, response)\n return answers\n\n\ndef getImprovementPlanItems(waclient, workloadId, lensAlias, QuestionId,\n PillarId, ImprovementPlanUrl, ChoiceList):\n response = {}\n htmlString = ''\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line, features='html.parser')\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\n\ndef getImprovementPlanHTMLDescription(ImprovementPlanUrl, PillarId):\n logger.debug('ImprovementPlanUrl: %s for pillar %s ' % (\n ImprovementPlanUrl, PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n firstItem = 'step' + stepNumber\n secondItem = 'step' + str(int(stepNumber) + 1)\n logger.debug('Going from %s to %s' % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = 
urlresponse.read()\n htmlStr = htmlBytes.decode('utf8')\n htmlSplit = htmlStr.split('\\n')\n foundit = 0\n ipString = ''\n questionIdText = ''\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i, features='html.parser')\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if secondItem in i or '</div>' in i:\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString += i\n elif foundit:\n ipString += i\n prettyHTML = BeautifulSoup(ipString, features='html.parser')\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n return prettyHTML, questionIdText\n\n\ndef lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestionsForLens, workloadName='', AWSAccountId='',\n workloadDescription=''):\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({'border': 1, 'border_color': 'black',\n 'text_wrap': True})\n bold_border_bold = workbook.add_format({'border': 1, 'border_color':\n 'black', 'text_wrap': True, 'font_size': 20, 'bold': True})\n heading = workbook.add_format({'font_size': 24, 'bold': True})\n lineA = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E0EBF6', 'align': 'top', 'text_wrap': True})\n lineB = workbook.add_format({'border': 1, 'border_color': 'black',\n 'bg_color': '#E4EFDC', 'align': 'top', 'text_wrap': True})\n lineAnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E0EBF6', 'align':\n 'top', 'text_wrap': True})\n lineBnoborder = workbook.add_format({'border': 0, 'top': 1, 'left': 1,\n 'right': 1, 'border_color': 'black', 'bg_color': '#E4EFDC', 'align':\n 'top', 'text_wrap': True})\n lineAhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 'bg_color': '#E0EBF6', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n lineBhidden = workbook.add_format({'border': 0, 'left': 1, 'right': 1,\n 'border_color': 'black', 
'bg_color': '#E4EFDC', 'align': 'top',\n 'text_wrap': False, 'indent': 100})\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n logger.debug(\"Getting lens version for '\" + lens + \"'\")\n versionString = getCurrentLensVersion(WACLIENT, lens)\n logger.debug('Adding worksheet using version ' + versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet(lensName + ' v' + versionString)\n worksheet.set_landscape()\n worksheet.set_paper(1)\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', small_font)\n worksheet.write('D5',\n 'Briefly describe system architecture and workload, flow etc.',\n small_font)\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n 
worksheet.write('G8', 'Notes (optional)', sub_heading)\n worksheet.freeze_panes(8, 0)\n worksheet.autofilter('A8:B8')\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n cellPosition = 8\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n for pillar in PILLAR_PARSE_MAP:\n qNum = 1\n jmesquery = \"[?PillarId=='\" + pillar + \"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n for answers in allQuestionsForPillar:\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']] + str(qNum\n ) + ' - ' + answers['QuestionTitle']\n (qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n ) = (getQuestionDetails(WACLIENT, workloadId, lens, answers\n ['QuestionId']))\n qDescription = qDescription.replace('\\n ', '').replace(' '\n , '').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n logger.debug(\"Working on '\" + questionTitle + \"'\")\n logger.debug('It has answers of: ' + json.dumps(answers[\n 'SelectedChoices']))\n cellID = cellPosition + 1\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\" + answers['QuestionId'\n ] + \"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT, workloadId, lens,\n answers['QuestionId'], answers['PillarId'],\n qImprovementPlanUrl, choiceList)\n else:\n ipList = []\n startingCellID = cellID\n firstTimePillar = True\n for choices in answers['Choices']:\n cell = 'A' + str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellnoborder)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar = False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar],\n myCellhidden)\n cell = 'B' + str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n cell = 'D' + str(cellID)\n Title = choices['Title'].replace(' ', 
'').replace('\\t', ''\n ).replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']],\n myCell, string=Title)\n htmlString = ''\n htmlString = htmlString.replace('\\n ', '').replace(\n ' ', '').replace('\\t', '').strip().rstrip()\n worksheet.write_comment(cell, htmlString, {'author':\n 'Improvement Plan'})\n else:\n worksheet.write(cell, Title, myCell)\n cell = 'E' + str(cellID)\n Description = choices['Description'].replace(\n '\\n ', '')\n Description = Description.replace('\\n ', '')\n Description = Description.replace(' ', '').replace('\\t', ''\n ).replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description, myCell)\n cell = 'F' + str(cellID)\n responseText = ''\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = 'SELECTED'\n else:\n responseText = ''\n worksheet.write(cell, responseText, myCell)\n cellID += 1\n cellMerge = 'C' + str(startingCellID) + ':C' + str(cellID - 1)\n worksheet.merge_range(cellMerge, qDescription, myCell)\n cellMerge = 'G' + str(startingCellID) + ':G' + str(cellID - 1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, '', myCell)\n cellID -= 1\n qNum += 1\n cellPosition = cellID\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n\ndef main():\n boto3_min_version = '1.16.38'\n if packaging.version.parse(boto3.__version__) < packaging.version.parse(\n boto3_min_version):\n logger.error(\n 'Your Boto3 version (%s) is less than %s. 
You must ugprade to run this script (pip3 upgrade boto3)'\n % (boto3.__version__, boto3_min_version))\n exit()\n logger.info('Script version %s' % __version__)\n logger.info('Starting Boto %s Session' % boto3.__version__)\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n WACLIENT = SESSION1.client(service_name='wellarchitected', region_name=\n REGION_NAME)\n if WORKLOADID:\n logger.info('User specified workload id of %s' % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT, WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info('Lenses for %s: %s' % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT = workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n logger.info('No workload ID specified, we will create a TEMP workload')\n LENSES = listLens(WACLIENT)\n logger.info('Lenses available: ' + json.dumps(LENSES))\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT = 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n logger.info('Creating a new workload to gather questions and answers')\n workloadId, workloadARN = CreateNewWorkload(WACLIENT, WORKLOADNAME,\n DESCRIPTION, REVIEWOWNER, ENVIRONMENT, AWSREGIONS, LENSES, '[]',\n '[]')\n logger.info(\"Creating xlsx file '\" + FILENAME + \"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n LENSES.sort(reverse=True)\n for lens in LENSES:\n allQuestions = findAllQuestionId(WACLIENT, workloadId, lens)\n if WORKLOADID:\n logger.debug('Not answering questions for existing workload')\n lensTabCreation(WACLIENT, workloadId, lens, workbook,\n allQuestions, WORKLOADNAME, workloadARN, DESCRIPTION)\n else:\n jmesquery = (\n '[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: 
Choices[].ChoiceId}'\n )\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n for question in allQuestionIds:\n logger.debug('Answering question %s in the %s lens' % (\n question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT, workloadId, lens,\n question['QuestionId'], question['Choices'],\n 'TEMP WORKLOAD - Added by export script')\n lensTabCreation(WACLIENT, workloadId, lens, workbook, allQuestions)\n logger.info('Closing Workbook File')\n workbook.close()\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info('Removing TEMP Workload')\n DeleteWorkload(WACLIENT, workloadId)\n logger.info('Done')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n\n# This is a tool to export the WA framework answers to a XLSX file\n#\n# This code is only for use in Well-Architected labs\n# *** NOT FOR PRODUCTION USE ***\n#\n# Licensed under the Apache 2.0 and MITnoAttr License.\n#\n# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance with the License. A copy of the License is located at\n# https://aws.amazon.com/apache2.0/\n\nimport botocore\nimport boto3\nimport json\nimport datetime\nimport logging\nimport jmespath\nimport xlsxwriter\nimport argparse\nfrom pkg_resources import packaging\nimport urllib.request\nfrom bs4 import BeautifulSoup, NavigableString, Tag\n\n\n__author__ = \"Eric Pullen\"\n__email__ = \"eppullen@amazon.com\"\n__copyright__ = \"Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\"\n__credits__ = [\"Eric Pullen\"]\n__version__ = \"0.1\"\n\n# Default region listed here\nREGION_NAME = \"us-east-1\"\nblankjson = {}\nresponse = \"\"\n\n# Setup Logging\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n)\n\nlogger = logging.getLogger()\nlogging.getLogger('boto3').setLevel(logging.CRITICAL)\nlogging.getLogger('botocore').setLevel(logging.CRITICAL)\nlogging.getLogger('s3transfer').setLevel(logging.CRITICAL)\nlogging.getLogger('urllib3').setLevel(logging.CRITICAL)\n\nPARSER = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''\\\nThis utility has two options to run:\n------------------------------------\n1) If you provide a workloadid, this will gather all of the answers across all Well-Architected Lenss and export them to a spreadsheet.\n2) If you do not provide a workloadid, the utility will generate a TEMP workload and auto-answer every question. 
It will then generate a spreadsheet with all of the questions, best practices, and even the improvement plan links for each.\n '''\n )\n\nPARSER.add_argument('-p','--profile', required=False, default=\"default\", help='AWS CLI Profile Name')\nPARSER.add_argument('-r','--region', required=False, default=\"us-east-1\", help='From Region Name. Example: us-east-1')\nPARSER.add_argument('-w','--workloadid', required=False, default=\"\", help='Workload Id to use instead of creating a TEMP workload')\nPARSER.add_argument('-k','--keeptempworkload', action='store_true', help='If you want to keep the TEMP workload created at the end of the export')\n\nPARSER.add_argument('-f','--fileName', required=True, default=\"./demo.xlsx\", help='FileName to export XLSX')\nPARSER.add_argument('-v','--debug', action='store_true', help='print debug messages to stderr')\n\n\nARGUMENTS = PARSER.parse_args()\nPROFILE = ARGUMENTS.profile\nFILENAME = ARGUMENTS.fileName\nREGION_NAME = ARGUMENTS.region\nWORKLOADID = ARGUMENTS.workloadid\nKEEPTEMP = ARGUMENTS.keeptempworkload\n\nif ARGUMENTS.debug:\n logger.setLevel(logging.DEBUG)\nelse:\n logger.setLevel(logging.INFO)\n\n# To map our short hand names in the console to the API defined pillars\n# Example: print(PILLAR_PARSE_MAP['performance'])\nPILLAR_PARSE_MAP = {\n \"operationalExcellence\": \"OPS\",\n \"security\": \"SEC\",\n \"reliability\": \"REL\",\n \"performance\": \"PERF\",\n \"costOptimization\": \"COST\"\n }\n\nPILLAR_PROPER_NAME_MAP = {\n \"operationalExcellence\": \"Operational Excellence\",\n \"security\": \"Security\",\n \"reliability\": \"Reliability\",\n \"performance\": \"Performance Efficiency\",\n \"costOptimization\": \"Cost Optimization\"\n}\n\n# Helper class to convert a datetime item to JSON.\nclass DateTimeEncoder(json.JSONEncoder):\n def default(self, z):\n if isinstance(z, datetime.datetime):\n return (str(z))\n else:\n return super().default(z)\n\ndef CreateNewWorkload(\n waclient,\n workloadName,\n description,\n 
reviewOwner,\n environment,\n awsRegions,\n lenses,\n tags,\n pillarPriorities,\n notes=\"\",\n nonAwsRegions=[],\n architecturalDesign='',\n industryType='',\n industry='',\n accountIds=[]\n ):\n # Create your workload\n try:\n response=waclient.create_workload(\n WorkloadName=workloadName,\n Description=description,\n ReviewOwner=reviewOwner,\n Environment=environment,\n AwsRegions=awsRegions,\n Lenses=lenses,\n NonAwsRegions=nonAwsRegions,\n ArchitecturalDesign=architecturalDesign,\n IndustryType=industryType,\n Industry=industry,\n Notes=notes,\n AccountIds=accountIds\n )\n except waclient.exceptions.ConflictException as e:\n workloadId,workloadARN = FindWorkload(waclient,workloadName)\n logger.error(\"ERROR - The workload name %s already exists as workloadId %s\" % (workloadName, workloadId))\n return workloadId, workloadARN\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n workloadId = response['WorkloadId']\n workloadARN = response['WorkloadArn']\n return workloadId, workloadARN\n\ndef FindWorkload(\n waclient,\n workloadName\n ):\n # Finding your WorkloadId\n try:\n response=waclient.list_workloads(\n WorkloadNamePrefix=workloadName\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(\"Full JSON:\",json.dumps(response['WorkloadSummaries'], cls=DateTimeEncoder))\n workloadId = response['WorkloadSummaries'][0]['WorkloadId']\n workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']\n # print(\"WorkloadId\",workloadId)\n return workloadId, workloadArn\n\ndef DeleteWorkload(\n waclient,\n workloadId\n ):\n\n # Delete the WorkloadId\n try:\n response=waclient.delete_workload(\n WorkloadId=workloadId\n )\n 
except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\ndef GetWorkload(\n waclient,\n workloadId\n ):\n\n # Get the WorkloadId\n try:\n response=waclient.get_workload(\n WorkloadId=workloadId\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n exit()\n\n # print(\"Full JSON:\",json.dumps(response['Workload'], cls=DateTimeEncoder))\n workload = response['Workload']\n # print(\"WorkloadId\",workloadId)\n return workload\n\ndef listLens(\n waclient\n ):\n # List all lenses currently available\n try:\n response=waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(json.dumps(response))\n lenses = jmespath.search(\"LensSummaries[*].LensAlias\", response)\n\n return lenses\n\ndef getCurrentLensVersion(\n waclient,\n lensAlias\n ):\n\n # List all lenses currently available\n try:\n response=waclient.list_lenses()\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(json.dumps(response))\n searchString = \"LensSummaries[?LensAlias==`\"+lensAlias+\"`].LensVersion\"\n lenses = jmespath.search(searchString, response)\n\n return lenses[0]\n\ndef findAllQuestionId(\n waclient,\n workloadId,\n lensAlias\n ):\n\n answers = []\n # Due to a bug in some lenses, I have to iterate over each pillar in order to\n # retrieve the correct results.\n for pillar in PILLAR_PARSE_MAP:\n 
logger.debug(\"Grabbing answers for %s %s\" % (lensAlias, pillar))\n # Find a questionID using the questionTitle\n try:\n response=waclient.list_answers(\n WorkloadId=workloadId,\n LensAlias=lensAlias,\n PillarId=pillar\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n answers.extend(response[\"AnswerSummaries\"])\n while \"NextToken\" in response:\n try:\n response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response[\"NextToken\"])\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n answers.extend(response[\"AnswerSummaries\"])\n return answers\n\ndef getQuestionDetails(\n waclient,\n workloadId,\n lensAlias,\n questionId\n ):\n\n # Find a answer for a questionId\n try:\n response=waclient.get_answer(\n WorkloadId=workloadId,\n LensAlias=lensAlias,\n QuestionId=questionId\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n\n\n qDescription = jmespath.search(\"Answer.QuestionDescription\", response)\n qImprovementPlanUrl = jmespath.search(\"Answer.ImprovementPlanUrl\", response)\n qHelpfulResourceUrl = jmespath.search(\"Answer.HelpfulResourceUrl\", response)\n qNotes = jmespath.search(\"Answer.Notes\", response)\n return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes\n\n\ndef updateAnswersForQuestion(\n waclient,\n workloadId,\n lensAlias,\n questionId,\n selectedChoices,\n notes\n ):\n\n # Update a answer to a question\n try:\n response=waclient.update_answer(\n WorkloadId=workloadId,\n 
LensAlias=lensAlias,\n QuestionId=questionId,\n SelectedChoices=selectedChoices,\n Notes=notes\n )\n except botocore.exceptions.ParamValidationError as e:\n logger.error(\"ERROR - Parameter validation error: %s\" % e)\n except botocore.exceptions.ClientError as e:\n logger.error(\"ERROR - Unexpected error: %s\" % e)\n\n # print(json.dumps(response))\n jmesquery = \"Answer.SelectedChoices\"\n answers = jmespath.search(jmesquery, response)\n return answers\n\ndef getImprovementPlanItems(\n waclient,\n workloadId,\n lensAlias,\n QuestionId,\n PillarId,\n ImprovementPlanUrl,\n ChoiceList\n):\n # This will parse the IP Items to gather the links we need\n response = {}\n htmlString = \"\"\n # unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode(\"utf8\")\n htmlSplit = htmlStr.split('\\n')\n ipHTMLList = {}\n for line in htmlSplit:\n for uq in ChoiceList:\n if uq in line:\n parsed = BeautifulSoup(line,features=\"html.parser\")\n ipHTMLList.update({uq: str(parsed.a['href'])})\n return ipHTMLList\n\ndef getImprovementPlanHTMLDescription(\n ImprovementPlanUrl,\n PillarId\n ):\n\n logger.debug(\"ImprovementPlanUrl: %s for pillar %s \" % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))\n stepRaw = ImprovementPlanUrl.rsplit('#')[1]\n\n # Grab the number of the step we are referencing\n # This will work as long as their are less than 99 steps.\n if len(stepRaw) <= 5:\n stepNumber = stepRaw[-1]\n else:\n stepNumber = stepRaw[-2]\n\n #Generate the string for the step number\n firstItem = \"step\"+stepNumber\n secondItem = (\"step\"+str((int(stepNumber)+1)))\n logger.debug (\"Going from %s to %s\" % (firstItem, secondItem))\n urlresponse = urllib.request.urlopen(ImprovementPlanUrl)\n htmlBytes = urlresponse.read()\n htmlStr = htmlBytes.decode(\"utf8\")\n htmlSplit = htmlStr.split('\\n')\n\n foundit = 0\n ipString = \"\"\n 
questionIdText = \"\"\n for i in htmlSplit:\n if PILLAR_PARSE_MAP[PillarId] in i:\n bsparse = BeautifulSoup(i,features=\"html.parser\")\n questionIdText = str(bsparse.text).split(':')[0].strip()\n if (secondItem in i) or (\"</div>\" in i):\n foundit = 0\n if firstItem in i:\n foundit = 1\n ipString+=i\n elif foundit:\n ipString+=i\n\n prettyHTML = BeautifulSoup(ipString,features=\"html.parser\")\n # Need to remove all of the \"local glossary links\" since they point to relative paths\n for a in prettyHTML.findAll('a', 'glossref'):\n a.replaceWithChildren()\n\n return prettyHTML, questionIdText\n\ndef lensTabCreation(\n WACLIENT,\n workloadId,\n lens,\n workbook,\n allQuestionsForLens,\n workloadName=\"\",\n AWSAccountId=\"\",\n workloadDescription=\"\"\n ):\n\n # Setup some formatting for the workbook\n bold = workbook.add_format({'bold': True})\n bold_border = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'text_wrap': True\n })\n bold_border_bold = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'text_wrap': True,\n 'font_size': 20,\n 'bold': True\n })\n\n heading = workbook.add_format({\n 'font_size': 24,\n 'bold': True\n })\n\n lineA = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'bg_color': '#E0EBF6',\n 'align': 'top',\n 'text_wrap': True\n })\n\n lineB = workbook.add_format({\n 'border': 1,\n 'border_color': 'black',\n 'bg_color': '#E4EFDC',\n 'align': 'top',\n 'text_wrap': True\n })\n\n lineAnoborder = workbook.add_format({\n 'border': 0,\n 'top': 1,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E0EBF6',\n 'align': 'top',\n 'text_wrap': True\n })\n\n lineBnoborder = workbook.add_format({\n 'border': 0,\n 'top': 1,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E4EFDC',\n 'align': 'top',\n 'text_wrap': True\n })\n\n\n lineAhidden = workbook.add_format({\n 'border': 0,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E0EBF6',\n 'align': 
'top',\n 'text_wrap': False,\n 'indent': 100\n })\n\n lineBhidden = workbook.add_format({\n 'border': 0,\n 'left': 1,\n 'right': 1,\n 'border_color': 'black',\n 'bg_color': '#E4EFDC',\n 'align': 'top',\n 'text_wrap': False,\n 'indent': 100\n })\n\n sub_heading = workbook.add_format()\n sub_heading.set_font_size(20)\n sub_heading.set_bold(True)\n\n small_font = workbook.add_format()\n small_font.set_font_size(9)\n\n # Get the current version of Lens\n logger.debug(\"Getting lens version for '\"+lens+\"'\")\n versionString = getCurrentLensVersion(WACLIENT,lens)\n logger.debug(\"Adding worksheet using version \"+versionString)\n lensName = lens[0:18]\n worksheet = workbook.add_worksheet((lensName+' v'+versionString))\n # Print in landscape\n worksheet.set_landscape()\n # Set to 8.5x11 paper size\n worksheet.set_paper(1)\n\n # Set the column widths\n worksheet.set_column('A:A', 11)\n worksheet.set_column('B:B', 32)\n worksheet.set_column('C:C', 56)\n worksheet.set_column('D:D', 29)\n worksheet.set_column('E:E', 57)\n worksheet.set_column('F:F', 18)\n worksheet.set_column('G:G', 70)\n\n # Top of sheet\n worksheet.merge_range('A1:G1', 'Workload Overview', heading)\n worksheet.merge_range('A3:B3', 'Workload Name', bold_border_bold)\n worksheet.merge_range('A4:B4', 'AWS Account ID', bold_border_bold)\n worksheet.merge_range('A5:B5', 'Workload Description', bold_border_bold)\n\n # If we are using an existing workload, then display the Name, ID, and Description at the top\n # or else just make it blank\n if WORKLOADID:\n worksheet.write('C3', workloadName, bold_border)\n accountIdParsed = AWSAccountId.split(':')[4]\n worksheet.write('C4', accountIdParsed, bold_border)\n worksheet.write('C5', workloadDescription, bold_border)\n else:\n worksheet.write('C3', '', bold_border)\n worksheet.write('C4', '', bold_border)\n worksheet.write('C5', '', bold_border)\n worksheet.write('D3', 'Enter the name of system', small_font)\n worksheet.write('D4', 'Enter 12-degit AWS account ID', 
small_font)\n worksheet.write('D5', 'Briefly describe system architecture and workload, flow etc.', small_font)\n\n # Subheadings for columns\n worksheet.write('A8', 'Pillar', sub_heading)\n worksheet.write('B8', 'Question', sub_heading)\n worksheet.write('C8', 'Explanation', sub_heading)\n worksheet.write('D8', 'Choice (Best Practice)', sub_heading)\n worksheet.write('E8', 'Detail', sub_heading)\n worksheet.write('F8', 'Response', sub_heading)\n worksheet.write('G8', 'Notes (optional)', sub_heading)\n\n # Freeze the top of the sheet\n worksheet.freeze_panes(8,0)\n\n # AutoFilter on the first two columns\n worksheet.autofilter('A8:B8')\n\n # Make it easier to print\n worksheet.repeat_rows(1, 8)\n worksheet.fit_to_pages(1, 99)\n\n # Starting point for pillar questions\n cellPosition = 8\n\n # Starting cell look with lineA. Will switch back and forth\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\n for pillar in PILLAR_PARSE_MAP:\n # This is the question number for each pillar (ex: OPS1, OPS2, etc)\n qNum = 1\n\n # The query will return all questions for a lens and pillar\n jmesquery = \"[?PillarId=='\"+pillar+\"']\"\n allQuestionsForPillar = jmespath.search(jmesquery, allQuestionsForLens)\n\n # For each of the possible answers, parse them and put into the Worksheet\n for answers in allQuestionsForPillar:\n # List all best practices\n questionTitle = PILLAR_PARSE_MAP[answers['PillarId']]+str(qNum)+\" - \"+answers['QuestionTitle']\n qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes = getQuestionDetails(WACLIENT,workloadId,lens,answers['QuestionId'])\n # Some of the questions have extra whitespaces and I need to remove those to fit into the cell\n qDescription = qDescription.replace('\\n ','').replace(' ','').replace('\\t', '').replace('\\n', '')\n qDescription = qDescription.rstrip()\n qDescription = qDescription.strip()\n\n logger.debug(\"Working on '\"+questionTitle+\"'\")\n logger.debug(\"It has answers of: 
\"+json.dumps(answers['SelectedChoices']))\n\n cellID = cellPosition + 1\n\n # If the question has been answered (which we do for the TEMP workload) we grab the URL and parse for the HTML content\n if qImprovementPlanUrl:\n jmesquery = \"[?QuestionId=='\"+answers['QuestionId']+\"'].Choices[].ChoiceId\"\n choiceList = jmespath.search(jmesquery, allQuestionsForLens)\n ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)\n else:\n ipList = []\n\n startingCellID=cellID\n # If its the first time through this particular pillar question:\n # I want to only write the name once, but I need to fill in\n # each cell with the same data so the autosort works properly\n # (else it will only show the first best practice)\n firstTimePillar=True\n\n for choices in answers['Choices']:\n\n # Write the pillar name and question in every cell for autosort, but only show the first one\n cell = 'A'+str(cellID)\n if firstTimePillar:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)\n cell = 'B'+str(cellID)\n worksheet.write(cell, questionTitle, myCellnoborder)\n firstTimePillar=False\n else:\n worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)\n cell = 'B'+str(cellID)\n worksheet.write(cell, questionTitle, myCellhidden)\n\n # Start writing each of the BP's, details, etc\n cell = 'D'+str(cellID)\n Title = choices['Title'].replace(' ','').replace('\\t', '').replace('\\n', '')\n if any(choices['ChoiceId'] in d for d in ipList):\n worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)\n #ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])\n #htmlString = ipItemHTML.text\n htmlString = \"\" \n htmlString = htmlString.replace('\\n ','').replace(' ','').replace('\\t', '').strip().rstrip()\n # print(htmlString)\n worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})\n else:\n 
worksheet.write(cell,Title,myCell)\n\n # Add all Details for each best practice/choice\n cell = 'E'+str(cellID)\n # Remove all of the extra spaces in the description field\n Description = choices['Description'].replace('\\n ','')\n Description = Description.replace('\\n ','')\n Description = Description.replace(' ','').replace('\\t', '').replace('\\n', '')\n Description = Description.rstrip()\n Description = Description.strip()\n worksheet.write(cell, Description ,myCell)\n\n # If this is an existing workload, we will show SELECTED if the have it checked\n # I would love to use a XLSX checkbox, but this library doesn't support it\n cell = 'F'+str(cellID)\n responseText = \"\"\n if choices['ChoiceId'] in answers['SelectedChoices']:\n responseText = \"SELECTED\"\n else:\n responseText = \"\"\n worksheet.write(cell, responseText ,myCell)\n cellID+=1\n\n # We are out of the choice/detail/response loop, so know how many rows were consumed\n # and we can create the explanation and notes field to span all of them\n # Explanantion field\n cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)\n worksheet.merge_range(cellMerge, qDescription,myCell)\n\n # Notes field\n cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)\n if WORKLOADID:\n worksheet.merge_range(cellMerge, qNotes, myCell)\n else:\n worksheet.merge_range(cellMerge, \"\", myCell)\n\n cellID-=1\n # Increase the question number\n qNum += 1\n # Reset the starting cellPosition to the last cellID\n cellPosition = cellID\n\n # Reset the cell formatting to alternate between the two colors\n if myCell == lineA:\n myCell = lineB\n myCellhidden = lineBhidden\n myCellnoborder = lineBnoborder\n else:\n myCell = lineA\n myCellhidden = lineAhidden\n myCellnoborder = lineAnoborder\n\ndef main():\n boto3_min_version = \"1.16.38\"\n # Verify if the version of Boto3 we are running has the wellarchitected APIs included\n if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):\n 
logger.error(\"Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)\" % (boto3.__version__, boto3_min_version))\n exit()\n\n logger.info(\"Script version %s\" % __version__)\n logger.info(\"Starting Boto %s Session\" % boto3.__version__)\n # Create a new boto3 session\n SESSION1 = boto3.session.Session(profile_name=PROFILE)\n # Initiate the well-architected session using the region defined above\n WACLIENT = SESSION1.client(\n service_name='wellarchitected',\n region_name=REGION_NAME,\n )\n\n # If this is an existing workload, we need to query for the various workload properties\n if WORKLOADID:\n logger.info(\"User specified workload id of %s\" % WORKLOADID)\n workloadJson = GetWorkload(WACLIENT,WORKLOADID)\n LENSES = workloadJson['Lenses']\n logger.info(\"Lenses for %s: %s\" % (WORKLOADID, json.dumps(LENSES)))\n WORKLOADNAME = workloadJson['WorkloadName']\n DESCRIPTION = workloadJson['Description']\n REVIEWOWNER = workloadJson['ReviewOwner']\n ENVIRONMENT= workloadJson['Environment']\n AWSREGIONS = workloadJson['AwsRegions']\n workloadId = WORKLOADID\n workloadARN = workloadJson['WorkloadArn']\n else:\n # In order to gather all of the questions, you must create a TEMP Workload\n logger.info(\"No workload ID specified, we will create a TEMP workload\")\n # Grab all lenses that are currently available\n LENSES = listLens(WACLIENT)\n logger.info(\"Lenses available: \"+json.dumps(LENSES))\n # Set the needed workload variables before we create it\n WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'\n DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'\n REVIEWOWNER = 'WA Python Script'\n ENVIRONMENT= 'PRODUCTION'\n AWSREGIONS = [REGION_NAME]\n # Creating the TEMP workload\n logger.info(\"Creating a new workload to gather questions and answers\")\n workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,\"[]\",\"[]\")\n\n\n\n # Create an new xlsx file and add a worksheet.\n 
logger.info(\"Creating xlsx file '\"+FILENAME+\"'\")\n workbook = xlsxwriter.Workbook(FILENAME)\n workbook.set_size(2800, 1600)\n\n # Simple hack to get Wellarchitected base framework first (reverse sort)\n # This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)\n LENSES.sort(reverse=True)\n\n # Iterate over each lens that we either have added or is in the workload\n for lens in LENSES:\n # Grab all questions for a particular lens\n allQuestions = findAllQuestionId(WACLIENT,workloadId,lens)\n if WORKLOADID:\n # If this is an existing workload, just go ahead and create the Tab and cells\n logger.debug(\"Not answering questions for existing workload\")\n lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)\n else:\n # If this is the TEMP workload, we need to first gather all of the questionIDs possible\n jmesquery = \"[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}\"\n allQuestionIds = jmespath.search(jmesquery, allQuestions)\n # Next we answer all of the questions across all lenses in the TEMP workload\n for question in allQuestionIds:\n logger.debug(\"Answering question %s in the %s lens\" % (question['QuestionId'], lens))\n updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')\n # Once the questions have been answered, we go ahead and create the tab for each\n lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)\n\n\n # Close out the workbook file\n logger.info(\"Closing Workbook File\")\n workbook.close()\n\n # If this is TEMP workload, we may remove it if it has not been set to keep\n if not WORKLOADID:\n if not KEEPTEMP:\n logger.info(\"Removing TEMP Workload\")\n DeleteWorkload(WACLIENT, workloadId)\n logger.info(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
13,
15,
16,
18,
19
]
}
|
[
13,
15,
16,
18,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search('(<Token: (\\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search('(<Token: (\\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
def get_student_from_request(request):
current_token = get_token_from_request(request)
current_user = Token.objects.filter(key=current_token).last().user
current_email = User.objects.filter(username=current_user).last().email
return ValidatedStudent.objects.filter(email=current_email).last()
<|reserved_special_token_1|>
import re
from .models import ValidatedStudent
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search('(<Token: (\\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
def get_student_from_request(request):
current_token = get_token_from_request(request)
current_user = Token.objects.filter(key=current_token).last().user
current_email = User.objects.filter(username=current_user).last().email
return ValidatedStudent.objects.filter(email=current_email).last()
<|reserved_special_token_1|>
import re
from .models import ValidatedStudent
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search(r'(<Token: (\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
def get_student_from_request(request):
current_token = get_token_from_request(request)
current_user = Token.objects.filter(key=current_token).last().user
current_email = User.objects.filter(username=current_user).last().email
return ValidatedStudent.objects.filter(email=current_email).last()
|
flexible
|
{
"blob_id": "2187f38dc9b14ecc355e98fe15d36fdefd548f04",
"index": 1159,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search('(<Token: (\\\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search('(<Token: (\\\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\ndef get_student_from_request(request):\n current_token = get_token_from_request(request)\n current_user = Token.objects.filter(key=current_token).last().user\n current_email = User.objects.filter(username=current_user).last().email\n return ValidatedStudent.objects.filter(email=current_email).last()\n",
"step-4": "import re\nfrom .models import ValidatedStudent\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search('(<Token: (\\\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\ndef get_student_from_request(request):\n current_token = get_token_from_request(request)\n current_user = Token.objects.filter(key=current_token).last().user\n current_email = User.objects.filter(username=current_user).last().email\n return ValidatedStudent.objects.filter(email=current_email).last()\n",
"step-5": "import re\nfrom .models import ValidatedStudent\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth.models import User\n\n\ndef get_token_from_request(request):\n token_tuple = request.COOKIES.get('money_api_token')\n matches = re.search(r'(<Token: (\\S*)>)', token_tuple)\n token = matches.groups(0)[1]\n return token\n\n\ndef get_student_from_request(request):\n current_token = get_token_from_request(request)\n current_user = Token.objects.filter(key=current_token).last().user\n current_email = User.objects.filter(username=current_user).last().email\n return ValidatedStudent.objects.filter(email=current_email).last()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class GIFTCommand(BaseInterface):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, **inputs):
super(GIFTCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',
'mfile', 'paths', 'use_mcr'])
self._find_mlab_cmd_defaults()
self._check_mlab_inputs()
self._matlab_cmd_update()
@classmethod
def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):
cls._matlab_cmd = matlab_cmd
cls._paths = paths
cls._use_mcr = use_mcr
def _find_mlab_cmd_defaults(self):
if self._use_mcr:
self._use_mcr = True
def _matlab_cmd_update(self):
matlab_cmd_str = self.inputs.matlab_cmd
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
if not matlab_cmd_str[-1] == ' ':
matlab_cmd_str = matlab_cmd_str + ' '
self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.
inputs.mfile, paths=self.inputs.paths)
self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.
__name__.split('.')[-1].lower())
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
self.mlab.inputs.nodesktop = Undefined
self.mlab.inputs.nosplash = Undefined
self.mlab.inputs.single_comp_thread = Undefined
self.mlab.inputs.uses_mcr = True
self.mlab.inputs.mfile = True
def _check_mlab_inputs(self):
if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:
self.inputs.matlab_cmd = self._matlab_cmd
if not isdefined(self.inputs.paths) and self._paths:
self.inputs.paths = self._paths
if not isdefined(self.inputs.use_mcr) and self._use_mcr:
self.inputs.use_mcr = self._use_mcr
def _run_interface(self, runtime):
"""Executes the GIFT function using MATLAB."""
self.mlab.inputs.script = self._make_matlab_command()
results = self.mlab.run()
runtime.returncode = results.runtime.returncode
if self.mlab.inputs.uses_mcr:
if 'Skipped' in results.runtime.stdout:
self.raise_exception(runtime)
runtime.stdout = results.runtime.stdout
runtime.stderr = results.runtime.stderr
runtime.merged = results.runtime.merged
return runtime
def _list_outputs(self):
"""Determine the expected outputs based on inputs."""
outputs = self._outputs().get()
return outputs
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GIFTCommand(BaseInterface):
<|reserved_special_token_0|>
input_spec = GIFTCommandInputSpec
output_spec = GIFTCommandOutputSpec
_matlab_cmd = None
_paths = None
_use_mcr = None
def __init__(self, **inputs):
super(GIFTCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',
'mfile', 'paths', 'use_mcr'])
self._find_mlab_cmd_defaults()
self._check_mlab_inputs()
self._matlab_cmd_update()
@classmethod
def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):
cls._matlab_cmd = matlab_cmd
cls._paths = paths
cls._use_mcr = use_mcr
def _find_mlab_cmd_defaults(self):
if self._use_mcr:
self._use_mcr = True
def _matlab_cmd_update(self):
matlab_cmd_str = self.inputs.matlab_cmd
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
if not matlab_cmd_str[-1] == ' ':
matlab_cmd_str = matlab_cmd_str + ' '
self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.
inputs.mfile, paths=self.inputs.paths)
self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.
__name__.split('.')[-1].lower())
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
self.mlab.inputs.nodesktop = Undefined
self.mlab.inputs.nosplash = Undefined
self.mlab.inputs.single_comp_thread = Undefined
self.mlab.inputs.uses_mcr = True
self.mlab.inputs.mfile = True
def _check_mlab_inputs(self):
if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:
self.inputs.matlab_cmd = self._matlab_cmd
if not isdefined(self.inputs.paths) and self._paths:
self.inputs.paths = self._paths
if not isdefined(self.inputs.use_mcr) and self._use_mcr:
self.inputs.use_mcr = self._use_mcr
def _run_interface(self, runtime):
"""Executes the GIFT function using MATLAB."""
self.mlab.inputs.script = self._make_matlab_command()
results = self.mlab.run()
runtime.returncode = results.runtime.returncode
if self.mlab.inputs.uses_mcr:
if 'Skipped' in results.runtime.stdout:
self.raise_exception(runtime)
runtime.stdout = results.runtime.stdout
runtime.stderr = results.runtime.stderr
runtime.merged = results.runtime.merged
return runtime
def _list_outputs(self):
"""Determine the expected outputs based on inputs."""
outputs = self._outputs().get()
return outputs
def _make_matlab_command(self):
"""Generates a mfile to build job structure
Returns
-------
mscript : string
contents of a script called by matlab
"""
raise NotImplementedError
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GIFTCommandInputSpec(BaseInterfaceInputSpec):
matlab_cmd = traits.Str(desc='matlab command to use')
paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')
mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)
use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')
class GIFTCommandOutputSpec(BaseInterfaceInputSpec):
matlab_output = traits.Str()
class GIFTCommand(BaseInterface):
"""Extends `BaseInterface` class to implement GIFT specific interfaces.
WARNING: Pseudo prototype class, meant to be subclassed
"""
input_spec = GIFTCommandInputSpec
output_spec = GIFTCommandOutputSpec
_matlab_cmd = None
_paths = None
_use_mcr = None
def __init__(self, **inputs):
super(GIFTCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',
'mfile', 'paths', 'use_mcr'])
self._find_mlab_cmd_defaults()
self._check_mlab_inputs()
self._matlab_cmd_update()
@classmethod
def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):
cls._matlab_cmd = matlab_cmd
cls._paths = paths
cls._use_mcr = use_mcr
def _find_mlab_cmd_defaults(self):
if self._use_mcr:
self._use_mcr = True
def _matlab_cmd_update(self):
matlab_cmd_str = self.inputs.matlab_cmd
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
if not matlab_cmd_str[-1] == ' ':
matlab_cmd_str = matlab_cmd_str + ' '
self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.
inputs.mfile, paths=self.inputs.paths)
self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.
__name__.split('.')[-1].lower())
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
self.mlab.inputs.nodesktop = Undefined
self.mlab.inputs.nosplash = Undefined
self.mlab.inputs.single_comp_thread = Undefined
self.mlab.inputs.uses_mcr = True
self.mlab.inputs.mfile = True
def _check_mlab_inputs(self):
if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:
self.inputs.matlab_cmd = self._matlab_cmd
if not isdefined(self.inputs.paths) and self._paths:
self.inputs.paths = self._paths
if not isdefined(self.inputs.use_mcr) and self._use_mcr:
self.inputs.use_mcr = self._use_mcr
def _run_interface(self, runtime):
"""Executes the GIFT function using MATLAB."""
self.mlab.inputs.script = self._make_matlab_command()
results = self.mlab.run()
runtime.returncode = results.runtime.returncode
if self.mlab.inputs.uses_mcr:
if 'Skipped' in results.runtime.stdout:
self.raise_exception(runtime)
runtime.stdout = results.runtime.stdout
runtime.stderr = results.runtime.stderr
runtime.merged = results.runtime.merged
return runtime
def _list_outputs(self):
"""Determine the expected outputs based on inputs."""
outputs = self._outputs().get()
return outputs
def _make_matlab_command(self):
"""Generates a mfile to build job structure
Returns
-------
mscript : string
contents of a script called by matlab
"""
raise NotImplementedError
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__docformat__ = 'restructuredtext'
<|reserved_special_token_0|>
class GIFTCommandInputSpec(BaseInterfaceInputSpec):
    """Input specification shared by GIFTCommand-based interfaces.

    Controls how the underlying MATLAB process is launched: a plain MATLAB
    command, m-file execution, or the standalone GIFT MCR runtime.
    """
    matlab_cmd = traits.Str(desc='matlab command to use')
    paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')
    mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)
    use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')
class GIFTCommandOutputSpec(BaseInterfaceInputSpec):
    """Output specification for GIFTCommand interfaces.

    NOTE(review): this derives from ``BaseInterfaceInputSpec``; nipype output
    specs conventionally derive from ``TraitedSpec`` — confirm this base class
    is intentional before relying on output-trait behaviour.
    """
    matlab_output = traits.Str()
class GIFTCommand(BaseInterface):
    """Extends `BaseInterface` class to implement GIFT specific interfaces.

    WARNING: Pseudo prototype class, meant to be subclassed.

    Subclasses implement :meth:`_make_matlab_command`, which returns the
    MATLAB script text that this class then runs through ``MatlabCommand``.
    """
    input_spec = GIFTCommandInputSpec
    output_spec = GIFTCommandOutputSpec
    # Process-wide defaults, installed once for all instances via
    # set_mlab_paths() and picked up per-instance in _check_mlab_inputs().
    _matlab_cmd = None
    _paths = None
    _use_mcr = None

    def __init__(self, **inputs):
        super(GIFTCommand, self).__init__(**inputs)
        # Rebuild the MatlabCommand whenever a launch-related input changes:
        # MatlabCommand's matlab_cmd is not a proper input and can only be
        # set when the MatlabCommand object is constructed.
        self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',
            'mfile', 'paths', 'use_mcr'])
        self._find_mlab_cmd_defaults()
        self._check_mlab_inputs()
        self._matlab_cmd_update()

    @classmethod
    def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):
        """Install class-level defaults for matlab_cmd/paths/use_mcr."""
        cls._matlab_cmd = matlab_cmd
        cls._paths = paths
        cls._use_mcr = use_mcr

    def _find_mlab_cmd_defaults(self):
        # NOTE(review): effectively a no-op (True -> True); presumably a
        # placeholder for detecting an environment-enforced MCR setup.
        if self._use_mcr:
            self._use_mcr = True

    def _matlab_cmd_update(self):
        # (Re)create the MatlabCommand here because matlab_cmd can only be
        # passed at MatlabCommand construction time, not set afterwards.
        matlab_cmd_str = self.inputs.matlab_cmd
        if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
            # Ensure a trailing space so the script argument appended to the
            # MCR launcher command stays separated (the example command in
            # the module docstring also ends with a space).
            if not matlab_cmd_str[-1] == ' ':
                matlab_cmd_str = matlab_cmd_str + ' '
        self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.
            inputs.mfile, paths=self.inputs.paths)
        # One script file per concrete interface, e.g. pyscript_gicacommand.m.
        self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.
            __name__.split('.')[-1].lower())
        if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
            # Desktop/splash/threading flags do not apply to the MCR runtime.
            self.mlab.inputs.nodesktop = Undefined
            self.mlab.inputs.nosplash = Undefined
            self.mlab.inputs.single_comp_thread = Undefined
            self.mlab.inputs.uses_mcr = True
            self.mlab.inputs.mfile = True

    def _check_mlab_inputs(self):
        # Fall back to the class-level defaults for any input left unset.
        if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:
            self.inputs.matlab_cmd = self._matlab_cmd
        if not isdefined(self.inputs.paths) and self._paths:
            self.inputs.paths = self._paths
        if not isdefined(self.inputs.use_mcr) and self._use_mcr:
            self.inputs.use_mcr = self._use_mcr

    def _run_interface(self, runtime):
        """Executes the GIFT function using MATLAB."""
        self.mlab.inputs.script = self._make_matlab_command()
        results = self.mlab.run()
        runtime.returncode = results.runtime.returncode
        if self.mlab.inputs.uses_mcr:
            # The MCR launcher signals a skipped run in stdout rather than
            # through a nonzero return code.
            if 'Skipped' in results.runtime.stdout:
                self.raise_exception(runtime)
        runtime.stdout = results.runtime.stdout
        runtime.stderr = results.runtime.stderr
        runtime.merged = results.runtime.merged
        return runtime

    def _list_outputs(self):
        """Determine the expected outputs based on inputs."""
        outputs = self._outputs().get()
        return outputs

    def _make_matlab_command(self):
        """Generates a mfile to build job structure.

        Must be overridden by concrete subclasses.

        Returns
        -------
        mscript : string
            contents of a script called by matlab
        """
        raise NotImplementedError
<|reserved_special_token_1|>
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The GIFT module provides basic functions for interfacing with some of the GIFT tools.
In order to use the standalone MCR version of GIFT, you need to ensure that
the following commands are executed at the beginning of your script::
from nipype.interfaces import gift
matlab_cmd = '/path/to/run_groupica.sh /path/to/compiler_runtime/v901/ '
gift.GICACommand.set_mlab_paths(matlab_cmd=matlab_cmd,use_mcr=True)
"""
__docformat__ = 'restructuredtext'
# Standard library imports
import os
# Local imports
from ..base import (BaseInterface, traits, isdefined, InputMultiPath,
BaseInterfaceInputSpec, Directory, Undefined)
from ..matlab import MatlabCommand
class GIFTCommandInputSpec(BaseInterfaceInputSpec):
matlab_cmd = traits.Str(desc='matlab command to use')
paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')
mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)
use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')
class GIFTCommandOutputSpec( BaseInterfaceInputSpec):
matlab_output = traits.Str( )
class GIFTCommand(BaseInterface):
"""Extends `BaseInterface` class to implement GIFT specific interfaces.
WARNING: Pseudo prototype class, meant to be subclassed
"""
input_spec = GIFTCommandInputSpec
output_spec = GIFTCommandOutputSpec
_matlab_cmd = None
_paths = None
_use_mcr = None
def __init__(self, **inputs):
super(GIFTCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd','mfile','paths','use_mcr'])
self._find_mlab_cmd_defaults()
self._check_mlab_inputs()
self._matlab_cmd_update()
@classmethod
def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):
cls._matlab_cmd = matlab_cmd
cls._paths = paths
cls._use_mcr = use_mcr
def _find_mlab_cmd_defaults(self):
# check if the user has set environment variables to enforce
# the standalone (MCR) version of GIFT
if self._use_mcr:
self._use_mcr = True
def _matlab_cmd_update(self):
# MatlabCommand has to be created here,
# because matlab_cmb is not a proper input
# and can be set only during init
matlab_cmd_str = self.inputs.matlab_cmd
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
if not matlab_cmd_str[-1] == " ":
matlab_cmd_str = matlab_cmd_str + " "
self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str,
mfile=self.inputs.mfile,
paths=self.inputs.paths)
self.mlab.inputs.script_file = 'pyscript_%s.m' % self.__class__.__name__.split('.')[-1].lower()
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
self.mlab.inputs.nodesktop = Undefined
self.mlab.inputs.nosplash = Undefined
self.mlab.inputs.single_comp_thread = Undefined
self.mlab.inputs.uses_mcr = True
self.mlab.inputs.mfile = True
def _check_mlab_inputs(self):
if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:
self.inputs.matlab_cmd = self._matlab_cmd
if not isdefined(self.inputs.paths) and self._paths:
self.inputs.paths = self._paths
if not isdefined(self.inputs.use_mcr) and self._use_mcr:
self.inputs.use_mcr = self._use_mcr
def _run_interface(self, runtime):
"""Executes the GIFT function using MATLAB."""
self.mlab.inputs.script = self._make_matlab_command()
results = self.mlab.run()
runtime.returncode = results.runtime.returncode
if self.mlab.inputs.uses_mcr:
if 'Skipped' in results.runtime.stdout:
self.raise_exception(runtime)
runtime.stdout = results.runtime.stdout
runtime.stderr = results.runtime.stderr
runtime.merged = results.runtime.merged
return runtime
def _list_outputs(self):
"""Determine the expected outputs based on inputs."""
outputs = self._outputs().get()
return outputs
def _make_matlab_command(self):
"""Generates a mfile to build job structure
Returns
-------
mscript : string
contents of a script called by matlab
"""
raise NotImplementedError
|
flexible
|
{
"blob_id": "fef1cf75de8358807f29cd06d2338e087d6f2d23",
"index": 9162,
"step-1": "<mask token>\n\n\nclass GIFTCommand(BaseInterface):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n 
self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GIFTCommand(BaseInterface):\n <mask token>\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n 
if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n\n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n raise NotImplementedError\n",
"step-3": "<mask token>\n\n\nclass GIFTCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')\n\n\nclass GIFTCommandOutputSpec(BaseInterfaceInputSpec):\n matlab_output = traits.Str()\n\n\nclass GIFTCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement GIFT specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not 
isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n\n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n raise NotImplementedError\n",
"step-4": "<mask token>\n__docformat__ = 'restructuredtext'\n<mask token>\n\n\nclass GIFTCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')\n\n\nclass GIFTCommandOutputSpec(BaseInterfaceInputSpec):\n matlab_output = traits.Str()\n\n\nclass GIFTCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement GIFT specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = 
True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n\n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n raise NotImplementedError\n",
"step-5": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"The GIFT module provides basic functions for interfacing with some of the GIFT tools.\n\nIn order to use the standalone MCR version of GIFT, you need to ensure that\nthe following commands are executed at the beginning of your script::\n\n from nipype.interfaces import gift \n matlab_cmd = '/path/to/run_groupica.sh /path/to/compiler_runtime/v901/ '\n gift.GICACommand.set_mlab_paths(matlab_cmd=matlab_cmd,use_mcr=True)\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n# Standard library imports\nimport os\n\n# Local imports\nfrom ..base import (BaseInterface, traits, isdefined, InputMultiPath,\n BaseInterfaceInputSpec, Directory, Undefined)\nfrom ..matlab import MatlabCommand\n\nclass GIFTCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using GIFT MCR') \n\t\nclass GIFTCommandOutputSpec( BaseInterfaceInputSpec):\n matlab_output = traits.Str( )\t\n\nclass GIFTCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement GIFT specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n \n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd','mfile','paths','use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def 
_find_mlab_cmd_defaults(self):\n # check if the user has set environment variables to enforce\n # the standalone (MCR) version of GIFT \n if self._use_mcr:\n self._use_mcr = True\n \n\n def _matlab_cmd_update(self):\n # MatlabCommand has to be created here,\n # because matlab_cmb is not a proper input\n # and can be set only during init\t\n matlab_cmd_str = self.inputs.matlab_cmd\t\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == \" \":\n matlab_cmd_str = matlab_cmd_str + \" \"\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str,\n mfile=self.inputs.mfile,\n paths=self.inputs.paths) \n self.mlab.inputs.script_file = 'pyscript_%s.m' % self.__class__.__name__.split('.')[-1].lower()\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n \n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command() \t\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\t\t\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n \n outputs = self._outputs().get()\n return outputs\n\n \n def _make_matlab_command(self):\n 
\"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n \n raise NotImplementedError\n\n",
"step-ids": [
8,
10,
15,
16,
18
]
}
|
[
8,
10,
15,
16,
18
] |
__author__ = 'Administrator'
class People:
    """Base class modelling a person with a name and an age."""

    def __init__(self, name, age):
        """Store the person's name and age."""
        self.name = name
        self.age = age

    def eat(self):
        """Print a message saying the person is eating."""
        # Removed a stray dead `pass` statement that preceded the print.
        print("%s is eating..." % self.name)

    def sleep(self):
        """Print a message saying the person is sleeping."""
        print("%s is sleeping..." % self.name)

    def talk(self):
        """Print a message saying the person is talking."""
        print("%s is talking..." % self.name)
class Man(People):
    """A man: starts life with some money and overrides sleeping."""

    def __init__(self, name, age, money):
        """Initialise the base person, then record the starting money."""
        super(Man, self).__init__(name, age)
        self.money = money
        print("%s 一出生就有%s money..." % (name, money))

    def piao(self):
        """Print what this man is doing."""
        print("%s is piaoing...20s...isdone" % self.name)

    def sleep(self):
        """Override People.sleep with a fixed message."""
        print("man is sleeping")
class Women(People):
    """A woman; inherits everything from People and can give birth."""

    # Removed a stray `pass` statement that preceded the method definition.
    def get_birth(self):
        """Print a message announcing the birth of a baby."""
        print("%s is born a baby...." % self.name)
# Demo: create a Man and exercise inherited and overridden behaviour.
m1 = Man("chenronghua",22,10000)
m1.eat()
m1.sleep()
m1.talk()
m1.piao()
# Women inherits __init__ from People, so it takes only (name, age).
w1 = Women("ronghua",26)
w1.get_birth()
|
normal
|
{
"blob_id": "6fdc9b2091652b05d6c1207d2f78b75c880fadda",
"index": 9084,
"step-1": "<mask token>\n\n\nclass People:\n <mask token>\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n <mask token>\n <mask token>\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n <mask token>\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n\n def sleep(self):\n print('%s is sleeping...' % self.name)\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass People:\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def eat(self):\n pass\n print('%s is eating...' % self.name)\n\n def sleep(self):\n print('%s is sleeping...' % self.name)\n\n def talk(self):\n print('%s is talking...' % self.name)\n\n\nclass Man(People):\n\n def __init__(self, name, age, money):\n super(Man, self).__init__(name, age)\n self.money = money\n print('%s 一出生就有%s money...' % (name, money))\n\n def piao(self):\n print('%s is piaoing...20s...isdone' % self.name)\n\n def sleep(self):\n print('man is sleeping')\n\n\nclass Women(People):\n pass\n\n def get_birth(self):\n print('%s is born a baby....' % self.name)\n\n\n<mask token>\nm1.eat()\nm1.sleep()\nm1.talk()\nm1.piao()\n<mask token>\nw1.get_birth()\n",
"step-5": "__author__ = 'Administrator'\n\n\nclass People:\n def __init__(self,name,age):\n self.name = name\n self.age = age\n def eat(self):\n pass\n print(\"%s is eating...\" % self.name)\n\n def sleep(self):\n print(\"%s is sleeping...\" % self.name)\n\n def talk(self):\n print(\"%s is talking...\" % self.name)\n\nclass Man(People):\n def __init__(self,name,age,money):\n # People.__init__(self,name,age)\n super(Man,self).__init__(name,age)\n\n self.money = money\n print(\"%s 一出生就有%s money...\" % (name,money))\n def piao(self):\n print(\"%s is piaoing...20s...isdone\" % self.name)\n\n def sleep(self):\n #People.sleep(self)\n print(\"man is sleeping\")\n\nclass Women(People):\n pass\n def get_birth(self):\n print(\"%s is born a baby....\" % self.name)\n\n\n\nm1 = Man(\"chenronghua\",22,10000)\n\nm1.eat()\n\nm1.sleep()\n\nm1.talk()\n\nm1.piao()\n\n\nw1 = Women(\"ronghua\",26)\n\nw1.get_birth()\n\n\n\n\n\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
from pydub import AudioSegment
import sys
import tensorflow as tf
import numpy as np
from adwtmk.audio import Audio
from adwtmk.encoder import *
from adwtmk.decoder import *
class DAE(object):
def __init__(self,model_name):
self.model_name = model_name
self.process = 0
self.loss = 0
self.origin_loss = 0
self.core_size = 3
self.batch_size = 600
self.Epoches = 100
def _get_batches(self,batch_size,data,core_size):
assert batch_size % core_size == 0
dim_0 = len(data)
#print("dim_0:",dim_0)
length = len(data[0])
num_batches = length // batch_size
remainder_length = length % batch_size
res = list()
for i in range(num_batches):
res.append(data[:,i*batch_size:(i+1)*batch_size])
res = [np.array(x,np.float64).reshape(dim_0,batch_size//core_size,core_size) for x in res]
remainder = data[:,-remainder_length:]
return res,remainder
#np.set_printoptions(threshold=1e6)
#def _my_config():
#core_size = 5
#batch_size = 500
#Epoches = 200
def fast_training(self,sound):
self.core_size = 100
self.batch_size = 1000
self.Epoches = 50
self._main(sound,100,1000,50)
def medium_training(self,sound):
self.core_size = 5
self.batch_size = 500
self.Epoches = 100
self._main(sound,5,500,100)
def slow_training(self,sound):
self.core_size = 3
self.batch_size = 300
self.Epoches = 100
self._main(sound,3,300,150)
def get_train_result_music_file(self):
if (self.new_sound):
return self.new_sound
else:
raise Exception("You should run training firstly !")
def get_current_training_process(self):
return self.process
def test(self,sound):
audio_matrix = sound.get_reshaped_samples()
#max_value = np.max(audio_matrix)
#min_value = np.min(audio_matrix)
#audio_matrix = (audio_matrix-min_value) / (max_value-min_value)
mean_value = np.mean(audio_matrix)
std_value = np.std(audio_matrix)
audio_matrix = (audio_matrix-mean_value) / std_value
channels = len(audio_matrix)
batches,remainder = self._get_batches(batch_size=self.batch_size,core_size=self.core_size,data=audio_matrix)
losses = list()
for i in range(len(batches)):
dropout_indicator = np.random.rand()
if (dropout_indicator <= 0.2):
losses.append(np.sum(abs(batches[i])))
batches[i] *= 0.00
losses.append(0)
sum_losses = np.sum(np.array(losses).reshape(-1))
#print("losses:")
#print(np.array(losses).reshape(-1))
#print(sum_losses)
test_batches = np.array(batches,np.float64).reshape(channels,-1)
test_batches = np.concatenate((test_batches,remainder),axis=1)
count = audio_matrix.shape
count = count[0]*count[1]
self.origin_loss = sum_losses/(float)(count)
test_batches = test_batches * std_value + mean_value
test_sound = sound.spawn(test_batches)
self._main(test_sound,self.core_size,self.batch_size,1,1.0)
return test_sound,self.new_sound
def _main(self,sound,core_size,batch_size,Epoches,drop_out_rate=0.9):
self.new_sound = None
self.process = 0
self.loss = 0
#print(sound.frame_rate,sound.duration_seconds, len(sound.get_array_of_samples()))
audio_matrix = sound.get_reshaped_samples()
#max_value = np.max(audio_matrix)
#min_value = np.min(audio_matrix)
#audio_matrix = (audio_matrix-min_value) / (max_value-min_value)
mean_value = np.mean(audio_matrix)
std_value = np.std(audio_matrix)
audio_matrix = (audio_matrix-mean_value) / std_value
batches,remainder = self._get_batches(batch_size=batch_size,core_size=core_size,data=audio_matrix)
steps = batch_size // core_size
channels = len(audio_matrix)
best_output = ""
with tf.Session() as sess:
fw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(core_size),drop_out_rate)
fw_rnn_cell = tf.contrib.rnn.MultiRNNCell([fw_cell]*2)
bw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(core_size),drop_out_rate)
bw_rnn_cell = tf.contrib.rnn.MultiRNNCell([bw_cell]*2)
input_data = tf.placeholder(shape=[channels,steps,core_size],dtype=tf.float64)
in_weights = tf.get_variable(name="in_weight",shape=[steps*core_size,steps*core_size],dtype=tf.float64)
in_bias = tf.get_variable(name="in_bias",shape=[core_size*steps],dtype=tf.float64)
hidden_data = tf.tanh(tf.nn.xw_plus_b(tf.reshape(input_data,(channels,-1)),in_weights,in_bias))
hidden_data_out = tf.reshape(hidden_data,[channels,steps,core_size])
bi_outputs,last_state = tf.nn.bidirectional_dynamic_rnn(fw_rnn_cell,bw_rnn_cell,hidden_data_out,dtype=tf.float64)
out_weights = tf.get_variable(name="out_weight",shape=[steps*core_size*2,steps*core_size],dtype=tf.float64)
out_bias = tf.get_variable(name="out_bias",shape=[core_size*steps],dtype=tf.float64)
outputs = tf.nn.xw_plus_b(tf.reshape(tf.concat(bi_outputs,2),(channels,-1)),out_weights,out_bias)
#outputs,last_state = tf.nn.dynamic_rnn(fw_rnn_cell,input_data,dtype=tf.float64)
loss = tf.reduce_mean(tf.sqrt(tf.squared_difference(tf.reshape(input_data,(channels,-1)),outputs)))
train = tf.train.AdamOptimizer(0.001).minimize(loss)
saver = tf.train.Saver()
train_loss = 999999999
try:
saver.restore(sess,self.model_name)
print("model restored")
except:
sess.run(tf.global_variables_initializer())
print("restore failed, randomly initialize")
for i in range(Epoches):
loss_temp = 0
outputs_temp = list()
for item in batches:
if (drop_out_rate < 1):
epoch_outputs,epoch_loss,_ = sess.run([outputs,loss,train],feed_dict={
input_data:item
})
else:
epoch_outputs,epoch_loss = sess.run([outputs,loss],feed_dict={
input_data:item
})
loss_temp += epoch_loss
outputs_temp.append(epoch_outputs)
loss_temp /= len(batches)
if (i == 0 and drop_out_rate<1):
self.origin_loss = loss_temp
self.process = i/Epoches
self.loss = loss_temp
#print("process:%f,loss:%f" % (i/Epoches,loss_temp))
if (loss_temp < train_loss):
train_loss = loss_temp
if (drop_out_rate < 1):
saver.save(sess,self.model_name)
best_output = outputs_temp
#best_output = best_output.append(remainder)
best_output = np.array(best_output,np.float64).reshape(channels,-1)
best_output = np.concatenate((best_output,remainder),axis=1)
#best_output = best_output.T
#best_output = best_output.reshape(-1)
best_output = best_output*std_value+mean_value
#best_output *= max_value-min_value
#best_output += min_value
self.new_sound = sound.spawn(best_output)
#new_sound.export("test.flac","flac")
#ex.add_artifact(filename="./test.flac")
#ex.add_artifact(filename="./rnn_model_key_multirnn_bi_input.ckpt*")
#audio_matrix = np.array(audio_matrix,np.float64).reshape(channels,-1)
#audio_matrix = audio_matrix.T
#audio_matrix = audio_matrix.reshape(-1)
#audio_matrix = audio_matrix * (max_value-min_value)+min_value
audio_matrix = audio_matrix * std_value + mean_value
new_sound = sound.spawn(audio_matrix)
#new_sound.export("test2.flac","flac")
#sound = Audio.from_file("./mark.flac", format="flac")
#fast_training(sound)
|
normal
|
{
"blob_id": "6f53702d9265a7fc57d2ec2e47dc35a0bc7a9f87",
"index": 9012,
"step-1": "<mask token>\n\n\nclass DAE(object):\n <mask token>\n <mask token>\n\n def fast_training(self, sound):\n self.core_size = 100\n self.batch_size = 1000\n self.Epoches = 50\n self._main(sound, 100, 1000, 50)\n\n def medium_training(self, sound):\n self.core_size = 5\n self.batch_size = 500\n self.Epoches = 100\n self._main(sound, 5, 500, 100)\n <mask token>\n <mask token>\n <mask token>\n\n def test(self, sound):\n audio_matrix = sound.get_reshaped_samples()\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix - mean_value) / std_value\n channels = len(audio_matrix)\n batches, remainder = self._get_batches(batch_size=self.batch_size,\n core_size=self.core_size, data=audio_matrix)\n losses = list()\n for i in range(len(batches)):\n dropout_indicator = np.random.rand()\n if dropout_indicator <= 0.2:\n losses.append(np.sum(abs(batches[i])))\n batches[i] *= 0.0\n losses.append(0)\n sum_losses = np.sum(np.array(losses).reshape(-1))\n test_batches = np.array(batches, np.float64).reshape(channels, -1)\n test_batches = np.concatenate((test_batches, remainder), axis=1)\n count = audio_matrix.shape\n count = count[0] * count[1]\n self.origin_loss = sum_losses / float(count)\n test_batches = test_batches * std_value + mean_value\n test_sound = sound.spawn(test_batches)\n self._main(test_sound, self.core_size, self.batch_size, 1, 1.0)\n return test_sound, self.new_sound\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DAE(object):\n\n def __init__(self, model_name):\n self.model_name = model_name\n self.process = 0\n self.loss = 0\n self.origin_loss = 0\n self.core_size = 3\n self.batch_size = 600\n self.Epoches = 100\n <mask token>\n\n def fast_training(self, sound):\n self.core_size = 100\n self.batch_size = 1000\n self.Epoches = 50\n self._main(sound, 100, 1000, 50)\n\n def medium_training(self, sound):\n self.core_size = 5\n self.batch_size = 500\n self.Epoches = 100\n self._main(sound, 5, 500, 100)\n <mask token>\n <mask token>\n\n def get_current_training_process(self):\n return self.process\n\n def test(self, sound):\n audio_matrix = sound.get_reshaped_samples()\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix - mean_value) / std_value\n channels = len(audio_matrix)\n batches, remainder = self._get_batches(batch_size=self.batch_size,\n core_size=self.core_size, data=audio_matrix)\n losses = list()\n for i in range(len(batches)):\n dropout_indicator = np.random.rand()\n if dropout_indicator <= 0.2:\n losses.append(np.sum(abs(batches[i])))\n batches[i] *= 0.0\n losses.append(0)\n sum_losses = np.sum(np.array(losses).reshape(-1))\n test_batches = np.array(batches, np.float64).reshape(channels, -1)\n test_batches = np.concatenate((test_batches, remainder), axis=1)\n count = audio_matrix.shape\n count = count[0] * count[1]\n self.origin_loss = sum_losses / float(count)\n test_batches = test_batches * std_value + mean_value\n test_sound = sound.spawn(test_batches)\n self._main(test_sound, self.core_size, self.batch_size, 1, 1.0)\n return test_sound, self.new_sound\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DAE(object):\n\n def __init__(self, model_name):\n self.model_name = model_name\n self.process = 0\n self.loss = 0\n self.origin_loss = 0\n self.core_size = 3\n self.batch_size = 600\n self.Epoches = 100\n\n def _get_batches(self, batch_size, data, core_size):\n assert batch_size % core_size == 0\n dim_0 = len(data)\n length = len(data[0])\n num_batches = length // batch_size\n remainder_length = length % batch_size\n res = list()\n for i in range(num_batches):\n res.append(data[:, i * batch_size:(i + 1) * batch_size])\n res = [np.array(x, np.float64).reshape(dim_0, batch_size //\n core_size, core_size) for x in res]\n remainder = data[:, -remainder_length:]\n return res, remainder\n\n def fast_training(self, sound):\n self.core_size = 100\n self.batch_size = 1000\n self.Epoches = 50\n self._main(sound, 100, 1000, 50)\n\n def medium_training(self, sound):\n self.core_size = 5\n self.batch_size = 500\n self.Epoches = 100\n self._main(sound, 5, 500, 100)\n\n def slow_training(self, sound):\n self.core_size = 3\n self.batch_size = 300\n self.Epoches = 100\n self._main(sound, 3, 300, 150)\n <mask token>\n\n def get_current_training_process(self):\n return self.process\n\n def test(self, sound):\n audio_matrix = sound.get_reshaped_samples()\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix - mean_value) / std_value\n channels = len(audio_matrix)\n batches, remainder = self._get_batches(batch_size=self.batch_size,\n core_size=self.core_size, data=audio_matrix)\n losses = list()\n for i in range(len(batches)):\n dropout_indicator = np.random.rand()\n if dropout_indicator <= 0.2:\n losses.append(np.sum(abs(batches[i])))\n batches[i] *= 0.0\n losses.append(0)\n sum_losses = np.sum(np.array(losses).reshape(-1))\n test_batches = np.array(batches, np.float64).reshape(channels, -1)\n test_batches = np.concatenate((test_batches, remainder), axis=1)\n count = audio_matrix.shape\n count = 
count[0] * count[1]\n self.origin_loss = sum_losses / float(count)\n test_batches = test_batches * std_value + mean_value\n test_sound = sound.spawn(test_batches)\n self._main(test_sound, self.core_size, self.batch_size, 1, 1.0)\n return test_sound, self.new_sound\n\n def _main(self, sound, core_size, batch_size, Epoches, drop_out_rate=0.9):\n self.new_sound = None\n self.process = 0\n self.loss = 0\n audio_matrix = sound.get_reshaped_samples()\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix - mean_value) / std_value\n batches, remainder = self._get_batches(batch_size=batch_size,\n core_size=core_size, data=audio_matrix)\n steps = batch_size // core_size\n channels = len(audio_matrix)\n best_output = ''\n with tf.Session() as sess:\n fw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.\n BasicLSTMCell(core_size), drop_out_rate)\n fw_rnn_cell = tf.contrib.rnn.MultiRNNCell([fw_cell] * 2)\n bw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.\n BasicLSTMCell(core_size), drop_out_rate)\n bw_rnn_cell = tf.contrib.rnn.MultiRNNCell([bw_cell] * 2)\n input_data = tf.placeholder(shape=[channels, steps, core_size],\n dtype=tf.float64)\n in_weights = tf.get_variable(name='in_weight', shape=[steps *\n core_size, steps * core_size], dtype=tf.float64)\n in_bias = tf.get_variable(name='in_bias', shape=[core_size *\n steps], dtype=tf.float64)\n hidden_data = tf.tanh(tf.nn.xw_plus_b(tf.reshape(input_data, (\n channels, -1)), in_weights, in_bias))\n hidden_data_out = tf.reshape(hidden_data, [channels, steps,\n core_size])\n bi_outputs, last_state = tf.nn.bidirectional_dynamic_rnn(\n fw_rnn_cell, bw_rnn_cell, hidden_data_out, dtype=tf.float64)\n out_weights = tf.get_variable(name='out_weight', shape=[steps *\n core_size * 2, steps * core_size], dtype=tf.float64)\n out_bias = tf.get_variable(name='out_bias', shape=[core_size *\n steps], dtype=tf.float64)\n outputs = tf.nn.xw_plus_b(tf.reshape(tf.concat(bi_outputs, 2),\n 
(channels, -1)), out_weights, out_bias)\n loss = tf.reduce_mean(tf.sqrt(tf.squared_difference(tf.reshape(\n input_data, (channels, -1)), outputs)))\n train = tf.train.AdamOptimizer(0.001).minimize(loss)\n saver = tf.train.Saver()\n train_loss = 999999999\n try:\n saver.restore(sess, self.model_name)\n print('model restored')\n except:\n sess.run(tf.global_variables_initializer())\n print('restore failed, randomly initialize')\n for i in range(Epoches):\n loss_temp = 0\n outputs_temp = list()\n for item in batches:\n if drop_out_rate < 1:\n epoch_outputs, epoch_loss, _ = sess.run([outputs,\n loss, train], feed_dict={input_data: item})\n else:\n epoch_outputs, epoch_loss = sess.run([outputs, loss\n ], feed_dict={input_data: item})\n loss_temp += epoch_loss\n outputs_temp.append(epoch_outputs)\n loss_temp /= len(batches)\n if i == 0 and drop_out_rate < 1:\n self.origin_loss = loss_temp\n self.process = i / Epoches\n self.loss = loss_temp\n if loss_temp < train_loss:\n train_loss = loss_temp\n if drop_out_rate < 1:\n saver.save(sess, self.model_name)\n best_output = outputs_temp\n best_output = np.array(best_output, np.float64).reshape(channels,\n -1)\n best_output = np.concatenate((best_output, remainder), axis=1)\n best_output = best_output * std_value + mean_value\n self.new_sound = sound.spawn(best_output)\n audio_matrix = audio_matrix * std_value + mean_value\n new_sound = sound.spawn(audio_matrix)\n",
"step-4": "<mask token>\n\n\nclass DAE(object):\n\n def __init__(self, model_name):\n self.model_name = model_name\n self.process = 0\n self.loss = 0\n self.origin_loss = 0\n self.core_size = 3\n self.batch_size = 600\n self.Epoches = 100\n\n def _get_batches(self, batch_size, data, core_size):\n assert batch_size % core_size == 0\n dim_0 = len(data)\n length = len(data[0])\n num_batches = length // batch_size\n remainder_length = length % batch_size\n res = list()\n for i in range(num_batches):\n res.append(data[:, i * batch_size:(i + 1) * batch_size])\n res = [np.array(x, np.float64).reshape(dim_0, batch_size //\n core_size, core_size) for x in res]\n remainder = data[:, -remainder_length:]\n return res, remainder\n\n def fast_training(self, sound):\n self.core_size = 100\n self.batch_size = 1000\n self.Epoches = 50\n self._main(sound, 100, 1000, 50)\n\n def medium_training(self, sound):\n self.core_size = 5\n self.batch_size = 500\n self.Epoches = 100\n self._main(sound, 5, 500, 100)\n\n def slow_training(self, sound):\n self.core_size = 3\n self.batch_size = 300\n self.Epoches = 100\n self._main(sound, 3, 300, 150)\n\n def get_train_result_music_file(self):\n if self.new_sound:\n return self.new_sound\n else:\n raise Exception('You should run training firstly !')\n\n def get_current_training_process(self):\n return self.process\n\n def test(self, sound):\n audio_matrix = sound.get_reshaped_samples()\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix - mean_value) / std_value\n channels = len(audio_matrix)\n batches, remainder = self._get_batches(batch_size=self.batch_size,\n core_size=self.core_size, data=audio_matrix)\n losses = list()\n for i in range(len(batches)):\n dropout_indicator = np.random.rand()\n if dropout_indicator <= 0.2:\n losses.append(np.sum(abs(batches[i])))\n batches[i] *= 0.0\n losses.append(0)\n sum_losses = np.sum(np.array(losses).reshape(-1))\n test_batches = np.array(batches, 
np.float64).reshape(channels, -1)\n test_batches = np.concatenate((test_batches, remainder), axis=1)\n count = audio_matrix.shape\n count = count[0] * count[1]\n self.origin_loss = sum_losses / float(count)\n test_batches = test_batches * std_value + mean_value\n test_sound = sound.spawn(test_batches)\n self._main(test_sound, self.core_size, self.batch_size, 1, 1.0)\n return test_sound, self.new_sound\n\n def _main(self, sound, core_size, batch_size, Epoches, drop_out_rate=0.9):\n self.new_sound = None\n self.process = 0\n self.loss = 0\n audio_matrix = sound.get_reshaped_samples()\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix - mean_value) / std_value\n batches, remainder = self._get_batches(batch_size=batch_size,\n core_size=core_size, data=audio_matrix)\n steps = batch_size // core_size\n channels = len(audio_matrix)\n best_output = ''\n with tf.Session() as sess:\n fw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.\n BasicLSTMCell(core_size), drop_out_rate)\n fw_rnn_cell = tf.contrib.rnn.MultiRNNCell([fw_cell] * 2)\n bw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.\n BasicLSTMCell(core_size), drop_out_rate)\n bw_rnn_cell = tf.contrib.rnn.MultiRNNCell([bw_cell] * 2)\n input_data = tf.placeholder(shape=[channels, steps, core_size],\n dtype=tf.float64)\n in_weights = tf.get_variable(name='in_weight', shape=[steps *\n core_size, steps * core_size], dtype=tf.float64)\n in_bias = tf.get_variable(name='in_bias', shape=[core_size *\n steps], dtype=tf.float64)\n hidden_data = tf.tanh(tf.nn.xw_plus_b(tf.reshape(input_data, (\n channels, -1)), in_weights, in_bias))\n hidden_data_out = tf.reshape(hidden_data, [channels, steps,\n core_size])\n bi_outputs, last_state = tf.nn.bidirectional_dynamic_rnn(\n fw_rnn_cell, bw_rnn_cell, hidden_data_out, dtype=tf.float64)\n out_weights = tf.get_variable(name='out_weight', shape=[steps *\n core_size * 2, steps * core_size], dtype=tf.float64)\n out_bias = 
tf.get_variable(name='out_bias', shape=[core_size *\n steps], dtype=tf.float64)\n outputs = tf.nn.xw_plus_b(tf.reshape(tf.concat(bi_outputs, 2),\n (channels, -1)), out_weights, out_bias)\n loss = tf.reduce_mean(tf.sqrt(tf.squared_difference(tf.reshape(\n input_data, (channels, -1)), outputs)))\n train = tf.train.AdamOptimizer(0.001).minimize(loss)\n saver = tf.train.Saver()\n train_loss = 999999999\n try:\n saver.restore(sess, self.model_name)\n print('model restored')\n except:\n sess.run(tf.global_variables_initializer())\n print('restore failed, randomly initialize')\n for i in range(Epoches):\n loss_temp = 0\n outputs_temp = list()\n for item in batches:\n if drop_out_rate < 1:\n epoch_outputs, epoch_loss, _ = sess.run([outputs,\n loss, train], feed_dict={input_data: item})\n else:\n epoch_outputs, epoch_loss = sess.run([outputs, loss\n ], feed_dict={input_data: item})\n loss_temp += epoch_loss\n outputs_temp.append(epoch_outputs)\n loss_temp /= len(batches)\n if i == 0 and drop_out_rate < 1:\n self.origin_loss = loss_temp\n self.process = i / Epoches\n self.loss = loss_temp\n if loss_temp < train_loss:\n train_loss = loss_temp\n if drop_out_rate < 1:\n saver.save(sess, self.model_name)\n best_output = outputs_temp\n best_output = np.array(best_output, np.float64).reshape(channels,\n -1)\n best_output = np.concatenate((best_output, remainder), axis=1)\n best_output = best_output * std_value + mean_value\n self.new_sound = sound.spawn(best_output)\n audio_matrix = audio_matrix * std_value + mean_value\n new_sound = sound.spawn(audio_matrix)\n",
"step-5": "from pydub import AudioSegment\nimport sys\nimport tensorflow as tf\nimport numpy as np\nfrom adwtmk.audio import Audio\nfrom adwtmk.encoder import *\nfrom adwtmk.decoder import *\nclass DAE(object):\n def __init__(self,model_name):\n self.model_name = model_name\n self.process = 0\n self.loss = 0\n self.origin_loss = 0\n self.core_size = 3\n self.batch_size = 600\n self.Epoches = 100\n\n def _get_batches(self,batch_size,data,core_size):\n assert batch_size % core_size == 0\n dim_0 = len(data)\n #print(\"dim_0:\",dim_0)\n length = len(data[0])\n num_batches = length // batch_size\n remainder_length = length % batch_size\n res = list()\n for i in range(num_batches):\n res.append(data[:,i*batch_size:(i+1)*batch_size])\n res = [np.array(x,np.float64).reshape(dim_0,batch_size//core_size,core_size) for x in res]\n remainder = data[:,-remainder_length:]\n return res,remainder \n\n \n\n\n #np.set_printoptions(threshold=1e6)\n #def _my_config():\n #core_size = 5\n #batch_size = 500\n #Epoches = 200\n\n def fast_training(self,sound):\n self.core_size = 100\n self.batch_size = 1000\n self.Epoches = 50\n self._main(sound,100,1000,50)\n\n def medium_training(self,sound):\n self.core_size = 5\n self.batch_size = 500\n self.Epoches = 100\n self._main(sound,5,500,100)\n\n def slow_training(self,sound):\n self.core_size = 3\n self.batch_size = 300\n self.Epoches = 100\n self._main(sound,3,300,150)\n\n def get_train_result_music_file(self):\n if (self.new_sound):\n return self.new_sound\n else:\n raise Exception(\"You should run training firstly !\")\n\n def get_current_training_process(self):\n return self.process\n\n def test(self,sound):\n audio_matrix = sound.get_reshaped_samples()\n #max_value = np.max(audio_matrix)\n #min_value = np.min(audio_matrix)\n #audio_matrix = (audio_matrix-min_value) / (max_value-min_value)\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix-mean_value) / std_value\n channels = 
len(audio_matrix)\n batches,remainder = self._get_batches(batch_size=self.batch_size,core_size=self.core_size,data=audio_matrix)\n losses = list()\n for i in range(len(batches)):\n dropout_indicator = np.random.rand()\n if (dropout_indicator <= 0.2):\n losses.append(np.sum(abs(batches[i])))\n batches[i] *= 0.00\n losses.append(0)\n sum_losses = np.sum(np.array(losses).reshape(-1))\n #print(\"losses:\")\n #print(np.array(losses).reshape(-1))\n #print(sum_losses)\n test_batches = np.array(batches,np.float64).reshape(channels,-1)\n test_batches = np.concatenate((test_batches,remainder),axis=1)\n count = audio_matrix.shape\n count = count[0]*count[1]\n self.origin_loss = sum_losses/(float)(count)\n test_batches = test_batches * std_value + mean_value\n test_sound = sound.spawn(test_batches)\n self._main(test_sound,self.core_size,self.batch_size,1,1.0)\n return test_sound,self.new_sound\n\n\n\n def _main(self,sound,core_size,batch_size,Epoches,drop_out_rate=0.9):\n self.new_sound = None\n self.process = 0\n self.loss = 0\n\n #print(sound.frame_rate,sound.duration_seconds, len(sound.get_array_of_samples()))\n\n audio_matrix = sound.get_reshaped_samples()\n #max_value = np.max(audio_matrix)\n #min_value = np.min(audio_matrix)\n #audio_matrix = (audio_matrix-min_value) / (max_value-min_value)\n mean_value = np.mean(audio_matrix)\n std_value = np.std(audio_matrix)\n audio_matrix = (audio_matrix-mean_value) / std_value\n\n\n batches,remainder = self._get_batches(batch_size=batch_size,core_size=core_size,data=audio_matrix)\n\n steps = batch_size // core_size\n channels = len(audio_matrix)\n\n\n best_output = \"\"\n\n with tf.Session() as sess:\n fw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(core_size),drop_out_rate)\n fw_rnn_cell = tf.contrib.rnn.MultiRNNCell([fw_cell]*2) \n bw_cell = tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(core_size),drop_out_rate)\n bw_rnn_cell = tf.contrib.rnn.MultiRNNCell([bw_cell]*2) \n input_data = 
tf.placeholder(shape=[channels,steps,core_size],dtype=tf.float64)\n in_weights = tf.get_variable(name=\"in_weight\",shape=[steps*core_size,steps*core_size],dtype=tf.float64)\n in_bias = tf.get_variable(name=\"in_bias\",shape=[core_size*steps],dtype=tf.float64)\n hidden_data = tf.tanh(tf.nn.xw_plus_b(tf.reshape(input_data,(channels,-1)),in_weights,in_bias))\n hidden_data_out = tf.reshape(hidden_data,[channels,steps,core_size])\n bi_outputs,last_state = tf.nn.bidirectional_dynamic_rnn(fw_rnn_cell,bw_rnn_cell,hidden_data_out,dtype=tf.float64)\n out_weights = tf.get_variable(name=\"out_weight\",shape=[steps*core_size*2,steps*core_size],dtype=tf.float64)\n out_bias = tf.get_variable(name=\"out_bias\",shape=[core_size*steps],dtype=tf.float64)\n outputs = tf.nn.xw_plus_b(tf.reshape(tf.concat(bi_outputs,2),(channels,-1)),out_weights,out_bias)\n #outputs,last_state = tf.nn.dynamic_rnn(fw_rnn_cell,input_data,dtype=tf.float64)\n loss = tf.reduce_mean(tf.sqrt(tf.squared_difference(tf.reshape(input_data,(channels,-1)),outputs)))\n train = tf.train.AdamOptimizer(0.001).minimize(loss)\n saver = tf.train.Saver()\n train_loss = 999999999\n try:\n saver.restore(sess,self.model_name)\n print(\"model restored\")\n except:\n sess.run(tf.global_variables_initializer())\n print(\"restore failed, randomly initialize\")\n for i in range(Epoches):\n loss_temp = 0\n outputs_temp = list()\n for item in batches:\n if (drop_out_rate < 1):\n epoch_outputs,epoch_loss,_ = sess.run([outputs,loss,train],feed_dict={\n input_data:item\n }) \n else:\n epoch_outputs,epoch_loss = sess.run([outputs,loss],feed_dict={\n input_data:item\n }) \n loss_temp += epoch_loss\n outputs_temp.append(epoch_outputs)\n loss_temp /= len(batches)\n if (i == 0 and drop_out_rate<1):\n self.origin_loss = loss_temp\n self.process = i/Epoches\n self.loss = loss_temp\n #print(\"process:%f,loss:%f\" % (i/Epoches,loss_temp))\n if (loss_temp < train_loss):\n train_loss = loss_temp\n if (drop_out_rate < 1):\n 
saver.save(sess,self.model_name)\n best_output = outputs_temp\n #best_output = best_output.append(remainder)\n best_output = np.array(best_output,np.float64).reshape(channels,-1)\n best_output = np.concatenate((best_output,remainder),axis=1)\n #best_output = best_output.T\n #best_output = best_output.reshape(-1)\n best_output = best_output*std_value+mean_value\n #best_output *= max_value-min_value\n #best_output += min_value\n\n self.new_sound = sound.spawn(best_output)\n #new_sound.export(\"test.flac\",\"flac\")\n #ex.add_artifact(filename=\"./test.flac\")\n #ex.add_artifact(filename=\"./rnn_model_key_multirnn_bi_input.ckpt*\")\n #audio_matrix = np.array(audio_matrix,np.float64).reshape(channels,-1)\n #audio_matrix = audio_matrix.T\n #audio_matrix = audio_matrix.reshape(-1)\n #audio_matrix = audio_matrix * (max_value-min_value)+min_value\n audio_matrix = audio_matrix * std_value + mean_value\n new_sound = sound.spawn(audio_matrix)\n #new_sound.export(\"test2.flac\",\"flac\")\n\n #sound = Audio.from_file(\"./mark.flac\", format=\"flac\")\n #fast_training(sound)\n",
"step-ids": [
4,
6,
9,
10,
12
]
}
|
[
4,
6,
9,
10,
12
] |
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path(r'', views.index, name='index'),
]
|
normal
|
{
"blob_id": "b0fad3847519bb18365a8cd4226d06e9d96a8308",
"index": 1258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('', views.index, name=\n 'index')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom . import views\nurlpatterns = [path('admin/', admin.site.urls), path('', views.index, name=\n 'index')]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import url\nfrom . import views\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(r'', views.index, name='index'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
COG_QUOTAS = (30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10)
COG_UNSEEN = 1
COG_BATTLED = 2
COG_DEFEATED = 3
COG_COMPLETE1 = 4
COG_COMPLETE2 = 5
<|reserved_special_token_1|>
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals
COG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))
COG_UNSEEN = 1
COG_BATTLED = 2
COG_DEFEATED = 3
COG_COMPLETE1 = 4
COG_COMPLETE2 = 5
|
flexible
|
{
"blob_id": "fdb680f12dfb4b29f25cfe4f7af80469dc4294cf",
"index": 2437,
"step-1": "<mask token>\n",
"step-2": "COG_QUOTAS = (30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10)\nCOG_UNSEEN = 1\nCOG_BATTLED = 2\nCOG_DEFEATED = 3\nCOG_COMPLETE1 = 4\nCOG_COMPLETE2 = 5\n",
"step-3": "# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.shtiker.CogPageGlobals\r\nCOG_QUOTAS = ((30, 25, 20, 15, 10, 5, 2, 1), (45, 40, 35, 30, 25, 20, 15, 10))\r\nCOG_UNSEEN = 1\r\nCOG_BATTLED = 2\r\nCOG_DEFEATED = 3\r\nCOG_COMPLETE1 = 4\r\nCOG_COMPLETE2 = 5",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Variables:
size: bytes
name: str
class cstruct:
structname: string
<|reserved_special_token_0|>
def cpreprosscssor():
maintokens = lexer(mainfile)
return
def cprocessor():
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Variables:
size: bytes
name: str
class cstruct:
structname: string
def claprocessor():
print(sys.argv)
i = 0
for stri in sys.argv:
if stri.__eq__('-o'):
outfilename = sys.argv(i + 1)
if stri.__eq__('-ASM') or stri.__eq__('-asm'):
assemblyfilename = sys.argv(i + 1)
if stri.__contains__('.c'):
sourcefiles.append(stri)
if stri.__contains__('.h'):
headerfiles.append(stri)
i += 1
return
def cpreprosscssor():
maintokens = lexer(mainfile)
return
def cprocessor():
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sourcefiles: str = []
headerfiles: str = []
mainfile: str = ''
<|reserved_special_token_0|>
class Variables:
size: bytes
name: str
class cstruct:
structname: string
def claprocessor():
print(sys.argv)
i = 0
for stri in sys.argv:
if stri.__eq__('-o'):
outfilename = sys.argv(i + 1)
if stri.__eq__('-ASM') or stri.__eq__('-asm'):
assemblyfilename = sys.argv(i + 1)
if stri.__contains__('.c'):
sourcefiles.append(stri)
if stri.__contains__('.h'):
headerfiles.append(stri)
i += 1
return
def cpreprosscssor():
maintokens = lexer(mainfile)
return
def cprocessor():
return
if __name__ == '__main__':
claprocessor()
<|reserved_special_token_1|>
flags = []
sourcefiles: str = []
headerfiles: str = []
mainfile: str = ''
outfilename = 'a.out'
assemblyfilename = 'a.asm'
includedfilenames = []
class Variables:
size: bytes
name: str
class cstruct:
structname: string
def claprocessor():
print(sys.argv)
i = 0
for stri in sys.argv:
if stri.__eq__('-o'):
outfilename = sys.argv(i + 1)
if stri.__eq__('-ASM') or stri.__eq__('-asm'):
assemblyfilename = sys.argv(i + 1)
if stri.__contains__('.c'):
sourcefiles.append(stri)
if stri.__contains__('.h'):
headerfiles.append(stri)
i += 1
return
def cpreprosscssor():
maintokens = lexer(mainfile)
return
def cprocessor():
return
if __name__ == '__main__':
claprocessor()
<|reserved_special_token_1|>
flags =[]
sourcefiles:str = []
headerfiles:str = []
mainfile:str = ""
outfilename = "a.out"
assemblyfilename = "a.asm"
includedfilenames = []
class Variables:
size:bytes
name:str
class cstruct:
structname:string
def claprocessor():
print(sys.argv)
i=0
for stri in sys.argv:
if stri.__eq__("-o"):
outfilename=sys.argv(i+1)
if stri.__eq__("-ASM") or stri.__eq__("-asm") :
assemblyfilename = sys.argv(i+1)
if stri.__contains__(".c"):
sourcefiles.append(stri)
if stri.__contains__(".h"):
headerfiles.append(stri)
i += 1
return
def cpreprosscssor():
maintokens = lexer(mainfile)
return
def cprocessor():
return
if __name__ == '__main__':
claprocessor()
|
flexible
|
{
"blob_id": "24187284ff3e03cf79b8545415005c71f9355ddc",
"index": 9062,
"step-1": "<mask token>\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\n<mask token>\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\ndef claprocessor():\n print(sys.argv)\n i = 0\n for stri in sys.argv:\n if stri.__eq__('-o'):\n outfilename = sys.argv(i + 1)\n if stri.__eq__('-ASM') or stri.__eq__('-asm'):\n assemblyfilename = sys.argv(i + 1)\n if stri.__contains__('.c'):\n sourcefiles.append(stri)\n if stri.__contains__('.h'):\n headerfiles.append(stri)\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\nsourcefiles: str = []\nheaderfiles: str = []\nmainfile: str = ''\n<mask token>\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\ndef claprocessor():\n print(sys.argv)\n i = 0\n for stri in sys.argv:\n if stri.__eq__('-o'):\n outfilename = sys.argv(i + 1)\n if stri.__eq__('-ASM') or stri.__eq__('-asm'):\n assemblyfilename = sys.argv(i + 1)\n if stri.__contains__('.c'):\n sourcefiles.append(stri)\n if stri.__contains__('.h'):\n headerfiles.append(stri)\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\nif __name__ == '__main__':\n claprocessor()\n",
"step-4": "flags = []\nsourcefiles: str = []\nheaderfiles: str = []\nmainfile: str = ''\noutfilename = 'a.out'\nassemblyfilename = 'a.asm'\nincludedfilenames = []\n\n\nclass Variables:\n size: bytes\n name: str\n\n\nclass cstruct:\n structname: string\n\n\ndef claprocessor():\n print(sys.argv)\n i = 0\n for stri in sys.argv:\n if stri.__eq__('-o'):\n outfilename = sys.argv(i + 1)\n if stri.__eq__('-ASM') or stri.__eq__('-asm'):\n assemblyfilename = sys.argv(i + 1)\n if stri.__contains__('.c'):\n sourcefiles.append(stri)\n if stri.__contains__('.h'):\n headerfiles.append(stri)\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n return\n\n\nif __name__ == '__main__':\n claprocessor()\n",
"step-5": "flags =[]\nsourcefiles:str = []\nheaderfiles:str = []\nmainfile:str = \"\"\noutfilename = \"a.out\"\nassemblyfilename = \"a.asm\"\nincludedfilenames = []\n\n\nclass Variables:\n size:bytes\n name:str\n\n\nclass cstruct:\n structname:string\n\n\ndef claprocessor():\n print(sys.argv)\n i=0\n for stri in sys.argv:\n if stri.__eq__(\"-o\"):\n outfilename=sys.argv(i+1)\n if stri.__eq__(\"-ASM\") or stri.__eq__(\"-asm\") :\n assemblyfilename = sys.argv(i+1)\n if stri.__contains__(\".c\"):\n sourcefiles.append(stri)\n if stri.__contains__(\".h\"):\n headerfiles.append(stri)\n\n i += 1\n return\n\n\ndef cpreprosscssor():\n maintokens = lexer(mainfile)\n return\n\n\ndef cprocessor():\n\n return\n\n\nif __name__ == '__main__':\n claprocessor()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'../sherlock')))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
import os
import subprocess as sp
from time import sleep
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'../sherlock')))
<|reserved_special_token_1|>
"""Sherlock Tests
This package contains various submodules used to run tests.
"""
import sys
import os
import subprocess as sp
from time import sleep
# uncomment this if using nose
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../sherlock')))
# import sherlock
|
flexible
|
{
"blob_id": "8f7b1313ba31d761edcadac7b0d04b62f7af8dff",
"index": 4759,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../sherlock')))\n",
"step-3": "<mask token>\nimport sys\nimport os\nimport subprocess as sp\nfrom time import sleep\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '../sherlock')))\n",
"step-4": "\"\"\"Sherlock Tests\r\n\r\nThis package contains various submodules used to run tests.\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport subprocess as sp\r\nfrom time import sleep\r\n\r\n# uncomment this if using nose\r\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../sherlock')))\r\n\r\n# import sherlock",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
import math
from python.src.sort.insertion import Insertion
from python.src.sort.selection import Selection
from python.src.sort.shell import Shell
from python.test.util.utilities import Utilities
class ElementarySortTest(unittest.TestCase):
def setUp(self):
self.n = 1000
def test_insertion_sort(self):
insertion = Insertion()
actual = Utilities.generate_random_array(self.n)
expected = list(actual)
actual.sort()
insertion.sort(expected)
self.assertEqual(expected, actual)
self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)
self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)
def test_insertion_sort_sub_array(self):
insertion = Insertion()
input = Utilities.generate_random_array(self.n)
low = math.floor(0.1 * self.n)
high = math.floor(0.9 * self.n)
insertion.sort(input, low, high)
self.assertTrue(Utilities.is_sorted(input, low, high))
self.assertFalse(Utilities.is_sorted(input, 0, len(input)))
def test_selection_sort(self):
selection = Selection()
actual = Utilities.generate_random_array(self.n)
expected = list(actual)
actual.sort()
selection.sort(expected)
self.assertEqual(expected, actual)
self.assertEqual(499500, selection.compares)
self.assertGreaterEqual(selection.swaps, 999)
self.assertLessEqual(selection.swaps, 1000)
def test_shell_sort(self):
shell = Shell()
actual = Utilities.generate_random_array(self.n)
expected = list(actual)
actual.sort()
shell.sort(expected)
self.assertEqual(expected, actual)
self.assertLess(13000, shell.compares)
self.assertLess(8000, shell.swaps)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "779ef8942bfb55bf017a8da9dfe34c03ac574a9a",
"index": 2591,
"step-1": "<mask token>\n\n\nclass ElementarySortTest(unittest.TestCase):\n <mask token>\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ElementarySortTest(unittest.TestCase):\n\n def setUp(self):\n self.n = 1000\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ElementarySortTest(unittest.TestCase):\n\n def setUp(self):\n self.n = 1000\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport math\nfrom python.src.sort.insertion import Insertion\nfrom python.src.sort.selection import Selection\nfrom python.src.sort.shell import Shell\nfrom python.test.util.utilities import Utilities\n\n\nclass ElementarySortTest(unittest.TestCase):\n\n def setUp(self):\n self.n = 1000\n\n def test_insertion_sort(self):\n insertion = Insertion()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n insertion.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(insertion.compares, (self.n ** 2 - self.n) / 2)\n self.assertLess(insertion.swaps, (self.n ** 2 - self.n) / 2)\n\n def test_insertion_sort_sub_array(self):\n insertion = Insertion()\n input = Utilities.generate_random_array(self.n)\n low = math.floor(0.1 * self.n)\n high = math.floor(0.9 * self.n)\n insertion.sort(input, low, high)\n self.assertTrue(Utilities.is_sorted(input, low, high))\n self.assertFalse(Utilities.is_sorted(input, 0, len(input)))\n\n def test_selection_sort(self):\n selection = Selection()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n selection.sort(expected)\n self.assertEqual(expected, actual)\n self.assertEqual(499500, selection.compares)\n self.assertGreaterEqual(selection.swaps, 999)\n self.assertLessEqual(selection.swaps, 1000)\n\n def test_shell_sort(self):\n shell = Shell()\n actual = Utilities.generate_random_array(self.n)\n expected = list(actual)\n actual.sort()\n shell.sort(expected)\n self.assertEqual(expected, actual)\n self.assertLess(13000, shell.compares)\n self.assertLess(8000, shell.swaps)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class ComposePipelines:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ComposePipelines:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __call__(self, image):
augmented_image = image.copy()
newpipeline = dict()
for i, pipeline in enumerate(self.pipelines):
data_output = pipeline.augment(augmented_image)
augmented_image = data_output['output']
for key in data_output.keys():
newkey = 'pipeline' + str(i) + '-' + key
newpipeline[newkey] = data_output[key]
return newpipeline
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ComposePipelines:
<|reserved_special_token_0|>
def __init__(self, pipelines):
self.pipelines = pipelines
def __call__(self, image):
augmented_image = image.copy()
newpipeline = dict()
for i, pipeline in enumerate(self.pipelines):
data_output = pipeline.augment(augmented_image)
augmented_image = data_output['output']
for key in data_output.keys():
newkey = 'pipeline' + str(i) + '-' + key
newpipeline[newkey] = data_output[key]
return newpipeline
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ComposePipelines:
"""The composition of multiple AugraphyPipelines.
Define AugraphyPipelines elsewhere, then use this to compose them.
ComposePipelines objects are callable on images (as numpy.ndarrays).
:param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.
:type pipelines: list or tuple
"""
def __init__(self, pipelines):
self.pipelines = pipelines
def __call__(self, image):
augmented_image = image.copy()
newpipeline = dict()
for i, pipeline in enumerate(self.pipelines):
data_output = pipeline.augment(augmented_image)
augmented_image = data_output['output']
for key in data_output.keys():
newkey = 'pipeline' + str(i) + '-' + key
newpipeline[newkey] = data_output[key]
return newpipeline
<|reserved_special_token_1|>
"""This module contains a class supporting composition of AugraphyPipelines"""
class ComposePipelines:
"""The composition of multiple AugraphyPipelines.
Define AugraphyPipelines elsewhere, then use this to compose them.
ComposePipelines objects are callable on images (as numpy.ndarrays).
:param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.
:type pipelines: list or tuple
"""
def __init__(self, pipelines):
self.pipelines = pipelines
def __call__(self, image):
augmented_image = image.copy()
newpipeline = dict()
for i, pipeline in enumerate(self.pipelines):
data_output = pipeline.augment(augmented_image)
augmented_image = data_output["output"]
for key in data_output.keys():
newkey = "pipeline" + str(i) + "-" + key
newpipeline[newkey] = data_output[key]
return newpipeline
|
flexible
|
{
"blob_id": "13c55c313c740edce48fc979e8956fdd018e8aab",
"index": 9716,
"step-1": "<mask token>\n\n\nclass ComposePipelines:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ComposePipelines:\n <mask token>\n <mask token>\n\n def __call__(self, image):\n augmented_image = image.copy()\n newpipeline = dict()\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output['output']\n for key in data_output.keys():\n newkey = 'pipeline' + str(i) + '-' + key\n newpipeline[newkey] = data_output[key]\n return newpipeline\n",
"step-3": "<mask token>\n\n\nclass ComposePipelines:\n <mask token>\n\n def __init__(self, pipelines):\n self.pipelines = pipelines\n\n def __call__(self, image):\n augmented_image = image.copy()\n newpipeline = dict()\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output['output']\n for key in data_output.keys():\n newkey = 'pipeline' + str(i) + '-' + key\n newpipeline[newkey] = data_output[key]\n return newpipeline\n",
"step-4": "<mask token>\n\n\nclass ComposePipelines:\n \"\"\"The composition of multiple AugraphyPipelines.\n Define AugraphyPipelines elsewhere, then use this to compose them.\n ComposePipelines objects are callable on images (as numpy.ndarrays).\n\n :param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.\n :type pipelines: list or tuple\n \"\"\"\n\n def __init__(self, pipelines):\n self.pipelines = pipelines\n\n def __call__(self, image):\n augmented_image = image.copy()\n newpipeline = dict()\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output['output']\n for key in data_output.keys():\n newkey = 'pipeline' + str(i) + '-' + key\n newpipeline[newkey] = data_output[key]\n return newpipeline\n",
"step-5": "\"\"\"This module contains a class supporting composition of AugraphyPipelines\"\"\"\n\n\nclass ComposePipelines:\n \"\"\"The composition of multiple AugraphyPipelines.\n Define AugraphyPipelines elsewhere, then use this to compose them.\n ComposePipelines objects are callable on images (as numpy.ndarrays).\n\n :param pipelines: A list contains multiple augraphy.base.AugraphyPipeline.\n :type pipelines: list or tuple\n \"\"\"\n\n def __init__(self, pipelines):\n self.pipelines = pipelines\n\n def __call__(self, image):\n\n augmented_image = image.copy()\n newpipeline = dict()\n\n for i, pipeline in enumerate(self.pipelines):\n data_output = pipeline.augment(augmented_image)\n augmented_image = data_output[\"output\"]\n\n for key in data_output.keys():\n newkey = \"pipeline\" + str(i) + \"-\" + key\n newpipeline[newkey] = data_output[key]\n\n return newpipeline\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import datetime
import traceback
import json
import requests
import logging
from model import Product
from naver_api import naver_client_id, naver_client_secret
DEBUG = False
if not DEBUG:
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
# print(naver_client_id)
# print(naver_client_secret)
products = list(Product.scan(Product.do_crawl==True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
|
normal
|
{
"blob_id": "76905171602cbeb53903a4b0259685288da3a083",
"index": 6365,
"step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-2": "<mask token>\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n<mask token>\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-3": "<mask token>\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-4": "import os\nimport datetime\nimport traceback\nimport json\nimport requests\nimport logging\nfrom model import Product\nfrom naver_api import naver_client_id, naver_client_secret\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-5": "import os\nimport datetime\nimport traceback\nimport json\nimport requests\nimport logging\n\nfrom model import Product\nfrom naver_api import naver_client_id, naver_client_secret\n\n\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n # print(naver_client_id)\n # print(naver_client_secret)\n\n products = list(Product.scan(Product.do_crawl==True))\n\n for product in products:\n product.search_lowest_price()\n\n print('{} product(s) crawled'.format(len(products)))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import time
from selenium import webdriver
import os
from selenium.webdriver.common.by import By
with open("file.txt", "w") as file:
content = file.write("Tanyuhich")
try:
browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/file_input.html")
input1 = browser.find_element_by_name('firstname')
input1.send_keys("Ivan")
input2 = browser.find_element_by_name('lastname')
input2.send_keys("Petrov")
input3 = browser.find_element_by_name('email')
input3.send_keys("tati.dmi@mail.ru")
current_dir = os.path.abspath(os.path.dirname(__file__))
path = os.getcwd() + '/' + file.name
element = browser.find_element(By.CSS_SELECTOR, "[type='file']")
element.send_keys(path)
button = browser.find_element_by_css_selector("button.btn")
button.click()
finally:
# успеваем скопировать код за 30 секунд
time.sleep(30)
# закрываем браузер после всех манипуляций
browser.quit()
# не забываем оставить пустую строку в конце файла
|
normal
|
{
"blob_id": "03270285c6dc99d8dcb9804270421f36b573048c",
"index": 2863,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('file.txt', 'w') as file:\n content = file.write('Tanyuhich')\ntry:\n browser = webdriver.Chrome()\n browser.get('http://suninjuly.github.io/file_input.html')\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys('Ivan')\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys('Petrov')\n input3 = browser.find_element_by_name('email')\n input3.send_keys('tati.dmi@mail.ru')\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\nfinally:\n time.sleep(30)\n browser.quit()\n",
"step-3": "import time\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.by import By\nwith open('file.txt', 'w') as file:\n content = file.write('Tanyuhich')\ntry:\n browser = webdriver.Chrome()\n browser.get('http://suninjuly.github.io/file_input.html')\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys('Ivan')\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys('Petrov')\n input3 = browser.find_element_by_name('email')\n input3.send_keys('tati.dmi@mail.ru')\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector('button.btn')\n button.click()\nfinally:\n time.sleep(30)\n browser.quit()\n",
"step-4": "import time\nfrom selenium import webdriver\nimport os\nfrom selenium.webdriver.common.by import By\n\nwith open(\"file.txt\", \"w\") as file:\n content = file.write(\"Tanyuhich\")\n \ntry:\n browser = webdriver.Chrome()\n browser.get(\"http://suninjuly.github.io/file_input.html\")\n input1 = browser.find_element_by_name('firstname')\n input1.send_keys(\"Ivan\")\n input2 = browser.find_element_by_name('lastname')\n input2.send_keys(\"Petrov\")\n input3 = browser.find_element_by_name('email')\n input3.send_keys(\"tati.dmi@mail.ru\")\n current_dir = os.path.abspath(os.path.dirname(__file__))\n path = os.getcwd() + '/' + file.name\n element = browser.find_element(By.CSS_SELECTOR, \"[type='file']\")\n element.send_keys(path)\n button = browser.find_element_by_css_selector(\"button.btn\")\n button.click()\n\nfinally:\n # успеваем скопировать код за 30 секунд\n time.sleep(30)\n # закрываем браузер после всех манипуляций\n browser.quit()\n\n# не забываем оставить пустую строку в конце файла",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(html.decode('utf-8'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
seesion = requests.Session()
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'
}
cookie = {'Cookie':
'_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'
}
html = requests.get('https://order.mi.com/portal?r=92853.1505118552',
cookies=cookie, headers=header).content
print(html.decode('utf-8'))
<|reserved_special_token_1|>
import requests
seesion = requests.Session()
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'
}
cookie = {'Cookie':
'_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'
}
html = requests.get('https://order.mi.com/portal?r=92853.1505118552',
cookies=cookie, headers=header).content
print(html.decode('utf-8'))
|
flexible
|
{
"blob_id": "8c652f30cd256912512b6b91d1682af7da0ff915",
"index": 8265,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(html.decode('utf-8'))\n",
"step-3": "<mask token>\nseesion = requests.Session()\nheader = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'\n }\ncookie = {'Cookie':\n '_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'\n }\nhtml = requests.get('https://order.mi.com/portal?r=92853.1505118552',\n cookies=cookie, headers=header).content\nprint(html.decode('utf-8'))\n",
"step-4": "import requests\nseesion = requests.Session()\nheader = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3387.400 QQBrowser/9.6.11984.400'\n }\ncookie = {'Cookie':\n '_ga=GA1.2.1866009938.1500885157; xmuuid=XMGUEST-B6484440-71B8-11E7-AF2E-EFCFEDA1C27A; muuid=1502080306158_2635; userId=912885850; cUserId=whd2FtY90KvyEb8BS1k8-0muAAo; xm_order_btauth=84949f1c6032d4129aca583e69b71c38; xm_link_history=t6sadFA60Z%2BMsuivPj8AHZ54rNmHsBBhyTKGmbJVUHs%3D; euid=hXV9isZHjBFFnkz317%2Fx9A%3D%3D; mUserId=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; axmuid=kczvlmbCAd6Nb8wSLo5Gfza0sUdqAduE527UdXEgGWE%3D; serviceToken=T6IIRQVomBeH%2FIys3W50AyGMBG%2BPHq9r6Xg8AHz%2BDs%2BADmxH%2BnGl0TqSTF%2Bvr75WoEgpebNWrd05nnmzaFFoa%2BdVk5lKX1RSBUTsePGYTNFwD9KeoLheKiXXweR3R1Mf67Q%2FZmBoqgT44iP4ZOYTjRlLKDBn%2BUiE2haBwG%2FLDfs%3D; xm_user_www_num=0; XM_912885850_UN=912885850; log_code=81190ccc4d52f577-5cb9ba924c37c3f1|https%3A%2F%2Fwww.mi.com%2Findex.html; lastsource=account.xiaomi.com; mstz=81190ccc4d52f577-5cb9ba924c37c3f1|%2F%2Faccount.xiaomi.com%2F|1028724705.19|pcpid|https%253A%252F%252Fwww.mi.com%252Findex.html|; mstuid=1501042380618_8910; xm_vistor=1501042380618_8910_1505118306768-1505118556462; pageid=f4f3444fdfa3d27a; xm_order_sid=1d1097d4897ab755d48b95b4bda6ab14'\n }\nhtml = requests.get('https://order.mi.com/portal?r=92853.1505118552',\n cookies=cookie, headers=header).content\nprint(html.decode('utf-8'))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def prime_sieve(n):
if n==2: return [2]
elif n<2: return []
s=range(3,n+1,2)
mroot = n ** 0.5
half=(n+1)/2-1
i=0
m=3
while m <= mroot:
if s[i]:
j=(m*m-3)/2
s[j]=0
while j<half:
s[j]=0
j+=m
i=i+1
m=2*i+3
return [2]+[x for x in s if x]
ps = prime_sieve(1000000)
def get_primes_upto(n):
i = 0
while ps[i] <= n:
i += 1
return ps[0:i+1];
def trial_division(n):
if n == 1: return [1]
primes = get_primes_upto(int(n**0.5) + 1)
prime_factors = []
for p in primes:
if p*p > n: break
while n % p == 0:
prime_factors.append(p)
n //= p
if n > 1: prime_factors.append(n)
return prime_factors
def unique_factors(n):
return len(set(trial_division(n)))
fs = [0]
c = 0
for i in range(1,1000000):
c+= 1
fs.append(unique_factors(i))
if len(fs) > 4:
if fs[-4:] == [4,4,4,4]:
print c -3
break
|
normal
|
{
"blob_id": "5771f49ad5254588f1683a8d45aa81ce472bb562",
"index": 30,
"step-1": "\ndef prime_sieve(n): \n\tif n==2: return [2]\n\telif n<2: return []\n\ts=range(3,n+1,2)\n\tmroot = n ** 0.5\n\thalf=(n+1)/2-1\n\ti=0\n\tm=3\n\twhile m <= mroot:\n\t\tif s[i]:\n\t\t\tj=(m*m-3)/2\n\t\t\ts[j]=0\n\t\t\twhile j<half:\n\t\t\t\ts[j]=0\n\t\t\t\tj+=m\n\t\ti=i+1\n\t\tm=2*i+3\n\treturn [2]+[x for x in s if x]\n\nps = prime_sieve(1000000)\n\ndef get_primes_upto(n):\n i = 0\n while ps[i] <= n:\n i += 1\n return ps[0:i+1];\n\ndef trial_division(n):\n if n == 1: return [1]\n primes = get_primes_upto(int(n**0.5) + 1)\n prime_factors = []\n \n for p in primes:\n if p*p > n: break\n while n % p == 0:\n prime_factors.append(p)\n n //= p\n if n > 1: prime_factors.append(n)\n \n return prime_factors\n\ndef unique_factors(n):\n return len(set(trial_division(n)))\n\nfs = [0]\nc = 0\nfor i in range(1,1000000):\n c+= 1\n fs.append(unique_factors(i))\n if len(fs) > 4:\n if fs[-4:] == [4,4,4,4]:\n print c -3\n break\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lower_upper_confidence_intervals(avg, SD):
lower = avg - 2 * SD
upper = avg + 2 * SD
return lower, upper
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(
'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')
<|reserved_special_token_0|>
def lower_upper_confidence_intervals(avg, SD):
lower = avg - 2 * SD
upper = avg + 2 * SD
return lower, upper
print(lower_upper_confidence_intervals(40, 2.71))
print(get_z_from_p(0.975))
<|reserved_special_token_1|>
import sys
import os
sys.path.append(
'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')
import normal_distribution_06
def lower_upper_confidence_intervals(avg, SD):
lower = avg - 2 * SD
upper = avg + 2 * SD
return lower, upper
print(lower_upper_confidence_intervals(40, 2.71))
print(get_z_from_p(0.975))
<|reserved_special_token_1|>
import sys
import os
sys.path.append("C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive")
import normal_distribution_06
#import sampling_distributions_07
def lower_upper_confidence_intervals(avg, SD):
#avg is x bar. The mean value at the "would be" point. ie Bieber Tweeter
#SD is standard error (standard deviation of population dataset dvided by sqrt(number_in_sample)
lower = avg-2*SD
upper = avg+2*SD
return((lower, upper))
#7. Quiz: Confidence Interval Bounds
print(lower_upper_confidence_intervals(40, 2.71))
#8. Quiz: Exact Z-Scores
print(get_z_from_p(0.975))
|
flexible
|
{
"blob_id": "d423b0bc6cd9ea9795317750141ad5f5eab01636",
"index": 1886,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lower_upper_confidence_intervals(avg, SD):\n lower = avg - 2 * SD\n upper = avg + 2 * SD\n return lower, upper\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append(\n 'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')\n<mask token>\n\n\ndef lower_upper_confidence_intervals(avg, SD):\n lower = avg - 2 * SD\n upper = avg + 2 * SD\n return lower, upper\n\n\nprint(lower_upper_confidence_intervals(40, 2.71))\nprint(get_z_from_p(0.975))\n",
"step-4": "import sys\nimport os\nsys.path.append(\n 'C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive')\nimport normal_distribution_06\n\n\ndef lower_upper_confidence_intervals(avg, SD):\n lower = avg - 2 * SD\n upper = avg + 2 * SD\n return lower, upper\n\n\nprint(lower_upper_confidence_intervals(40, 2.71))\nprint(get_z_from_p(0.975))\n",
"step-5": "import sys\nimport os\nsys.path.append(\"C:/Users/Laptop/Documents/Repos/udacity_stats_functions/descriptive\")\nimport normal_distribution_06\n#import sampling_distributions_07\n\ndef lower_upper_confidence_intervals(avg, SD):\n #avg is x bar. The mean value at the \"would be\" point. ie Bieber Tweeter\n #SD is standard error (standard deviation of population dataset dvided by sqrt(number_in_sample)\n lower = avg-2*SD\n upper = avg+2*SD\n return((lower, upper))\n \n#7. Quiz: Confidence Interval Bounds\nprint(lower_upper_confidence_intervals(40, 2.71))\n\n#8. Quiz: Exact Z-Scores\nprint(get_z_from_p(0.975))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ax.plot(data['Date'], data['HCHFI'], label='HCHFI')
ax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')
ax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=
'Hushen300 Index')
plt.xlabel('Time/year')
plt.ylabel('Index Point')
plt.title('Comparison of HCHFI,HS300 and SSE Composite Index')
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = pd.read_excel('data_SHA.xls')
fig, ax = plt.subplots()
ax.plot(data['Date'], data['HCHFI'], label='HCHFI')
ax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')
ax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=
'Hushen300 Index')
plt.xlabel('Time/year')
plt.ylabel('Index Point')
plt.title('Comparison of HCHFI,HS300 and SSE Composite Index')
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
data = pd.read_excel('data_SHA.xls')
fig, ax = plt.subplots()
ax.plot(data['Date'], data['HCHFI'], label='HCHFI')
ax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')
ax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=
'Hushen300 Index')
plt.xlabel('Time/year')
plt.ylabel('Index Point')
plt.title('Comparison of HCHFI,HS300 and SSE Composite Index')
plt.legend(loc='upper right')
plt.ylim(0, 7000)
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
data=pd.read_excel("data_SHA.xls")
fig,ax=plt.subplots()
ax.plot(data["Date"],data["HCHFI"],label="HCHFI")
ax.plot(data["Date"],data["SHA"]/2.67547,label="SSE Composite Index")
ax.plot(data["Date"],data["Hushen300 Index"]/3.20393,label="Hushen300 Index")
plt.xlabel("Time/year")
plt.ylabel("Index Point")
plt.title("Comparison of HCHFI,HS300 and SSE Composite Index")
plt.legend(loc='upper right')
plt.ylim(0,7000)
plt.show()
|
flexible
|
{
"blob_id": "91df15d6d89d070677704572d35218558317a6ec",
"index": 117,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.plot(data['Date'], data['HCHFI'], label='HCHFI')\nax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')\nax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=\n 'Hushen300 Index')\nplt.xlabel('Time/year')\nplt.ylabel('Index Point')\nplt.title('Comparison of HCHFI,HS300 and SSE Composite Index')\nplt.legend(loc='upper right')\nplt.ylim(0, 7000)\nplt.show()\n",
"step-3": "<mask token>\ndata = pd.read_excel('data_SHA.xls')\nfig, ax = plt.subplots()\nax.plot(data['Date'], data['HCHFI'], label='HCHFI')\nax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')\nax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=\n 'Hushen300 Index')\nplt.xlabel('Time/year')\nplt.ylabel('Index Point')\nplt.title('Comparison of HCHFI,HS300 and SSE Composite Index')\nplt.legend(loc='upper right')\nplt.ylim(0, 7000)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\ndata = pd.read_excel('data_SHA.xls')\nfig, ax = plt.subplots()\nax.plot(data['Date'], data['HCHFI'], label='HCHFI')\nax.plot(data['Date'], data['SHA'] / 2.67547, label='SSE Composite Index')\nax.plot(data['Date'], data['Hushen300 Index'] / 3.20393, label=\n 'Hushen300 Index')\nplt.xlabel('Time/year')\nplt.ylabel('Index Point')\nplt.title('Comparison of HCHFI,HS300 and SSE Composite Index')\nplt.legend(loc='upper right')\nplt.ylim(0, 7000)\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\ndata=pd.read_excel(\"data_SHA.xls\")\nfig,ax=plt.subplots()\nax.plot(data[\"Date\"],data[\"HCHFI\"],label=\"HCHFI\")\nax.plot(data[\"Date\"],data[\"SHA\"]/2.67547,label=\"SSE Composite Index\")\nax.plot(data[\"Date\"],data[\"Hushen300 Index\"]/3.20393,label=\"Hushen300 Index\")\nplt.xlabel(\"Time/year\")\nplt.ylabel(\"Index Point\")\nplt.title(\"Comparison of HCHFI,HS300 and SSE Composite Index\")\nplt.legend(loc='upper right')\nplt.ylim(0,7000)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import torch.nn.functional as F
import csv
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden)
self.predict = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
h1 = F.relu(self.hidden(x))
y = self.predict(h1)
return y
net = Net(n_feature=40, n_hidden=10, n_output=20)
net.load_state_dict(torch.load('net_data_multi.pkl'))
file_test = open('dataset/test_data.csv','r')
line = file_test.readline()
file_out = open('result_multi.csv','w')
file_out.write('caseid,midprice\n')
case = 1
while case < 143:
line = file_test.readline().split(',')
if len(line) < 9:
case += 1
while case <= 153:
x = torch.FloatTensor(40).zero_()
y = torch.FloatTensor(20).zero_()
for ct in range(10):
line = file_test.readline()
if line == '':
break
line = line.split(',')
x[ct*4] = float(line[6])
x[ct*4+1] = float(line[7])/10000
x[ct*4+2] = float(line[8])
x[ct*4+3] = float(line[9])/10000
prediction = net(x)
average = 0
for k in range(10):
average += prediction.data.numpy()[k]
average = 1.0*average/10
file_out.write(str(case)+','+str(average)+'\n')
#print(str(case)+','+str(average)+'\n')
line = file_test.readline()
case += 1
file_test.close()
file_out.close()
print('test complete')
|
normal
|
{
"blob_id": "e221553f866de8b3e175197a40982506bf8c1ef9",
"index": 205,
"step-1": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\n<mask token>\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\n<mask token>\nfile_out.write('caseid,midprice\\n')\n<mask token>\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n x[ct * 4] = float(line[6])\n x[ct * 4 + 1] = float(line[7]) / 10000\n x[ct * 4 + 2] = float(line[8])\n x[ct * 4 + 3] = float(line[9]) / 10000\n prediction = net(x)\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0 * average / 10\n file_out.write(str(case) + ',' + str(average) + '\\n')\n line = file_test.readline()\n case += 1\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-3": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\nnet = Net(n_feature=40, n_hidden=10, n_output=20)\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\nfile_test = open('dataset/test_data.csv', 'r')\nline = file_test.readline()\nfile_out = open('result_multi.csv', 'w')\nfile_out.write('caseid,midprice\\n')\ncase = 1\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n x[ct * 4] = float(line[6])\n x[ct * 4 + 1] = float(line[7]) / 10000\n x[ct * 4 + 2] = float(line[8])\n x[ct * 4 + 3] = float(line[9]) / 10000\n prediction = net(x)\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0 * average / 10\n file_out.write(str(case) + ',' + str(average) + '\\n')\n line = file_test.readline()\n case += 1\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-4": "import torch\nimport torch.nn.functional as F\nimport csv\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\nnet = Net(n_feature=40, n_hidden=10, n_output=20)\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\nfile_test = open('dataset/test_data.csv', 'r')\nline = file_test.readline()\nfile_out = open('result_multi.csv', 'w')\nfile_out.write('caseid,midprice\\n')\ncase = 1\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n x[ct * 4] = float(line[6])\n x[ct * 4 + 1] = float(line[7]) / 10000\n x[ct * 4 + 2] = float(line[8])\n x[ct * 4 + 3] = float(line[9]) / 10000\n prediction = net(x)\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0 * average / 10\n file_out.write(str(case) + ',' + str(average) + '\\n')\n line = file_test.readline()\n case += 1\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-5": "import torch\nimport torch.nn.functional as F\nimport csv\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1) \n return y\n\n\nnet = Net(n_feature=40, n_hidden=10, n_output=20)\n\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\n\nfile_test = open('dataset/test_data.csv','r')\nline = file_test.readline()\n\nfile_out = open('result_multi.csv','w')\nfile_out.write('caseid,midprice\\n')\n\ncase = 1\n\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\n \nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n\n x[ct*4] = float(line[6])\n x[ct*4+1] = float(line[7])/10000\n x[ct*4+2] = float(line[8])\n x[ct*4+3] = float(line[9])/10000\n\n prediction = net(x)\n\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0*average/10\n\n file_out.write(str(case)+','+str(average)+'\\n')\n #print(str(case)+','+str(average)+'\\n')\n\n line = file_test.readline()\n case += 1\n\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
SQL_INSERCION_COCHE = "INSERT INTO tabla_coches(marca, modelo, color, motor, precio) VALUES (%s,%s,%s,%s,%s);"
SQL_LISTADO_COCHES = "SELECT * FROM tabla_coches;"
|
normal
|
{
"blob_id": "fd41e6d8530d24a8a564572af46078be77e8177f",
"index": 6573,
"step-1": "<mask token>\n",
"step-2": "SQL_INSERCION_COCHE = (\n 'INSERT INTO tabla_coches(marca, modelo, color, motor, precio) VALUES (%s,%s,%s,%s,%s);'\n )\nSQL_LISTADO_COCHES = 'SELECT * FROM tabla_coches;'\n",
"step-3": "SQL_INSERCION_COCHE = \"INSERT INTO tabla_coches(marca, modelo, color, motor, precio) VALUES (%s,%s,%s,%s,%s);\"\n\nSQL_LISTADO_COCHES = \"SELECT * FROM tabla_coches;\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from math import sqrt
from Engine.regulators.PID import PID
from Engine.regulators.regulator_base_class import RegulatorBaseClass
from Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED
from Util import Pose
from Util.geometry import clamp, normalize
from Util.pose import Position
from config.config import Config
config = Config()
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4 # lower = bigger path correction
emergency_break_constant = 0.4 # Higher = higher correction of trajectory
emergency_break_safety_factor = 1 # lower = bigger break distance
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = robot.position_error * speed_norm / robot.position_error.norm + path_correction * speed_norm / self.v_d
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = self.last_commanded_velocity - robot.velocity.position
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
acceleration_offset = 1 # on veut que le robot soit plus aggressif en début de trajet
emergency_break_offset = self.emergency_break_constant / self.dt * (robot.current_speed / 1000) # on veut que le robot break le plus qu'il peut si on s'approche trop vite de la target
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = robot.current_speed + acc * self.dt * acceleration_offset
else:
if self.is_distance_for_break(robot, acc, offset=1):
next_speed = robot.current_speed + acc * self.dt * acceleration_offset
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc
if robot.position_error.norm < (distance/self.emergency_break_safety_factor):
next_speed = robot.current_speed - acc * self.dt * emergency_break_offset
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) -> bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc
return robot.position_error.norm > (distance * offset)
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
emergency_break_safety_factor = 1 # lower = bigger break distance
def is_time_to_break(robot, destination, cruise_speed, acceleration, target_speed):
# formule physique: v_finale ** 2 = v_init ** 2 - 2 * acceleration * distance_deplacement
offset = 1.2 # petite marge pour break avant le point vue qu'il y a du délais
dist_to_target = (destination - robot.pose.position).norm
return dist_to_target < (abs(cruise_speed ** 2 - target_speed**2) / (2 * acceleration)) * offset
def optimal_speed(robot, destination, cruise_speed, acceleration, target_speed):
# formule physique: v_finale ** 2 = v_init ** 2 - 2 * acceleration * distance_deplacement
dist_to_target = (destination - robot.pose.position).norm
return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - target_speed**2)))
|
normal
|
{
"blob_id": "98bf0a332a6753e500b24bed2af16fe4a1cb9568",
"index": 1560,
"step-1": "<mask token>\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n 
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n 
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration,\n target_speed):\n offset = 1.2\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *\n acceleration) * offset\n\n\n<mask token>\n",
"step-3": "<mask token>\nconfig = Config()\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - 
acc * self.dt\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration,\n target_speed):\n offset = 1.2\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *\n acceleration) * offset\n\n\ndef optimal_speed(robot, destination, cruise_speed, acceleration, target_speed\n ):\n dist_to_target = (destination - robot.pose.position).norm\n return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - \n target_speed ** 2)))\n",
"step-4": "from math import sqrt\nfrom Engine.regulators.PID import PID\nfrom Engine.regulators.regulator_base_class import RegulatorBaseClass\nfrom Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED\nfrom Util import Pose\nfrom Util.geometry import clamp, normalize\nfrom Util.pose import Position\nfrom config.config import Config\nconfig = Config()\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt 
*\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration,\n target_speed):\n offset = 1.2\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *\n acceleration) * offset\n\n\ndef optimal_speed(robot, destination, cruise_speed, acceleration, target_speed\n ):\n dist_to_target = (destination - robot.pose.position).norm\n return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - \n target_speed ** 2)))\n",
"step-5": "from math import sqrt\n\nfrom Engine.regulators.PID import PID\nfrom Engine.regulators.regulator_base_class import RegulatorBaseClass\nfrom Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED\nfrom Util import Pose\nfrom Util.geometry import clamp, normalize\nfrom Util.pose import Position\nfrom config.config import Config\nconfig = Config()\n\n\nclass RealVelocityController(RegulatorBaseClass):\n\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4 # lower = bigger path correction\n emergency_break_constant = 0.4 # Higher = higher correction of trajectory\n emergency_break_safety_factor = 1 # lower = bigger break distance\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n\n path_correction = self.following_path_vector(robot)\n\n velocity = robot.position_error * speed_norm / robot.position_error.norm + path_correction * speed_norm / self.v_d\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n\n self.last_commanded_velocity = velocity\n\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n\n direction_error = self.last_commanded_velocity - robot.velocity.position\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1 # on veut que le robot soit plus aggressif en début de trajet\n emergency_break_offset = self.emergency_break_constant / self.dt * (robot.current_speed / 1000) # on veut que le robot break le plus qu'il peut si on s'approche trop vite de la target\n emergency_break_offset = max(1.0, 
emergency_break_offset)\n\n if robot.target_speed > robot.current_speed:\n next_speed = robot.current_speed + acc * self.dt * acceleration_offset\n else:\n if self.is_distance_for_break(robot, acc, offset=1):\n next_speed = robot.current_speed + acc * self.dt * acceleration_offset\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc\n if robot.position_error.norm < (distance/self.emergency_break_safety_factor):\n next_speed = robot.current_speed - acc * self.dt * emergency_break_offset\n else:\n next_speed = robot.current_speed - acc * self.dt\n\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) -> bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc\n return robot.position_error.norm > (distance * offset)\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1 # lower = bigger break distance\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration, target_speed):\n # formule physique: v_finale ** 2 = v_init ** 2 - 2 * acceleration * distance_deplacement\n offset = 1.2 # petite marge pour break avant le point vue qu'il y a du délais\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < (abs(cruise_speed ** 2 - target_speed**2) / (2 * acceleration)) * offset\n\n\ndef optimal_speed(robot, destination, cruise_speed, acceleration, target_speed):\n # formule physique: v_finale ** 2 = v_init ** 2 - 2 * acceleration * distance_deplacement\n dist_to_target = (destination - robot.pose.position).norm\n\n return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - target_speed**2)))\n",
"step-ids": [
10,
11,
13,
14,
15
]
}
|
[
10,
11,
13,
14,
15
] |
no = int(input("Enter a number: "))
no = str(no)
rev = no[::-1]
if no==rev:
print(f"{no}--->{rev} Input is a palindrome")
else:
print(f"{no}--->{rev} Input is not a palindrome")
|
normal
|
{
"blob_id": "020a41e7d3cc3f5adf3a38a6852dac6037595372",
"index": 2043,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif no == rev:\n print(f'{no}--->{rev} Input is a palindrome')\nelse:\n print(f'{no}--->{rev} Input is not a palindrome')\n",
"step-3": "no = int(input('Enter a number: '))\nno = str(no)\nrev = no[::-1]\nif no == rev:\n print(f'{no}--->{rev} Input is a palindrome')\nelse:\n print(f'{no}--->{rev} Input is not a palindrome')\n",
"step-4": "no = int(input(\"Enter a number: \"))\nno = str(no)\nrev = no[::-1]\nif no==rev:\n print(f\"{no}--->{rev} Input is a palindrome\")\nelse:\n print(f\"{no}--->{rev} Input is not a palindrome\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(friends)
print(friends[0])
print(friends[-1])
print(friends[-2])
<|reserved_special_token_1|>
friends = ['Vino', 'Ammu', 'Appu']
print(friends)
print(friends[0])
print(friends[-1])
print(friends[-2])
<|reserved_special_token_1|>
friends = ["Vino", "Ammu", "Appu"]
print(friends)
print(friends[0])
# returns the last element in the list
print(friends[-1])
# returns the second to last element in the list
print(friends[-2])
|
flexible
|
{
"blob_id": "8050b757c20da7ad8dd3c12a30b523b752d6a3ff",
"index": 9457,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(friends)\nprint(friends[0])\nprint(friends[-1])\nprint(friends[-2])\n",
"step-3": "friends = ['Vino', 'Ammu', 'Appu']\nprint(friends)\nprint(friends[0])\nprint(friends[-1])\nprint(friends[-2])\n",
"step-4": "friends = [\"Vino\", \"Ammu\", \"Appu\"]\n\nprint(friends)\nprint(friends[0])\n\n# returns the last element in the list\nprint(friends[-1])\n# returns the second to last element in the list\nprint(friends[-2])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
<|reserved_special_token_0|>
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CORS(app)
<|reserved_special_token_0|>
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
<|reserved_special_token_1|>
from flask import Flask, render_template, send_from_directory
from flask import request, send_file
from flask_cors import CORS
import os
import json
from crossdomain import crossdomain
import constants
import generation_tools
from music_theory import name_chords_in_tracks
import midi_tools
from client_logging import ClientLogger
from generation_tools import Generator
app = Flask(__name__)
CORS(app)
BASE_URL = os.path.abspath(os.path.dirname(__file__))
CLIENT_APP_FOLDER = os.path.join(BASE_URL, 'ClientApp')
DawState = {}
ClientLogger = ClientLogger()
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult': result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult': result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename, mimetype='audio/midi audio/x-midi',
as_attachment=True, attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
return render_template('page_not_found.html'), 404
<|reserved_special_token_1|>
from flask import Flask, render_template, send_from_directory
from flask import request, send_file
from flask_cors import CORS
import os
import json
from crossdomain import crossdomain
import constants
import generation_tools
from music_theory import name_chords_in_tracks
import midi_tools
from client_logging import ClientLogger
from generation_tools import Generator
app = Flask(__name__)
CORS(app)
BASE_URL = os.path.abspath(os.path.dirname(__file__))
CLIENT_APP_FOLDER = os.path.join(BASE_URL, "ClientApp")
DawState = {}
ClientLogger = ClientLogger()
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def add_logs_to_response(response):
response['logs'] = ClientLogger.get_logs()
ClientLogger.clear_logs()
return response
@app.route('/generate/melody', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_melody():
ClientLogger.log('Generating new melody...')
content = request.get_json()
melody_generator = Generator(content)
result = melody_generator.generate_melody()
response = {'generationResult' : result}
return json.dumps(add_logs_to_response(response))
@app.route('/generate/chords', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def generate_chords():
content = request.get_json()
chord_generator = Generator(content)
result_chords, result_chord_names = chord_generator.generate_chords()
DawState['chord_names'] = result_chord_names
response = {'generationResult' : result_chords}
return json.dumps(add_logs_to_response(response))
@app.route('/daw-state', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def update_daw_state():
content = request.get_json()
key = content['key'].replace('#', 's')
scale = content['scale']
tempo = content['tempo']
tracks = content['tracks']
DawState['scale'] = scale
DawState['key'] = key
DawState['tempo'] = tempo
DawState['tracks'] = tracks
chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)
DawState['chord_names'] = chord_names
DawState['chord_degrees'] = chord_degrees
response = DawState
return json.dumps(add_logs_to_response(response))
@app.route('/constants', methods=['GET'])
@crossdomain(origin='*')
def get_constants():
return json.dumps(constants.constants, default=set_default)
@app.route('/midi', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*')
def create_midi_file():
content = request.get_json()
filename, fp = midi_tools.create_midi_file(content)
return send_file(filename,
mimetype='audio/midi audio/x-midi',
as_attachment=True,
attachment_filename=filename)
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page for any unmatched route."""
    return render_template('page_not_found.html'), 404
|
flexible
|
{
"blob_id": "471cab65aac29f5b47de0ffef8f032dbbadf8dd0",
"index": 1877,
"step-1": "<mask token>\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n<mask token>\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-2": "<mask token>\n\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return 
send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-3": "<mask token>\nCORS(app)\n<mask token>\n\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = 
midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-4": "from flask import Flask, render_template, send_from_directory\nfrom flask import request, send_file\nfrom flask_cors import CORS\nimport os\nimport json\nfrom crossdomain import crossdomain\nimport constants\nimport generation_tools\nfrom music_theory import name_chords_in_tracks\nimport midi_tools\nfrom client_logging import ClientLogger\nfrom generation_tools import Generator\napp = Flask(__name__)\nCORS(app)\nBASE_URL = os.path.abspath(os.path.dirname(__file__))\nCLIENT_APP_FOLDER = os.path.join(BASE_URL, 'ClientApp')\nDawState = {}\nClientLogger = ClientLogger()\n\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n response = {'generationResult': result}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n DawState['chord_names'] = result_chord_names\n response = {'generationResult': result_chords}\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = 
name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename, mimetype='audio/midi audio/x-midi',\n as_attachment=True, attachment_filename=filename)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404\n",
"step-5": "from flask import Flask, render_template, send_from_directory\nfrom flask import request, send_file\nfrom flask_cors import CORS\nimport os\nimport json\nfrom crossdomain import crossdomain\nimport constants\nimport generation_tools\nfrom music_theory import name_chords_in_tracks\nimport midi_tools\nfrom client_logging import ClientLogger\nfrom generation_tools import Generator\napp = Flask(__name__)\nCORS(app)\n\nBASE_URL = os.path.abspath(os.path.dirname(__file__))\nCLIENT_APP_FOLDER = os.path.join(BASE_URL, \"ClientApp\")\n\nDawState = {}\nClientLogger = ClientLogger()\n\ndef set_default(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError\n\ndef add_logs_to_response(response):\n response['logs'] = ClientLogger.get_logs()\n ClientLogger.clear_logs()\n return response\n\n@app.route('/generate/melody', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_melody():\n ClientLogger.log('Generating new melody...')\n\n content = request.get_json()\n melody_generator = Generator(content)\n result = melody_generator.generate_melody()\n \n response = {'generationResult' : result}\n return json.dumps(add_logs_to_response(response))\n\n@app.route('/generate/chords', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef generate_chords():\n content = request.get_json()\n chord_generator = Generator(content)\n result_chords, result_chord_names = chord_generator.generate_chords()\n\n DawState['chord_names'] = result_chord_names\n response = {'generationResult' : result_chords}\n return json.dumps(add_logs_to_response(response))\n\n@app.route('/daw-state', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef update_daw_state():\n content = request.get_json()\n key = content['key'].replace('#', 's')\n scale = content['scale']\n tempo = content['tempo']\n tracks = content['tracks']\n \n DawState['scale'] = scale\n DawState['key'] = key\n DawState['tempo'] = tempo\n DawState['tracks'] = tracks\n chord_names, chord_degrees = 
name_chords_in_tracks(tracks, key, scale)\n DawState['chord_names'] = chord_names\n DawState['chord_degrees'] = chord_degrees\n \n response = DawState\n return json.dumps(add_logs_to_response(response))\n\n@app.route('/constants', methods=['GET'])\n@crossdomain(origin='*')\ndef get_constants():\n return json.dumps(constants.constants, default=set_default)\n\n@app.route('/midi', methods=['POST', 'OPTIONS'])\n@crossdomain(origin='*')\ndef create_midi_file():\n content = request.get_json()\n filename, fp = midi_tools.create_midi_file(content)\n return send_file(filename,\n mimetype='audio/midi audio/x-midi',\n as_attachment=True,\n attachment_filename=filename)\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('page_not_found.html'), 404",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
class BinaryTree:
<|reserved_special_token_0|>
def __init__(self, rootObj):
self.key = rootObj
self.leftChild = None
self.rightChild = None
self.parent = None
def insertLeft(self, newNode):
if self.leftChild == None:
self.leftChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.leftChild = t
self.leftChild.parent = self
def insertRight(self, newNode):
if self.rightChild == None:
self.rightChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.rightChild = t
self.rightChild.parent = self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getLeftChild(self):
return self.leftChild
def getSibling(self):
if self.isRoot():
return None
Rsib = self.parent.getRightChild()
return Rsib if Rsib != self else self.parent.getLeftChild()
<|reserved_special_token_0|>
def hasParent(self):
return self.key != None
def setRootVal(self, obj):
self.key = obj
<|reserved_special_token_0|>
def inorder(self):
if self.leftChild:
self.leftChild.inorder()
print(self.key)
if self.rightChild:
self.rightChild.inorder()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def printexp(self):
sVal = ''
if self:
sVal = '(' if self.hasChild() else ''
sVal += printexp(self.getLeftChild())
sVal = sVal + str(self.getRootVal())
sVal = sVal + printexp(self.getRightChild())
sVal += ')' if self.hasChild() else ''
return sVal
def __str__(self):
return self.printexp()
<|reserved_special_token_0|>
def hasRightChild(self):
return self.rightChild
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BinaryTree:
<|reserved_special_token_0|>
def __init__(self, rootObj):
self.key = rootObj
self.leftChild = None
self.rightChild = None
self.parent = None
def insertLeft(self, newNode):
if self.leftChild == None:
self.leftChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.leftChild = t
self.leftChild.parent = self
def insertRight(self, newNode):
if self.rightChild == None:
self.rightChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.rightChild = t
self.rightChild.parent = self
def isLeaf(self):
return not self.leftChild and not self.rightChild
<|reserved_special_token_0|>
def getRightChild(self):
return self.rightChild
def getLeftChild(self):
return self.leftChild
def getSibling(self):
if self.isRoot():
return None
Rsib = self.parent.getRightChild()
return Rsib if Rsib != self else self.parent.getLeftChild()
def hasChild(self):
return self.rightChild != None or self.leftChild != None
def hasParent(self):
return self.key != None
def setRootVal(self, obj):
self.key = obj
def getRootVal(self):
return self.key
def inorder(self):
if self.leftChild:
self.leftChild.inorder()
print(self.key)
if self.rightChild:
self.rightChild.inorder()
<|reserved_special_token_0|>
def preorder(self):
print(self.key)
if self.leftChild:
self.leftChild.preorder()
if self.rightChild:
self.rightChild.preorder()
def printexp(self):
sVal = ''
if self:
sVal = '(' if self.hasChild() else ''
sVal += printexp(self.getLeftChild())
sVal = sVal + str(self.getRootVal())
sVal = sVal + printexp(self.getRightChild())
sVal += ')' if self.hasChild() else ''
return sVal
def __str__(self):
return self.printexp()
<|reserved_special_token_0|>
def hasRightChild(self):
return self.rightChild
def __iter__(self):
"""The standard inorder traversal of a binary tree."""
if self:
if self.hasLeftChild():
for elem in self.leftChild:
yield elem
yield self.key
if self.hasRightChild():
for elem in self.rightChild:
yield elem
def postordereval(self, opers=None):
if not opers:
opers = {'+': operator.add, '-': operator.sub, '*': operator.
mul, '/': operator.truediv}
res1 = None
res2 = None
if self.leftChild:
res1 = self.leftChild.postordereval()
if self.rightChild:
res2 = self.rightChild.postordereval()
if res1 and res2:
return opers[self.key](res1, res2)
else:
return self.key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
class BinaryTree:
"""
A recursive implementation of Binary Tree
Using links and Nodes approach.
"""
def __init__(self, rootObj):
self.key = rootObj
self.leftChild = None
self.rightChild = None
self.parent = None
def insertLeft(self, newNode):
if self.leftChild == None:
self.leftChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.leftChild = t
self.leftChild.parent = self
def insertRight(self, newNode):
if self.rightChild == None:
self.rightChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.rightChild = t
self.rightChild.parent = self
def isLeaf(self):
return not self.leftChild and not self.rightChild
def isRoot(self):
return not self.parent
def getRightChild(self):
return self.rightChild
def getLeftChild(self):
return self.leftChild
def getSibling(self):
if self.isRoot():
return None
Rsib = self.parent.getRightChild()
return Rsib if Rsib != self else self.parent.getLeftChild()
def hasChild(self):
return self.rightChild != None or self.leftChild != None
def hasParent(self):
return self.key != None
def setRootVal(self, obj):
self.key = obj
def getRootVal(self):
return self.key
def inorder(self):
if self.leftChild:
self.leftChild.inorder()
print(self.key)
if self.rightChild:
self.rightChild.inorder()
def postorder(self):
if self.leftChild:
self.leftChild.postorder()
if self.rightChild:
self.rightChild.postorder()
print(self.key)
def preorder(self):
print(self.key)
if self.leftChild:
self.leftChild.preorder()
if self.rightChild:
self.rightChild.preorder()
def printexp(self):
sVal = ''
if self:
sVal = '(' if self.hasChild() else ''
sVal += printexp(self.getLeftChild())
sVal = sVal + str(self.getRootVal())
sVal = sVal + printexp(self.getRightChild())
sVal += ')' if self.hasChild() else ''
return sVal
def __str__(self):
return self.printexp()
def hasLeftChild(self):
return self.leftChild
def hasRightChild(self):
return self.rightChild
def __iter__(self):
"""The standard inorder traversal of a binary tree."""
if self:
if self.hasLeftChild():
for elem in self.leftChild:
yield elem
yield self.key
if self.hasRightChild():
for elem in self.rightChild:
yield elem
def postordereval(self, opers=None):
if not opers:
opers = {'+': operator.add, '-': operator.sub, '*': operator.
mul, '/': operator.truediv}
res1 = None
res2 = None
if self.leftChild:
res1 = self.leftChild.postordereval()
if self.rightChild:
res2 = self.rightChild.postordereval()
if res1 and res2:
return opers[self.key](res1, res2)
else:
return self.key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stack:
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
class BinaryTree:
"""
A recursive implementation of Binary Tree
Using links and Nodes approach.
"""
def __init__(self, rootObj):
self.key = rootObj
self.leftChild = None
self.rightChild = None
self.parent = None
def insertLeft(self, newNode):
if self.leftChild == None:
self.leftChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.leftChild = t
self.leftChild.parent = self
def insertRight(self, newNode):
if self.rightChild == None:
self.rightChild = BinaryTree(newNode) if not isinstance(newNode,
BinaryTree) else newNode
else:
t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree
) else newNode
self.rightChild = t
self.rightChild.parent = self
def isLeaf(self):
return not self.leftChild and not self.rightChild
def isRoot(self):
return not self.parent
def getRightChild(self):
return self.rightChild
def getLeftChild(self):
return self.leftChild
def getSibling(self):
if self.isRoot():
return None
Rsib = self.parent.getRightChild()
return Rsib if Rsib != self else self.parent.getLeftChild()
def hasChild(self):
return self.rightChild != None or self.leftChild != None
def hasParent(self):
return self.key != None
def setRootVal(self, obj):
self.key = obj
def getRootVal(self):
return self.key
def inorder(self):
if self.leftChild:
self.leftChild.inorder()
print(self.key)
if self.rightChild:
self.rightChild.inorder()
def postorder(self):
if self.leftChild:
self.leftChild.postorder()
if self.rightChild:
self.rightChild.postorder()
print(self.key)
def preorder(self):
print(self.key)
if self.leftChild:
self.leftChild.preorder()
if self.rightChild:
self.rightChild.preorder()
def printexp(self):
sVal = ''
if self:
sVal = '(' if self.hasChild() else ''
sVal += printexp(self.getLeftChild())
sVal = sVal + str(self.getRootVal())
sVal = sVal + printexp(self.getRightChild())
sVal += ')' if self.hasChild() else ''
return sVal
def __str__(self):
return self.printexp()
def hasLeftChild(self):
return self.leftChild
def hasRightChild(self):
return self.rightChild
def __iter__(self):
"""The standard inorder traversal of a binary tree."""
if self:
if self.hasLeftChild():
for elem in self.leftChild:
yield elem
yield self.key
if self.hasRightChild():
for elem in self.rightChild:
yield elem
def postordereval(self, opers=None):
if not opers:
opers = {'+': operator.add, '-': operator.sub, '*': operator.
mul, '/': operator.truediv}
res1 = None
res2 = None
if self.leftChild:
res1 = self.leftChild.postordereval()
if self.rightChild:
res2 = self.rightChild.postordereval()
if res1 and res2:
return opers[self.key](res1, res2)
else:
return self.key
def inorder(tree):
if tree != None:
inorder(tree.getLeftChild())
print(tree.getRootVal())
inorder(tree.getRightChild())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005
#
__all__=['BinaryTree', 'Stack']
class Stack:
    """A minimal LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
class BinaryTree:
    """
    A recursive implementation of Binary Tree
    Using links and Nodes approach.

    Each node stores its payload in ``key`` plus links to its left child,
    right child and parent (``None`` when absent).
    """
    def __init__(self, rootObj):
        self.key = rootObj          # payload stored at this node
        self.leftChild = None       # BinaryTree or None
        self.rightChild = None      # BinaryTree or None
        self.parent = None          # BinaryTree or None (None => root)

    def insertLeft(self, newNode):
        """Attach *newNode* (raw value or subtree) as the left child.

        Bug fix: the original only set ``parent`` when replacing an existing
        child, so a first insert left the child believing it was a root.
        """
        t = newNode if isinstance(newNode, BinaryTree) else BinaryTree(newNode)
        self.leftChild = t
        self.leftChild.parent = self

    def insertRight(self, newNode):
        """Attach *newNode* (raw value or subtree) as the right child.

        Bug fix: same missing-parent issue as ``insertLeft``.
        """
        t = newNode if isinstance(newNode, BinaryTree) else BinaryTree(newNode)
        self.rightChild = t
        self.rightChild.parent = self

    def isLeaf(self):
        """True when the node has no children."""
        return (not self.leftChild) and (not self.rightChild)

    def isRoot(self):
        """True when the node has no parent."""
        return not self.parent

    def getRightChild(self):
        return self.rightChild

    def getLeftChild(self):
        return self.leftChild

    def getSibling(self):
        """Return the other child of this node's parent, or None for the root."""
        if self.isRoot():
            return None
        Rsib = self.parent.getRightChild()
        return Rsib if Rsib != self else self.parent.getLeftChild()

    def hasChild(self):
        """True when at least one child is present."""
        return (self.rightChild is not None) or (self.leftChild is not None)

    def hasParent(self):
        """True when this node has a parent (i.e. it is not the root).

        Bug fix: the original tested ``self.key`` instead of ``self.parent``,
        so it answered "does this node have a payload" rather than
        "does this node have a parent".
        """
        return self.parent is not None

    def setRootVal(self, obj):
        self.key = obj

    def getRootVal(self):
        return self.key

    def inorder(self):
        """Print keys in left / node / right order."""
        if self.leftChild:
            self.leftChild.inorder()
        print(self.key)
        if self.rightChild:
            self.rightChild.inorder()

    def postorder(self):
        """Print keys in left / right / node order."""
        if self.leftChild:
            self.leftChild.postorder()
        if self.rightChild:
            self.rightChild.postorder()
        print(self.key)

    def preorder(self):
        """Print keys in node / left / right order."""
        print(self.key)
        if self.leftChild:
            self.leftChild.preorder()
        if self.rightChild:
            self.rightChild.preorder()

    def printexp(self):
        """Return the fully parenthesised infix expression rooted at this node.

        Bug fix: the original delegated to a same-named module-level function,
        so the method broke whenever the class was used on its own.  Now the
        recursion is self-contained.
        """
        left = self.leftChild.printexp() if self.leftChild else ''
        right = self.rightChild.printexp() if self.rightChild else ''
        body = left + str(self.key) + right
        return '(' + body + ')' if self.hasChild() else body

    def __str__(self):
        return self.printexp()

    def hasLeftChild(self):
        return self.leftChild

    def hasRightChild(self):
        return self.rightChild

    def __iter__(self):
        """The standard inorder traversal of a binary tree."""
        if self:
            if self.hasLeftChild():
                for elem in self.leftChild:
                    yield elem
            yield self.key
            if self.hasRightChild():
                for elem in self.rightChild:
                    yield elem

    def postordereval(self, opers=None):
        """Evaluate the expression tree bottom-up.

        Leaves hold operands; interior keys must be one of '+', '-', '*', '/'
        (or a key of a caller-supplied *opers* mapping).
        """
        import operator  # bug fix: `operator` is never imported at module level
        if not opers:
            opers = {'+': operator.add, '-': operator.sub,
                     '*': operator.mul, '/': operator.truediv}
        res1 = self.leftChild.postordereval(opers) if self.leftChild else None
        res2 = self.rightChild.postordereval(opers) if self.rightChild else None
        # `is not None` (not truthiness) so an operand of 0 is not mistaken
        # for a missing subtree.
        if res1 is not None and res2 is not None:
            return opers[self.key](res1, res2)
        return self.key
def inorder(tree):
    """Print the keys of *tree* in inorder (left, node, right); no-op for None."""
    if tree is None:
        return
    inorder(tree.getLeftChild())
    print(tree.getRootVal())
    inorder(tree.getRightChild())
# def printexp(tree):
# if tree.leftChild:
# print'( '
# printexp(tree.getLeftChild())
# print '%s '%tree.getRootVal()
# if tree.rightChild:
# printexp(tree.getRightChild())
# print') '
def printexp(tree):
    """Return the parenthesised infix rendering of *tree*; '' for an empty tree."""
    if not tree:
        return ''
    inner = printexp(tree.getLeftChild()) + str(tree.getRootVal()) + printexp(tree.getRightChild())
    return '(' + inner + ')' if tree.hasChild() else inner
def postordereval(tree):
    """Evaluate the expression tree *tree*; returns None for an empty tree.

    Leaves hold operands; interior nodes hold one of '+', '-', '*', '/'.

    Fixes over the original:
    - `operator` was used but never imported anywhere in this module
      (NameError on first call); imported locally here.
    - `if res1 and res2` treated an operand of 0 as "missing subtree";
      now compared with `is not None`.
    """
    import operator
    opers = {'+': operator.add, '-': operator.sub,
             '*': operator.mul, '/': operator.truediv}
    if tree is None:
        return None
    res1 = postordereval(tree.getLeftChild())
    res2 = postordereval(tree.getRightChild())
    if res1 is not None and res2 is not None:
        return opers[tree.getRootVal()](res1, res2)
    return tree.getRootVal()
def height(tree):
    """Return the height of *tree* counted in edges; an empty tree has height -1."""
    if tree is None:
        return -1
    tallest_child = max(height(tree.leftChild), height(tree.rightChild))
    return tallest_child + 1
if __name__ == '__main__':
    # Smoke test 1: a small numeric tree printed inorder (3, 7, 9).
    numbers = BinaryTree(7)
    numbers.insertLeft(3)
    numbers.insertRight(9)
    inorder(numbers)

    # Smoke test 2: expression tree for (4 + 5) * 7.
    expr = BinaryTree('*')
    expr.insertLeft('+')
    plus = expr.getLeftChild()
    plus.insertLeft(4)
    plus.insertRight(5)
    expr.insertRight(7)
    print(printexp(expr))
    # print(postordereval(expr))
    print(height(expr))
|
flexible
|
{
"blob_id": "5f48c7a68cb9734d84dee2cf8ff4d7be490cf328",
"index": 2888,
"step-1": "<mask token>\n\n\nclass BinaryTree:\n <mask token>\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n <mask token>\n <mask token>\n <mask token>\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n <mask token>\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n <mask token>\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n <mask token>\n <mask token>\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n <mask token>\n\n def hasRightChild(self):\n return self.rightChild\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BinaryTree:\n <mask token>\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n\n def isLeaf(self):\n return not self.leftChild and not self.rightChild\n <mask token>\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return self.rightChild != None or self.leftChild != None\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n <mask token>\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n <mask token>\n\n 
def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n def postordereval(self, opers=None):\n if not opers:\n opers = {'+': operator.add, '-': operator.sub, '*': operator.\n mul, '/': operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval()\n if self.rightChild:\n res2 = self.rightChild.postordereval()\n if res1 and res2:\n return opers[self.key](res1, res2)\n else:\n return self.key\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\"\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n\n def isLeaf(self):\n return not self.leftChild and not self.rightChild\n\n def isRoot(self):\n return not self.parent\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return self.rightChild != None or self.leftChild != None\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n 
self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n\n def hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n def postordereval(self, opers=None):\n if not opers:\n opers = {'+': operator.add, '-': operator.sub, '*': operator.\n mul, '/': operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval()\n if self.rightChild:\n res2 = self.rightChild.postordereval()\n if res1 and res2:\n return opers[self.key](res1, res2)\n else:\n return self.key\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Stack:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\"\n\n def __init__(self, rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self, newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.leftChild = t\n self.leftChild.parent = self\n\n def insertRight(self, newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode,\n BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree\n ) else newNode\n self.rightChild = t\n self.rightChild.parent = self\n\n def isLeaf(self):\n return not self.leftChild and not self.rightChild\n\n def isRoot(self):\n return not self.parent\n\n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n\n def getSibling(self):\n if self.isRoot():\n return None\n Rsib = self.parent.getRightChild()\n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return self.rightChild != None or self.leftChild != None\n\n def hasParent(self):\n return self.key != None\n\n def setRootVal(self, obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n 
self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = ''\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild())\n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n\n def hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n def postordereval(self, opers=None):\n if not opers:\n opers = {'+': operator.add, '-': operator.sub, '*': operator.\n mul, '/': operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval()\n if self.rightChild:\n res2 = self.rightChild.postordereval()\n if res1 and res2:\n return opers[self.key](res1, res2)\n else:\n return self.key\n\n\ndef inorder(tree):\n if tree != None:\n inorder(tree.getLeftChild())\n print(tree.getRootVal())\n inorder(tree.getRightChild())\n\n\n<mask token>\n",
"step-5": "# Bradley N. Miller, David L. Ranum\n# Introduction to Data Structures and Algorithms in Python\n# Copyright 2005\n# \n\n\n__all__=['BinaryTree', 'Stack']\n\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items)-1]\n\n def size(self):\n return len(self.items)\n\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n \"\"\" \n def __init__(self,rootObj):\n self.key = rootObj\n self.leftChild = None\n self.rightChild = None\n self.parent = None\n\n def insertLeft(self,newNode):\n if self.leftChild == None:\n self.leftChild = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n # t.left = self.leftChild\n self.leftChild = t\n self.leftChild.parent = self \n \n def insertRight(self,newNode):\n if self.rightChild == None:\n self.rightChild = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n else:\n t = BinaryTree(newNode) if not isinstance(newNode, BinaryTree) else newNode\n # t.right = self.rightChild\n self.rightChild = t\n self.rightChild.parent = self \n\n def isLeaf(self):\n return ((not self.leftChild) and (not self.rightChild))\n\n def isRoot(self):\n return not self.parent\n \n def getRightChild(self):\n return self.rightChild\n\n def getLeftChild(self):\n return self.leftChild\n \n def getSibling(self):\n if self.isRoot(): \n return None\n Rsib = self.parent.getRightChild() \n return Rsib if Rsib != self else self.parent.getLeftChild()\n\n def hasChild(self):\n return (self.rightChild != None) or (self.leftChild != None) \n\n def hasParent(self):\n return (self.key != None)\n\n def setRootVal(self,obj):\n self.key = obj\n\n def getRootVal(self):\n return self.key\n\n 
def inorder(self):\n if self.leftChild:\n self.leftChild.inorder()\n print(self.key)\n if self.rightChild:\n self.rightChild.inorder()\n\n def postorder(self):\n if self.leftChild:\n self.leftChild.postorder()\n if self.rightChild:\n self.rightChild.postorder()\n print(self.key)\n\n\n def preorder(self):\n print(self.key)\n if self.leftChild:\n self.leftChild.preorder()\n if self.rightChild:\n self.rightChild.preorder()\n\n def printexp(self):\n sVal = \"\"\n if self:\n sVal = '(' if self.hasChild() else ''\n sVal += printexp(self.getLeftChild())\n sVal = sVal + str(self.getRootVal())\n sVal = sVal + printexp(self.getRightChild()) \n sVal += ')' if self.hasChild() else ''\n return sVal\n\n def __str__(self):\n return self.printexp()\n\n def hasLeftChild(self):\n return self.leftChild\n\n def hasRightChild(self):\n return self.rightChild\n\n def __iter__(self):\n \"\"\"The standard inorder traversal of a binary tree.\"\"\"\n if self:\n if self.hasLeftChild():\n for elem in self.leftChild:\n yield elem\n yield self.key\n if self.hasRightChild():\n for elem in self.rightChild:\n yield elem\n\n\n def postordereval(self, opers = None):\n if not opers:\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n res1 = None\n res2 = None\n if self.leftChild:\n res1 = self.leftChild.postordereval() #// \\label{peleft}\n if self.rightChild:\n res2 = self.rightChild.postordereval() #// \\label{peright}\n if res1 and res2:\n return opers[self.key](res1,res2) #// \\label{peeval}\n else:\n return self.key\n\ndef inorder(tree):\n if tree != None:\n inorder(tree.getLeftChild())\n print(tree.getRootVal())\n inorder(tree.getRightChild())\n\n# def printexp(tree):\n# if tree.leftChild:\n# print'( '\n# printexp(tree.getLeftChild())\n# print '%s '%tree.getRootVal()\n# if tree.rightChild:\n# printexp(tree.getRightChild())\n# print') '\n\ndef printexp(tree):\n sVal = \"\"\n if tree:\n sVal = '(' if tree.hasChild() else ''\n sVal += 
printexp(tree.getLeftChild())\n sVal = sVal + str(tree.getRootVal())\n sVal = sVal + printexp(tree.getRightChild()) \n sVal += ')' if tree.hasChild() else ''\n return sVal\n\ndef postordereval(tree):\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n res1 = None\n res2 = None\n if tree:\n res1 = postordereval(tree.getLeftChild()) #// \\label{peleft}\n res2 = postordereval(tree.getRightChild()) #// \\label{peright}\n if res1 and res2:\n return opers[tree.getRootVal()](res1,res2) #// \\label{peeval}\n else:\n return tree.getRootVal()\n\ndef height(tree):\n if tree == None:\n return -1\n else:\n return 1 + max(height(tree.leftChild),height(tree.rightChild))\n\nif __name__ == '__main__':\n t = BinaryTree(7)\n t.insertLeft(3)\n t.insertRight(9)\n inorder(t)\n # import operator\n x = BinaryTree('*')\n x.insertLeft('+')\n l = x.getLeftChild()\n l.insertLeft(4)\n l.insertRight(5)\n x.insertRight(7)\n print(printexp(x))\n # print(postordereval(x))\n print(height(x))\n",
"step-ids": [
12,
19,
30,
31,
37
]
}
|
[
12,
19,
30,
31,
37
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('n:', end='')
<|reserved_special_token_0|>
print('a:', end='')
<|reserved_special_token_0|>
for i in range(n):
for j in range(i + 1, n):
for k in range(j + 1, n):
ai, aj, ak = sorted([a[i], a[j], a[k]])
if ai + aj > ak and ai + aj + ak > ans:
ans = ai + aj + ak
print(ans)
<|reserved_special_token_1|>
print('n:', end='')
n = int(input())
print('a:', end='')
a = list(map(int, input().split()))
ans = 0
for i in range(n):
for j in range(i + 1, n):
for k in range(j + 1, n):
ai, aj, ak = sorted([a[i], a[j], a[k]])
if ai + aj > ak and ai + aj + ak > ans:
ans = ai + aj + ak
print(ans)
<|reserved_special_token_1|>
print("n:",end="")
n=int(input())
print("a:",end="")
a=list(map(int,input().split()))
ans=0
for i in range(n):
for j in range(i+1,n):
for k in range(j+1,n):
ai,aj,ak=sorted([a[i],a[j],a[k]])
if(ai+aj>ak and ai+aj+ak>ans):
ans=ai+aj+ak
print(ans)
|
flexible
|
{
"blob_id": "130f49028833bf57d7e4f9fbb0764801c3508c3b",
"index": 3055,
"step-1": "<mask token>\n",
"step-2": "print('n:', end='')\n<mask token>\nprint('a:', end='')\n<mask token>\nfor i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n ai, aj, ak = sorted([a[i], a[j], a[k]])\n if ai + aj > ak and ai + aj + ak > ans:\n ans = ai + aj + ak\nprint(ans)\n",
"step-3": "print('n:', end='')\nn = int(input())\nprint('a:', end='')\na = list(map(int, input().split()))\nans = 0\nfor i in range(n):\n for j in range(i + 1, n):\n for k in range(j + 1, n):\n ai, aj, ak = sorted([a[i], a[j], a[k]])\n if ai + aj > ak and ai + aj + ak > ans:\n ans = ai + aj + ak\nprint(ans)\n",
"step-4": "print(\"n:\",end=\"\")\nn=int(input())\nprint(\"a:\",end=\"\")\na=list(map(int,input().split()))\n\nans=0\nfor i in range(n):\n for j in range(i+1,n):\n for k in range(j+1,n):\n ai,aj,ak=sorted([a[i],a[j],a[k]])\n if(ai+aj>ak and ai+aj+ak>ans):\n ans=ai+aj+ak\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for p in palabras:
print(palabras[p])
<|reserved_special_token_1|>
frase = 'todos somos promgramadores'
palabras = frase.split()
for p in palabras:
print(palabras[p])
<|reserved_special_token_1|>
frase = "todos somos promgramadores"
palabras = frase.split()
for p in palabras:
print(palabras[p])
#if p[-2] == "o":
|
flexible
|
{
"blob_id": "00c57e7e26a3181ab23697a25257aca479d9ee05",
"index": 5755,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor p in palabras:\n print(palabras[p])\n",
"step-3": "frase = 'todos somos promgramadores'\npalabras = frase.split()\nfor p in palabras:\n print(palabras[p])\n",
"step-4": "frase = \"todos somos promgramadores\"\r\npalabras = frase.split()\r\nfor p in palabras:\r\n print(palabras[p])\r\n\r\n\r\n #if p[-2] == \"o\":\r\n \r\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.